content: string (lengths 22 to 815k) · id: int64 (0 to 4.91M)
def recognize_emotion(name, mode, dataset):
    """
    The main program for building the system. The following kinds of model are supported:
        1. Convolutional Neural Network (CNN)
        2. Support Vector Machine (SVM)
        3. Adaboost
        4. Multilayer Perceptron (MLP)

    Args:
        name: path of the photo to recognize
        mode: mode used for face detection, 'auto' or 'manual'
        dataset: dataset used for face recognition, 'CK+48' or 'fer2013'

    Returns:
        prediction: emotion prediction (numerical) of detected faces using CNN and fisherfaces
        recognized: emotion recognition (categorical) of detected faces using CNN and fisherfaces

    Note:
        The result is printed to standard output; accuracy needs to be improved.
    """
    # Load the dataset into a shuffled list of tuples
    dataset_tuple_list = dp.load_dataset(dataset)

    # Split the dataset into train, test, validation and their corresponding labels
    img_train, img_train_label, img_validation, img_validation_label, img_test, img_test_label, le = \
        dp.split_data(dataset_tuple_list)

    # Fisherfaces: get the fisherfaces_train and fisherfaces_test feature vectors for further training and predicting
    fisher_train, fisher_test, fisher_validation, pca, lda = fe.fisherfaces(
        img_train, img_test, img_validation, img_train_label, le)

    # Construct and train the selected model with the input train and validation datasets
    model_trained = mc.train_model('cnn', fisher_train, img_train_label,
                                   fisher_validation, img_validation_label, 'fisherfaces')

    # Detect faces in the photo and get their coordinates
    face_coordinates, resized_list = fd.detect_face(name, mode)

    # Project faces to fisherfaces
    face_column_matrix = fe.constructRowMatrix(np.array(resized_list))
    pca_face = pca.transform(face_column_matrix)
    fisherfaces_face = lda.transform(pca_face)

    # Use the trained CNN to recognize emotions
    fisherfaces_face = fisherfaces_face.reshape(-1, 1, 6)
    prediction = model_trained.predict(fisherfaces_face)
    recognized = np.argmax(prediction, axis=1)
    print(f'\nprediction:\n{prediction}\nrecognized:\n{recognized}')
    return prediction, recognized
5,352,000
def check_main_depencies():
    """ Check if dependencies listed in TOOLS_NEEDED are installed """
    print("# Checking dependencies")
    for tool in TOOLS_NEEDED:
        print("[+] Checking %s... " % tool, end='')
        if which(tool) is not None:
            print("ok!")
        else:
            print("missing!")
            sys.exit()
    print()
    print("[+] Dependencies ok !")
    print()
5,352,001
def feedback(request):
    """
    Feedback page. Here one can send feedback to improve the website further.
    """
    return render(request, "groundfloor/common/feedback.html", context=None)
5,352,002
def AddCreateArgsToParser(parser): """Add flags for creating a node template to the argument parser.""" parser.add_argument( '--description', help='An optional description of this resource.') parser.add_argument( '--node-affinity-labels', metavar='KEY=VALUE', type=arg_parsers.ArgDict( key_type=labels_util.KEY_FORMAT_VALIDATOR, value_type=labels_util.VALUE_FORMAT_VALIDATOR), action=arg_parsers.UpdateAction, help='Labels to use for node affinity, which will be used in instance ' 'scheduling. This corresponds to the `--node-affinity` flag on ' '`compute instances create` and `compute instance-templates ' 'create`.') node_type_group = parser.add_group(mutex=True, required=True) node_type_group.add_argument( '--node-type', help="""\ The node type to use for nodes in node groups using this template. The type of a node determines what resources are available to instances running on the node. See the following for more information: $ {grandparent_command} node-types list""") node_type_group.add_argument( '--node-requirements', type=arg_parsers.ArgDict( spec={ 'vCPU': _IntOrAny(), 'memory': _BinarySizeOrAny('MB'), 'localSSD': _BinarySizeOrAny('GB'), }), help="""\ The requirements for nodes. Google Compute Engine will automatically choose a node type that fits the requirements on Node Group creation. If multiple node types match your defined criteria, the NodeType with the least amount of each resource will be selected. You can specify 'any' to indicate any non-zero value for a certain resource. The following keys are allowed: *vCPU*:::: The number of committed cores available to the node. *memory*:::: The amount of memory available to the node. This value should include unit (eg. 3072MB or 9GB). If no units are specified, *MB is assumed*. *localSSD*:::: Optional. The amount of SSD space available on the node. This value should include unit (eg. 3072MB or 9GB). If no units are specified, *GB is assumed*. If this key is not specified, local SSD is unconstrained. """)
5,352,003
def fill_nodata_image(dataset: xr.Dataset) -> Tuple[np.ndarray, np.ndarray]:
    """
    Interpolate no data values in image. If no mask was given, create an all-valid mask.

    :param dataset: Dataset image
    :type dataset: xarray.Dataset containing:
        - im : 2D (row, col) xarray.DataArray
    :return: a Tuple that contains the filled image and mask
    :rtype: Tuple of np.ndarray
    """
    if 'msk' in dataset:
        img, msk = interpolate_nodata_sgm(dataset['im'].data, dataset['msk'].data)
    else:
        msk = np.full((dataset['im'].data.shape[0], dataset['im'].data.shape[1]),
                      int(dataset.attrs['valid_pixels']))
        img = dataset['im'].data
    return img, msk
5,352,004
def find(value, a_list):
    """
    TestCase for find
    >>> find(26, [12,14])
    True
    >>> find(40, [14, 15, 16, 4, 6, 5])
    False
    >>> find(1, [1])
    False
    >>> find(1, [])
    False
    >>> find(4, [2, 3, 2])
    True
    """
    # First turn the list into a <value, count> dictionary
    if a_list is None or len(a_list) < 2:
        return False
    d = {}
    for i in range(len(a_list)):
        if a_list[i] in d:
            d[a_list[i]] = d[a_list[i]] + 1
        else:
            d[a_list[i]] = 1
    # Second pass
    for i in a_list:
        if value - i in d:
            # Exclude the element itself
            x = value == i * 2
            if not (x and d[i] == 1):
                return True
    return False
5,352,005
def insert_from_query( conn: connection, relation: LoadableRelation, table_name: Optional[TableName] = None, columns: Optional[Sequence[str]] = None, query_stmt: Optional[str] = None, dry_run=False, ) -> None: """ Load data into table from its query (aka materializing a view). The table name, query, and columns may be overridden from their defaults, which are the values from the relation. """ if table_name is None: table_name = relation.target_table_name if columns is None: columns = relation.unquoted_columns if query_stmt is None: query_stmt = relation.query_stmt insert_func = partial( etl.dialect.redshift.insert_from_query, conn, table_name, columns, query_stmt, dry_run=dry_run ) if relation.in_transaction: insert_func() else: call_with_retry(etl.config.get_config_int("arthur_settings.insert_data_retries"), insert_func)
5,352,006
def transform_bbox( bbox, source_epsg_code, target_epsg_code, all_coords=False ): """ Transform bbox from source_epsg_code to target_epsg_code, if necessary :returns np.array of shape 4 which represent the two coordinates: left, bottom and right, top. When `all_coords` is set to `True`, a np.array of shape 8 is given which represents coords of the bbox in the following order: left top, right top, right bottom, left bottom """ if source_epsg_code != target_epsg_code: # XXX: Not entirely sure whether transformations between two projected # coordinate systems always do retain the rectangular shape of a bbox. # Transformations between an unprojected system (e.g. WGS84) and a # projected system (e.g. RDNEW) will experience distortion: the # resulting shape cannot be accurately represented by top left # and bottom right. source_srs = get_spatial_reference(source_epsg_code) target_srs = get_spatial_reference(target_epsg_code) if source_srs.IsProjected() != target_srs.IsProjected(): msg = "Transforming a bbox from %s to %s is inaccurate." logger.warning(msg, source_epsg_code, target_epsg_code) # Transform to [[left, right],[top, bottom]] input_x = [bbox[BBOX_LEFT], bbox[BBOX_RIGHT]] input_y = [bbox[BBOX_TOP], bbox[BBOX_BOTTOM]] if all_coords: input_x += [bbox[BBOX_RIGHT], bbox[BBOX_LEFT]] input_y += [bbox[BBOX_TOP], bbox[BBOX_BOTTOM]] bbox_trans = np.array( transform_xys( np.array(input_x), np.array(input_y), source_epsg_code, target_epsg_code ) ) if all_coords: bbox = np.array([ bbox_trans[0][0], bbox_trans[1][0], # left_top bbox_trans[0][2], bbox_trans[1][2], # right_top bbox_trans[0][1], bbox_trans[1][1], # right_bottom bbox_trans[0][3], bbox_trans[1][3] # left_bottom ]) else: # Transform back to [left,bottom,right,top] bbox = np.array( [min(bbox_trans[0]), min(bbox_trans[1]), # left_bottom max(bbox_trans[0]), max(bbox_trans[1]) # right_top ] ) return bbox
5,352,007
def distance_metric(vector1, vector2):
    """
    Returns a score value using Jaccard distance

    Args:
        vector1 (np.array): first vector with minHash values
        vector2 (np.array): second vector with minHash values

    Returns:
        float: Jaccard distance
    """
    return distance.pdist(np.array([vector1, vector2]), 'jaccard').sum()
5,352,008
def update_calendar(request): """ to update an entry to the academic calendar to be updated. @param: request - contains metadata about the requested page. @variables: from_date - The starting date for the academic calendar event. to_date - The ending date for the academic caldendar event. desc - Description for the academic calendar event. prev_desc - Description for the previous event which is to be updated. get_calendar_details = Get the object of the calendar instance from the database for the previous Description. """ if user_check(request): return HttpResponseRedirect('/academic-procedures/') calendar = Calendar.objects.all() context= { 'academic_calendar' :calendar, 'tab_id' :['4','1'] } if request.method == "POST": try: from_date = request.POST.getlist('from_date') to_date = request.POST.getlist('to_date') desc = request.POST.getlist('description')[0] prev_desc = request.POST.getlist('prev_desc')[0] from_date = from_date[0].split('-') from_date = [int(i) for i in from_date] from_date = datetime.datetime(*from_date).date() to_date = to_date[0].split('-') to_date = [int(i) for i in to_date] to_date = datetime.datetime(*to_date).date() get_calendar_details = Calendar.objects.all().filter(description=prev_desc).first() get_calendar_details.description = desc get_calendar_details.from_date = from_date get_calendar_details.to_date = to_date get_calendar_details.save() except Exception as e: from_date="" to_date="" desc="" return render(request, "ais/ais.html", context) return render(request, "ais/ais.html", context)
5,352,009
def get_section(entry: LogEntry) -> str:
    """returns the section of the request (/twiki/bin/edit/Main -> /twiki)"""
    section = entry.request.split('/')[:2]
    return '/'.join(section)
5,352,010
def reverse_lookup(d, v):
    """
    Reverse lookup all corresponding keys of a given value.
    Return a list containing all the keys.
    Raise an exception if the list is empty.
    """
    l = []
    for k in d:
        if d[k] == v:
            l.append(k)
    if l == []:
        raise ValueError
    else:
        return l
5,352,011
def compile_channels_table(*, channels_meta, sources, detectors, wavelengths): """Compiles a NIRSChannelsTable given the details about the channels, sources, detectors, and wavelengths. """ table = NIRSChannelsTable() for channel_id, channel in channels_meta.items(): source_label = sources.label[channel["source_idx"]] detector_label = detectors.label[channel["detector_idx"]] source_wavelength = wavelengths[channel["wavelength_idx"]] table.add_row( label=f"{source_label}_{detector_label} {source_wavelength:.0f}", source=channel["source_idx"], detector=channel["detector_idx"], source_wavelength=source_wavelength, ) table.source.table = sources table.detector.table = detectors return table
5,352,012
def cpe2pkg_tool():
    """Unsupported ecosystem CVE fixture."""
    bin = Path(__file__).parent.parent / Path('tools/bin/cpe2pkg.jar')
    if bin.exists():
        return str(bin)
    else:
        raise RuntimeError('`cpe2pkg.jar` is not available, please run `make build-cpe2pkg` once.')
5,352,013
def put_block_public_access_configuration(BlockPublicAccessConfiguration=None): """ Creates or updates an Amazon EMR block public access configuration for your AWS account in the current Region. For more information see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide . See also: AWS API Documentation Exceptions :example: response = client.put_block_public_access_configuration( BlockPublicAccessConfiguration={ 'BlockPublicSecurityGroupRules': True|False, 'PermittedPublicSecurityGroupRuleRanges': [ { 'MinRange': 123, 'MaxRange': 123 }, ] } ) :type BlockPublicAccessConfiguration: dict :param BlockPublicAccessConfiguration: [REQUIRED]\nA configuration for Amazon EMR block public access. The configuration applies to all clusters created in your account for the current Region. The configuration specifies whether block public access is enabled. If block public access is enabled, security groups associated with the cluster cannot have rules that allow inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port is specified as an exception using PermittedPublicSecurityGroupRuleRanges in the BlockPublicAccessConfiguration . By default, Port 22 (SSH) is an exception, and public access is allowed on this port. You can change this by updating BlockPublicSecurityGroupRules to remove the exception.\n\nNote\nFor accounts that created clusters in a Region before November 25, 2019, block public access is disabled by default in that Region. To use this feature, you must manually enable and configure it. For accounts that did not create an EMR cluster in a Region before this date, block public access is enabled by default in that Region.\n\n\nBlockPublicSecurityGroupRules (boolean) -- [REQUIRED]Indicates whether EMR block public access is enabled (true ) or disabled (false ). By default, the value is false for accounts that have created EMR clusters before July 2019. For accounts created after this, the default is true .\n\nPermittedPublicSecurityGroupRuleRanges (list) --Specifies ports and port ranges that are permitted to have security group rules that allow inbound traffic from all public sources. For example, if Port 23 (Telnet) is specified for PermittedPublicSecurityGroupRuleRanges , Amazon EMR allows cluster creation if a security group associated with the cluster has a rule that allows inbound traffic on Port 23 from IPv4 0.0.0.0/0 or IPv6 port ::/0 as the source.\nBy default, Port 22, which is used for SSH access to the cluster EC2 instances, is in the list of PermittedPublicSecurityGroupRuleRanges .\n\n(dict) --A list of port ranges that are permitted to allow inbound traffic from all public IP addresses. To specify a single port, use the same value for MinRange and MaxRange .\n\nMinRange (integer) -- [REQUIRED]The smallest port number in a specified range of port numbers.\n\nMaxRange (integer) --The smallest port number in a specified range of port numbers.\n\n\n\n\n\n\n :rtype: dict ReturnsResponse Syntax{} Response Structure (dict) -- Exceptions EMR.Client.exceptions.InternalServerException EMR.Client.exceptions.InvalidRequestException :return: {} :returns: EMR.Client.exceptions.InternalServerException EMR.Client.exceptions.InvalidRequestException """ pass
5,352,014
def spawn_thread(func, *args, **kwds):
    """
    Utility function for creating and starting a daemonic thread.
    """
    thr = threading.Thread(target=func, args=args, kwargs=kwds)
    thr.setDaemon(True)
    thr.start()
    return thr
5,352,015
def decision(question):
    """Asks the user a question, returning a True/False answer"""
    if sys.version_info[0] < 3:
        if raw_input("\n%s [Y/n] " % question) in ["", "y", "Y"]:
            return True
    else:
        if input("\n%s [Y/n] " % question) in ["", "y", "Y"]:
            return True
    return False
5,352,016
def _fail(msg):
    """Output failure message when auto configuration fails."""
    fail("%s %s\n" % (red("ROS Configuration Error:"), msg))
5,352,017
def get_thread_replies(parent_id):
    """
    Get all replies to a thread.
    If the thread does not exist, return an empty list.
    :param parent_id: Thread ID
    :return: replies to thread
    """
    assert type(parent_id) is uuid.UUID, """parent_id is not correct type"""
    reply_query = Query()
    results = db.search(reply_query.parent == str(parent_id))
    return results
5,352,018
def compute_acc(pred, labels):
    """
    Compute the accuracy of prediction given the labels.
    """
    return (torch.argmax(pred, dim=1) == labels).float().sum() / len(pred)
5,352,019
async def read_update_status() -> str:
    """Read update status."""
    return (
        await cache.get(Config.update_status_id())
        if await cache.exists(Config.update_status_id())
        else "ready_to_update"
    )
5,352,020
def _login_and_select_first_active_device(api):
    """Login Erie Connect and select first active device"""
    # These do i/o
    _LOGGER.debug(f'{DOMAIN}: erie_connect.login()')
    api.login()
    _LOGGER.debug(f'{DOMAIN}: erie_connect.select_first_active_device()')
    api.select_first_active_device()
    if (
        api.device is None
        or api.auth is None
    ):
        raise InvalidData
    return api.device.id
5,352,021
def stopall(qthread: Thread, target: str) -> None:
    """Stops child processes and queue thread."""
    stop_processes(target)
    sleep(5)
    qthread.join()
5,352,022
def PCopy (inFA, err):
    """
    Make a copy of a GPUFArray

    returns copy
    * inFA = input Python GPUFArray
    * err  = Python Obit Error/message stack
    """
    ################################################################
    # Checks
    if not PIsA(inFA):
        print("Actually ", inFA.__class__)
        raise TypeError("inFA MUST be a Python Obit GPUFArray")
    outFA = GPUFArray("None")
    outFA.me = Obit.GPUFArrayCopy(inFA.me, outFA.me, err.me)
    if err.isErr:
        OErr.printErrMsg(err, "Error copying GPUFArray")
    return outFA
    # end PCopy
5,352,023
def _get_statuses(policy_type_id, policy_instance_id):
    """
    shared helper to get statuses for an instance
    """
    _instance_is_valid(policy_type_id, policy_instance_id)
    prefixes_for_handler = "{0}{1}.{2}.".format(HANDLER_PREFIX, policy_type_id, policy_instance_id)
    return list(SDL.find_and_get(A1NS, prefixes_for_handler).values())
5,352,024
def _interval_example(avg_price_with_interval): """# Plot the data ordered by the numerical axis""" ch = chartify.Chart(blank_labels=True, x_axis_type='categorical') ch.set_title("Interval plots") ch.set_subtitle( "Represent variation. Optional `middle_column` to mark a middle point." ) ch.plot.interval( data_frame=avg_price_with_interval, categorical_columns='fruit', lower_bound_column='lower_ci', upper_bound_column='upper_ci', middle_column='mean') ch.show(_OUTPUT_FORMAT)
5,352,025
def phase_lines(graph):
    """
    Determines the phase lines of a graph.
    :param graph: Graph
    :return: dictionary with node id : phase in cut.
    """
    if has_cycles(graph):
        raise ValueError("a cyclic graph will not have phaselines.")
    phases = {n: 0 for n in graph.nodes()}
    q = graph.nodes(in_degree=0)
    while q:
        n = q.pop(0)
        level = phases[n]
        children = graph.nodes(from_node=n)
        for c in children:
            if phases[c] <= level:
                phases[c] = level + 1
                q.append(c)
    return phases
5,352,026
def creds() -> Account: """Load or obtain credentials for user.""" credentials = "8da780f3-5ea0-4d97-ab13-9e7976370624" protocol = MSGraphProtocol(timezone="Europe/Stockholm") scopes = protocol.get_scopes_for(SCOPES) token_backend = FileSystemTokenBackend( token_path=os.path.dirname(__file__), token_filename="o365_token.txt" ) connection = Connection( credentials, auth_flow_type="public", token_backend=token_backend ) account = Account( credentials, auth_flow_type="public", protocol=protocol, token_backend=token_backend, ) if ( not os.path.exists("kronoxToGCalendar/logic/o365_token.txt") and not account.is_authenticated ): print("AUTH TRIGGERED") auth_url = connection.get_authorization_url( requested_scopes=scopes, redirect_uri="https://kronox-client-api.herokuapp.com/return_token_url", ) webbrowser.open_new(auth_url[0]) token_req = lambda: requests.get( "https://kronox-client-api.herokuapp.com/get_token_url" ) while token_req().text == "None": continue token_res_arr = token_req().text.split("&") print(token_res_arr) token_code = token_res_arr[0].split("?")[1][5:] token_state = token_res_arr[1][6:] token_url = ( "https://login.microsoftonline.com/common/oauth2/nativeclient?code=" + token_code + "&state=" + token_state ) connection.request_token(token_url) print("AUTH PASSED") account.is_authenticated return account
5,352,027
def b64decode_str(b64string):
    """
    Decodes an arbitrary string from a base 64 ASCII string
    """
    output = base64.b64decode(b64string).decode("UTF-8")
    logger.debug("Decoded %s as %s", b64string, output)
    return output
5,352,028
def BCELossConfig(argument_parser): """ Set CLI arguments :param argument_parser: argument parser :type argument_parser: ```ArgumentParser``` :returns: argument_parser :rtype: ```ArgumentParser``` """ argument_parser.description = """Creates a criterion that measures the Binary Cross Entropy between the target and the output: The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as: .. math:: \\ell(x, y) = L = \\{l_1,\\dots,l_N\\}^\\top, \\quad l_n = - w_n \\left[ y_n \\cdot \\log x_n + (1 - y_n) \\cdot \\log (1 - x_n) \\right], where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'`` (default ``'mean'``), then .. math:: \\ell(x, y) = \\begin{cases} \\operatorname{mean}(L), & \\text{if reduction} = \\text{`mean';}\\\\ \\operatorname{sum}(L), & \\text{if reduction} = \\text{`sum'.} \\end{cases} This is used for measuring the error of a reconstruction in for example an auto-encoder. Note that the targets :math:`y` should be numbers between 0 and 1. Notice that if :math:`x_n` is either 0 or 1, one of the log terms would be mathematically undefined in the above loss equation. PyTorch chooses to set :math:`\\log (0) = -\\infty`, since :math:`\\lim_{x\\to 0} \\log (x) = -\\infty`. However, an infinite term in the loss equation is not desirable for several reasons. For one, if either :math:`y_n = 0` or :math:`(1 - y_n) = 0`, then we would be multiplying 0 with infinity. Secondly, if we have an infinite loss value, then we would also have an infinite term in our gradient, since :math:`\\lim_{x\\to 0} \\frac{d}{dx} \\log (x) = \\infty`. This would make BCELoss's backward method nonlinear with respect to :math:`x_n`, and using it for things like linear regression would not be straight-forward. Our solution is that BCELoss clamps its log function outputs to be greater than or equal to -100. This way, we can always have a finite loss value and a linear backward method. Shape: - Input: :math:`(N, *)` where :math:`*` means, any number of additional dimensions - Target: :math:`(N, *)`, same shape as the input - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N, *)`, same shape as input. Examples:: >>> m = nn.Sigmoid() >>> loss = nn.BCELoss() >>> input = torch.randn(3, requires_grad=True) >>> target = torch.empty(3).random_(2) >>> output = loss(m(input), target) >>> output.backward()""" argument_parser.add_argument( "--weight", help="""a manual rescaling weight given to the loss of each batch element. If given, has to be a Tensor of size `nbatch`.""", required=True, ) argument_parser.add_argument( "--size_average", type=bool, help="""Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field :attr:`size_average` is set to ``False``, the losses are instead summed for each minibatch. Ignored when :attr:`reduce` is ``False``.""", default=True, ) argument_parser.add_argument( "--reduce", type=bool, help="""Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per batch element instead and ignores :attr:`size_average`.""", default=True, ) argument_parser.add_argument( "--reduction", help="""Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. 
``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`.""", required=True, default="mean", ) argument_parser.add_argument( "--__constants__", type=str, action="append", required=True, default="reduction" ) return argument_parser
5,352,029
def gen_cities_avg(climate, multi_cities, years):
    """
    Compute the average annual temperature over multiple cities.

    Args:
        climate: instance of Climate
        multi_cities: the names of cities we want to average over (list of str)
        years: the range of years of the yearly averaged temperature (list of int)

    Returns:
        a pylab 1-d array of floats with length = len(years). Each element in
        this array corresponds to the average annual temperature over the given
        cities for a given year.
    """
    # MY_CODE
    return np.array([np.mean([np.mean(climate.get_yearly_temp(city, year))
                              for city in multi_cities])
                     for year in years])
5,352,030
def tangentVectorsOnSphere( points, northPole = np.array([0.0,0.0,1.0]) ): """ Acquire a basis for the tangent space at given points on the surface of the unit sphere. :param points: N x 3 array of N points at which to acquire basis of tangent space. :param northPole: 3 array of point corresponding to the north pole. :return A N x 3 x 3 array. Each point has three orthogonal tangent vectors of unit length. They are constructed such as the first vector is pointing towards the 'northPole'. The second vector is orthogonal to both the first vector and the vector from the origin to the point of interest. The third vector is equal to the vector between the origin and the point of interest. The last dimension represent the elements of the vectors. The next to last dimension indices the vectors """ vectors = np.zeros( (points.shape[0], 3,3) ) # Get third vector vectors[:, 2, :] = points / np.linalg.norm(points, axis= 1).reshape((-1,1)) # Get second vector vectors[:, 1, :] = np.cross( northPole.reshape( (1,3) ), vectors[:,2,:] ) # Get first vector vectors[:, 0, :] = np.cross( vectors[:,2,:], vectors[:,1,:] ) # Normalize vectors lengths = np.linalg.norm( vectors, axis=2 ).reshape((-1, 3)) inds = np.any( lengths == 0.0, axis=1 ) vectors[inds, :, : ] = np.nan vectors[~inds, :, :] = vectors[~inds, :, :] / lengths[~inds, :].reshape( (-1,3,1) ) return vectors
5,352,031
def _colorvar_patch_destroy(fn):
    """Internal function.\n
    Deletes the traces if any when the widget is destroyed."""

    def _patch(self):
        """Internal function."""
        if self._tclCommands is not None:
            # Deletes the widget from the _all_traces_colorvar
            # and deletes the traces too.
            for key, value in dict(_all_traces_colorvar).items():
                if self == key[0]:
                    var, cbname = value
                    try:
                        var.trace_vdelete('w', cbname)
                    except tkinter.TclError:
                        pass
                    _all_traces_colorvar.pop(key)
        return fn(self)

    return _patch
5,352,032
def menu(screen): """prints the menu on the main screen and intiates all exeuctions Args: screen (screen object): object to refrence screen """ global viewMode global dayDelta global todayDate global calName while True: screen.print_at( "Day(0) | Week (1) | Add(a) | Delete(d) | View/Edit(v) | Previous(p) | Next (n) | Go To Date(g) | Quit(q)", 0, screen.height-1, 2) ev = screen.get_key() if ev == ord("0"): dayViewExe(screen) break elif ev == ord("1"): weekViewExe(screen) break elif ev == ord("a"): addEvent(screen) refreshMain(screen) break elif ev == ord("d"): delEvent(screen) refreshMain(screen) break elif ev == ord("v"): viewEvent(screen) refreshMain(screen) break elif ev == ord("p"): if viewMode == 0: dayDelta -= 1 dayViewExe(screen) elif viewMode == 1: dayDelta -= 7 weekViewExe(screen) break elif ev == ord("n"): if viewMode == 0: dayDelta += 1 dayViewExe(screen) elif viewMode == 1: dayDelta += 7 weekViewExe(screen) break elif ev == ord("g"): screen.print_at("Enter date:", 0, screen.height-2, 1) screen.refresh() tempString = crapInput(screen, 12, screen.height-2) tempDate = datetime.datetime.strptime(tempString, "%d/%m/%Y") tempDate = tempDate.date() if todayDate == tempDate: dayDelta = 0 elif todayDate > tempDate: dayDelta = 0-abs((tempDate-todayDate).days) elif todayDate < tempDate: dayDelta = 0+abs((tempDate-todayDate).days) if viewMode == 0: dayViewExe(screen) elif viewMode == 1: weekViewExe(screen) break elif ev == ord("q"): saveCal(calName) quit() screen.refresh()
5,352,033
def dispatch_files(dispatch_dir='dump'): """ Moves audio files from 'dump' directory to proper audio sub-folders. --- IN dispatch_dir: directory from which to start if not 'dump' NO OUT """ root = '/Users/dluther/ds/metis/metisgh/projects/05-kojak/audio/' for file in os.listdir('../audio/' + dispatch_dir): fpath_current = root + dispatch_dir + '/' + file if not re.match(r'^\w{2}\d{4}.wav', file): print("No match:", file) continue if re.match('^ns', file): target = 'no_sax/' if re.match('^sc', file): target = 'sax_sec/' if re.match('^ss', file): target = 'sax_solo/' os.rename(fpath_current, root + target + file)
5,352,034
def distros_for_filename(filename, metadata=None):
    """Yield possible egg or source distribution objects based on a filename"""
    return distros_for_location(
        normalize_path(filename), os.path.basename(filename), metadata
    )
5,352,035
def tsne( features: np.ndarray, labels: np.ndarray, fp: FreePlot, index: Union[Tuple[int], str] = (0, 0), fontsize: Union[int, str] = 'large', annotate: bool = False, style: Union[str, Iterable[str]] = 'bright', **kwargs: "other kwargs of fp.scatterplot" ) -> None: """ Args: features: n x d labels: n Kwargs: fontsize: large annot: bool, If True, add labels on the scatters. """ from sklearn.manifold import TSNE data_embedded = TSNE(n_components=2, learning_rate=10, n_iter=1000).fit_transform(features) fp[index].set_xticks([]) fp[index].set_yticks([]) data = pd.DataFrame( { "x": data_embedded[:, 0], "y": data_embedded[:, 1], "label": labels } ) for label in np.unique(labels): event = data.loc[data['label']==label] x = event['x'] y = event['y'] if annotate: x_mean = x.median() y_mean = y.median() plt.text(x_mean, y_mean, label, fontsize=fontsize) fp.scatterplot(x, y, index, label=label, s=1.5, edgecolors="none", style=style, **kwargs) sns.despine(left=True, bottom=True)
5,352,036
def control(_):
    """
    Provides a control scenario for testing. In this case, none of the functions
    share any overhead, so this function is empty.
    :param _: a placeholder for the int input
    :return: None
    """
    pass
5,352,037
def pres_from_hybrid(psfc, hya, hyb, p0=100000.):
    """Return pressure field on hybrid-sigma coordinates,
    assuming formula is p = a(k)*p0 + b(k)*ps.
    """
    return hya*p0 + hyb*psfc
5,352,038
def run(): """ Warning system 1) Obtain a list of stations where the current relative level is already high (Threshold set at 1) 2) For these stations, query the past 2 days data and find their fitting curves respectively 3) From the fitting curves, predict the water level in the next hour """ stations = build_station_list() update_water_levels(stations) list_of_stations_levels = stations_level_over_threshold(stations, 1) # Note list of tuples dt=2 t=176400 # next hour from now - it is number of seconds from two days ago s_tol = 10 h_tol = 3 m_tol = 1 risk = 0 severe_warnings = [[],[],[]] for station, _ in list_of_stations_levels: dates, levels = fetch_measure_levels(station.measure_id, dt=datetime.timedelta(days=dt)) if dates != None and levels != None: coeffs = polyfit(dates, levels, 3) poly = np.poly1d(coeffs) rel_level = relt_level(poly(t), station) if rel_level != None: if rel_level > s_tol: # risk = "Severe" severe_warnings[0].append((station.name,rel_level)) elif rel_level > h_tol: # risk = "High" severe_warnings[1].append((station.name,rel_level)) elif rel_level > m_tol: # risk = "Moderate" severe_warnings[2].append((station.name,rel_level)) print("WARNING: These stations have a SEVERE risk of flooding:\n"+str(severe_warnings[0])) print("\n\n") print("WARNING: These stations have a HIGH risk of flooding:\n"+str(severe_warnings[1])) print("\n\n") print("WARNING: These stations have a MODERATE risk of flooding:\n"+str(severe_warnings[2]))
5,352,039
def url_exist(file_url):
    """
    Check if a url exists

    Parameters
    ----------
    file_url : string
        url of the www location

    Returns
    -------
    verdict : dtype=boolean
        verdict if present
    """
    try:
        return urllib.request.urlopen(file_url).code == 200
    except Exception:
        return False
5,352,040
def chi_square(observed, expected):
    """
    Compute the chi square test
    """
    from scipy import stats
    # glen cowan pp61
    temp = []
    for (n, nu) in zip(observed, expected):
        if nu != 0:
            temp += [((n - nu) ** 2) / nu]
    # compute p value
    mychi = sum(temp)
    p = stats.chi2.sf(mychi, len(temp))
    return mychi, p
5,352,041
def fix_deform_for_children(pmx: pmxstruct.Pmx, me: int, already_visited=None) -> int: """ Recursively ensure everything that inherits from the specified bone will deform after it. Only cares about parent and partial-inherit, doesnt try to understand IK groups. Return the number of bones that were changed. :param pmx: full PMX object :param me: int index of current bone :param already_visited: leave empty, used to prevent recursive looping :return: number of bones that were changed """ def guarantee_good_relationship(parent: int, child: int) -> bool: # check & fix the relationship between these two bones # if the deform layers are improper, then fix them and return True # if the deform layers are proper, then return False # todo: do i care about deform_after_phys? child_deform = pmx.bones[child].deform_layer parent_deform = pmx.bones[parent].deform_layer if child < parent: # if the child has lower index than parent, then the child MUST have greater deform_layer if child_deform <= parent_deform: pmx.bones[child].deform_layer = pmx.bones[parent].deform_layer + 1 return True else: return False elif child > parent: # if the child has greater index than parent, then the child MUST have greater (or equal) deform_layer if child_deform < parent_deform: pmx.bones[child].deform_layer = pmx.bones[parent].deform_layer return True else: return False else: # if child == parent, idk? don't change anything tho return False retme = 0 # safety system to prevent infinite recursion: if already_visited is None: already_visited = set() if me in already_visited: return 0 else: already_visited.add(me) # check every single bone to find the ones that inherit from "me" for d,bone in enumerate(pmx.bones): # if bone d is inheriting from "me", if (bone.parent_idx == me) or ((bone.inherit_rot or bone.inherit_trans) and (bone.inherit_parent_idx == me)): # check/fix their relationship. return True if something was changed. if guarantee_good_relationship(me, d): # the check also fixes it, all thats left is to recurse print(d) retme += 1 retme += fix_deform_for_children(pmx, d, already_visited) return retme
5,352,042
def theta_8(p, q, h, phi, a, b):
    """Lower limit of integration for the case rho > a, rho > b."""
    result = np.arctan(r_8(p, q, phi, a, b) / h)
    return result
5,352,043
def test_db(app):
    """
    Setup database, this only gets executed once per module.

    :param app: Pytest fixture
    :return: SQLAlchemy database session
    """
    _db.drop_all()
    _db.create_all()

    yield _db

    _db.session.remove()
    _db.drop_all()
5,352,044
def run(config):
    """start http exporter server"""
    start_http_server(int(config.get("port", 9108)))
    REGISTRY.register(VcenterCollector(config.get("collector")))
    while True:
        time.sleep(1)
5,352,045
def test_add_se_beta() -> None:
    """
    Test arguments for beta column.

    @return: None
    """
    se_beta = str(uuid.uuid4())
    arguments = parse_harness([FLAG_SE_BETA, se_beta], add_se_beta_value_flag)
    assert arguments.se_beta == se_beta
    assert parse_harness([], add_se_beta_value_flag).se_beta == OUTPUT_COLUMN_SE_BETA
5,352,046
def collect_genewise(fst_file, file_name, gene_names, gene_to_fst): """take in the file name, opens it. populates a dictionary to [gene] = fst file_name = defaultdict(str) FBgn0031208 500000 16 0.002 21.0 1:2=0.05752690 """ file_name = file_name.split("_gene")[0] f_in = open(fst_file, "r") for line in f_in: if test_line(line): data = line.split() if "1:2=" in line: gene = data[0].strip() gene_names.add(gene) fst = data[5].strip() fst = fst.split("=")[1] data = "%s\t%s" % (gene, fst) gene_to_fst[file_name].append(data) return gene_to_fst, gene_names
5,352,047
def bbox_overlaps_batch(anchors, gt_boxes): """ :param anchors: (N, 4) ndarray of float :param gt_boxes: (b, K, 5) ndarray of float :return: (N, K) ndarray of overlap between boxes and query_boxes """ batch_size = gt_boxes.size(0) if anchors.dim() == 2: N = anchors.size(0) K = gt_boxes.size(1) anchors = anchors.view(1, N, 4).expand(batch_size, N, 4).contiguous() gt_boxes = gt_boxes[:,:,:4].contiguous() gt_boxes_x = (gt_boxes[:,:,2] - gt_boxes[:,:,0] + 1) gt_boxes_y = (gt_boxes[:,:,3] - gt_boxes[:,:,1] + 1) gt_boxes_area = (gt_boxes_x * gt_boxes_y).view(batch_size, 1, K) anchors_boxes_x = (anchors[:,:,2] - anchors[:,:,0] + 1) anchors_boxes_y = (anchors[:,:,3] - anchors[:,:,1] + 1) anchors_area = (anchors_boxes_x * anchors_boxes_y).view(batch_size, N, 1) gt_area_zero = (gt_boxes_x == 1) & (gt_boxes_y == 1) anchors_area_zero = (anchors_boxes_x == 1) & (anchors_boxes_y == 1) boxes = anchors.view(batch_size, N, 1, 4).expand(batch_size, N, K, 4) query_boxes = gt_boxes.view(batch_size, 1, K, 4).expand(batch_size, N, K, 4) iw = (torch.min(boxes[:,:,:,2], query_boxes[:,:,:,2]) - torch.max(boxes[:,:,:,0], query_boxes[:,:,:,0]) + 1) iw[iw < 0] = 0 ih = (torch.min(boxes[:,:,:,3], query_boxes[:,:,:,3]) - torch.max(boxes[:,:,:,1], query_boxes[:,:,:,1]) + 1) ih[ih < 0] = 0 ua = anchors_area + gt_boxes_area - (iw * ih) overlaps = iw * ih / ua # mask the overlap here. overlaps.masked_fill_(gt_area_zero.view(batch_size, 1, K).expand(batch_size, N, K), 0) overlaps.masked_fill_(anchors_area_zero.view(batch_size, N, 1).expand(batch_size, N, K), -1) elif anchors.dim() == 3: N = anchors.size(1) K = gt_boxes.size(1) if anchors.size(2) == 4: anchors = anchors[:,:,:4].contiguous() else: anchors = anchors[:,:,1:5].contiguous() gt_boxes = gt_boxes[:,:,:4].contiguous() gt_boxes_x = (gt_boxes[:,:,2] - gt_boxes[:,:,0] + 1) gt_boxes_y = (gt_boxes[:,:,3] - gt_boxes[:,:,1] + 1) gt_boxes_area = (gt_boxes_x * gt_boxes_y).view(batch_size, 1, K) anchors_boxes_x = (anchors[:,:,2] - anchors[:,:,0] + 1) anchors_boxes_y = (anchors[:,:,3] - anchors[:,:,1] + 1) anchors_area = (anchors_boxes_x * anchors_boxes_y).view(batch_size, N, 1) gt_area_zero = (gt_boxes_x == 1) & (gt_boxes_y == 1) anchors_area_zero = (anchors_boxes_x == 1) & (anchors_boxes_y == 1) boxes = anchors.view(batch_size, N, 1, 4).expand(batch_size, N, K, 4) query_boxes = gt_boxes.view(batch_size, 1, K, 4).expand(batch_size, N, K, 4) iw = (torch.min(boxes[:,:,:,2], query_boxes[:,:,:,2]) - torch.max(boxes[:,:,:,0], query_boxes[:,:,:,0]) + 1) iw[iw < 0] = 0 ih = (torch.min(boxes[:,:,:,3], query_boxes[:,:,:,3]) - torch.max(boxes[:,:,:,1], query_boxes[:,:,:,1]) + 1) ih[ih < 0] = 0 ua = anchors_area + gt_boxes_area - (iw * ih) overlaps = iw * ih / ua # mask the overlap here. overlaps.masked_fill_(gt_area_zero.view(batch_size, 1, K).expand(batch_size, N, K), 0) overlaps.masked_fill_(anchors_area_zero.view(batch_size, N, 1).expand(batch_size, N, K), -1) else: raise ValueError('anchors input dimension is not correct.') return overlaps
5,352,048
def extract_curve_and_test(curve_names: str, name: str) -> Tuple[str, Callable[[Any], bool]]: """Return a curve and a test to apply for which of it's components to twist.""" twist_match = re.match(rf"(?P<curve>[{curve_names}])_(?P<n>-?\d+)$", name) twist_index_match = re.match(rf"(?P<curve>[{curve_names}])\[ *(?P<n>-?\d+) *\]$", name) twist_slice_match = re.match(rf"(?P<curve>[{curve_names}])(\[ *(?P<start>-?\d*) *: *(?P<stop>-?\d*) *(: *(?P<step>-?\d*) *)?\])?$", name) twist_expr_match = re.match(rf"(?P<curve>[{curve_names}])\{{(?P<expr>.*)\}}$", name) if twist_match is not None: parameters = twist_match.groupdict() curve = parameters["curve"] n = int(parameters["n"]) test = lambda edge: edge == n elif twist_index_match is not None: parameters = twist_index_match.groupdict() curve = parameters["curve"] n = int(parameters["n"]) test = lambda edge: edge == n elif twist_slice_match is not None: parameters = twist_slice_match.groupdict() curve = parameters["curve"] start = int(parameters["start"]) if parameters["start"] else -inf stop = int(parameters["stop"]) if parameters["stop"] else inf step = int(parameters["step"]) if parameters["step"] else 1 test = lambda edge: start <= edge < stop and (edge % step == (0 if start == -inf else start % step)) elif twist_expr_match is not None: parameters = twist_expr_match.groupdict() curve = parameters["curve"] test = lambda n: eval(parameters["expr"], {"n": n, **globals()}) # pylint: disable=eval-used else: raise ValueError(f"Unknown mapping class {name}") return curve, test
5,352,049
def test_filt_tx_from_proto(): """ Tests FilteredTX.from_proto """ proto = FilteredTXProto( txid='09876', type=HeaderType.MESSAGE, tx_validation_code=TxValidationCode.VALID, transaction_actions=FilteredTransactionActions() ) print(proto.transaction_actions) assert FilteredTX.from_proto(proto) == FilteredTX( tx_id='09876', type=TransactionType.Message, tx_validation_code=TxValidationCode.VALID, actions=FilteredTransactionActions() )
5,352,050
def test_you_can_build_a_time_series() -> None: """ This is a Hidden Markov Model - see for example http://mlg.eng.cam.ac.uk/zoubin/papers/ijprai.pdf ... --> X[t-1] --> X[t] --> ... | | Y[t-1] Y[t] """ x_label = "x" y_label = "y" num_items = 10 initial_x = 1. def create_time_step(sequence_item): x_previous = sequence_item.add_double_proxy_for(x_label) x = Exponential(x_previous) y = Poisson(x) sequence_item.add(x, label=x_label) sequence_item.add(y, label=y_label) sequence = Sequence(initial_state={x_label: initial_x}, count=num_items, factories=create_time_step) assert sequence.size() == num_items x_from_previous_step = None x_previous_label = Sequence.proxy_label_for(x_label) for item in sequence: x_previous_proxy = item.get(x_previous_label) x = item.get(x_label) y = item.get(y_label) if x_from_previous_step is None: assert [p.get_value() for p in x_previous_proxy.iter_parents()] == [initial_x] else: assert [p.get_id() for p in x_previous_proxy.iter_parents()] == [x_from_previous_step.get_id()] assert [p.get_id() for p in x.iter_parents()] == [x_previous_proxy.get_id()] assert [p.get_id() for p in y.iter_parents()] == [x.get_id()] x_from_previous_step = x
5,352,051
def psf_gaussian(psf_shape, psf_waist, psf_physical_size=1, psf_nphoton=2): """Return 3D gaussian approximation of PSF.""" def f(index): s = psf_shape[index] // 2 * psf_physical_size c = numpy.linspace(-s, s, psf_shape[index]) c *= c c *= -2.0 / (psf_waist[index] * psf_waist[index]) return c psf = numpy.exp( numpy.sum( numpy.meshgrid(f(0), f(1), f(2), indexing='ij', sparse=False), axis=0, ) ) if psf_nphoton != 1: numpy.power(psf, psf_nphoton, out=psf) return psf
5,352,052
def get_employee_record(id_num):
    """Gets an employee's details if record exists.

    Arguments:
    id_num -- ID of employee record to fetch
    """
    if id_num not in names or id_num not in cities:
        return 'Error viewing record'
    return f'{id_num} {names[id_num]} {cities[id_num]}'
5,352,053
def test_permissions_actions(api):
    """
    Get the list of actions for a controller
    :param api:
    :return:
    """
    data = api.call(
        'PermissionAction', 'read',
        filter=[{
            'property': '_',
            'operator': '_',
            'value': 'UserRole'
        }],
        _web_session_id=ADMIN_SESSION
    )
    data = list(item['action'] for item in data)
    assert 'get' in data
    assert 'read' in data
    assert 'destroy' in data
    assert 'create' in data
    assert 'update' in data
5,352,054
def send_exploit(): """ Sends a request with the payload for a remote buffer overflow """ try: with so.socket(so.AF_INET, so.SOCK_STREAM) as s: s.settimeout(5) print_info('Connecting to {}'.format(target)) connect = s.connect_ex((target, port)) # Stop if connection cannot be established if connect != 0: print_error('Connection failed') return # Connection established: send request try: # Catch initial response if any try: print('[*] Received response: ' + str(s.recv(1024))) except so.timeout: pass print_info('Sending evil request with {} bytes'.format(len(buffer))) s.send(buffer) print_success('Done') # Stop on timeout except so.timeout: print_error('Connection failed due to socket timeout') except (BrokenPipeError, ConnectionResetError): print_error('The connection was closed while sending the payload')
5,352,055
def list_aliases():
    """
    Gets the list of aliases for the current account. An account has
    at most one alias.

    :return: The list of aliases for the account.
    """
    try:
        response = iam.meta.client.list_account_aliases()
        aliases = response['AccountAliases']
        if len(aliases) > 0:
            logger.info("Got aliases for your account: %s.", ','.join(aliases))
        else:
            logger.info("Got no aliases for your account.")
    except ClientError:
        logger.exception("Couldn't list aliases for your account.")
        raise
    else:
        return response['AccountAliases']
5,352,056
def MapToSingleIncrease(val):
    """
    Need 30 minute values to be sequential for some of the tools (i.e. 1,2,3,4)
    so using a format like 5,10,15,20 won't work.
    """
    return val / 5
5,352,057
def get_columns_for_table(instance, db, table): """ Get a list of columns in a table Args: instance - a hostAddr object db - a string which contains a name of a db table - the name of the table to fetch columns Returns A list of columns """ conn = connect_mysql(instance) cursor = conn.cursor() ret = list() param = {'db': db, 'table': table} sql = ("SELECT COLUMN_NAME " "FROM information_schema.columns " "WHERE TABLE_SCHEMA=%(db)s AND" " TABLE_NAME=%(table)s") cursor.execute(sql, param) for column in cursor.fetchall(): ret.append(column['COLUMN_NAME']) return ret
5,352,058
def registra_aluno(nome, ano_entrada, ano_nascimento, **misc):
    """Creates a student's registration record entry."""
    registro = {'nome': nome,
                'ano_entrada': ano_entrada,
                'ano_nascimento': ano_nascimento}
    for key in misc:
        registro[key] = misc[key]
    return registro
5,352,059
def define_output_ports(docstring, short_description_word_count=4): """ Turn the 'Returns' fields into VisTrails output ports Parameters ---------- docstring : NumpyDocString #List of strings? The scraped docstring from the function being autowrapped into vistrails Returns ------- input_ports : list List of input_ports (Vistrails type IPort) """ output_ports = [] idx = 0 # now look at the return Returns section for (the_name, the_type, the_description) in docstring['Returns']: # when the return parameter has no name but only type and description if the_type == '': the_type = the_name the_name = 'def_output' + str(idx) idx += 1 base_type, is_optional = _type_optional(the_type) if is_optional: continue type_base, is_enum, enum_list = _enum_type(the_type) normed_type = None # this is to deal with malformed docstrings like {array, scalar} if is_enum and type_base == 'str': try_norm = _normalize_type(' or '.join(enum_list)) if try_norm is not None: is_enum = False enum_list = [] logger.warning("abuse of enum %s | <%s>|", docstring['Signature'], the_type) normed_type = try_norm # first try to parse if normed_type is None: normed_type = _normalize_type(type_base) # deal with if we fail to parse if normed_type is None: raise AutowrapError("Malformed output type |{}: <{}>|".format( the_name, the_type)) for port_name in (_.strip() for _ in the_name.split(',')): if not port_name: raise AutowrapError("A Port with no name") pdict = {'name': port_name, 'signature': sig_map[normed_type]} output_ports.append(pdict) # some numpy functions lack a Returns section and have and 'output' # optional input (mostly for in-place operations) if len(output_ports) < 1: for (the_name, the_type, the_description) in docstring['Parameters']: if the_name.lower() in ['output', 'out']: the_type, _ = _type_optional(the_type) the_type = _normalize_type(the_type) if the_type is None: # TODO dillify raise AutowrapError("Malformed type") output_ports.append(dict(name=the_name, signature=sig_map[the_type])) return output_ports
5,352,060
def kill(pidfile, logger, signum=signal.SIGTERM): """Sends `signum` to the pid specified by `pidfile`. Logs messages to `logger`. Returns True if the process is not running, or signal was sent successfully. Returns False if the process for the pidfile was running and there was an error sending the signal.""" daemon_pid = read_pid(pidfile, logger) if daemon_pid is None: return True try: send_signal(daemon_pid, signum, logger) return True except OSError as e: if e.errno == errno.ESRCH: logger.warning("Daemon not running (Stale lockfile)") os.remove(pidfile) return True elif e.errno == errno.EPERM: logger.error("Unable to kill %d (EPERM)", daemon_pid) return False raise
5,352,061
def create_small_table(small_dict):
    """
    Create a small table using the keys of small_dict as headers. This is only
    suitable for small dictionaries.

    Args:
        small_dict (dict): a result dictionary of only a few items.

    Returns:
        str: the table as a string.
    """
    keys, values = tuple(zip(*small_dict.items()))
    table = tabulate(
        [values],
        headers=keys,
        tablefmt="pipe",
        floatfmt=".3f",
        stralign="center",
        numalign="center",
    )
    return table
5,352,062
def get_normalized_list_for_every_month(variable_r, list_of_ranges_r, tags_r): """ :param variable_r: big list with all the data [sizes][months] :param list_of_ranges_r: sorted list of range (sizes...Enormous, etc.) :return: normalized list for each month (numbers are percentage respect to the total bytes/requests in a given month) """ number_of_months = len(tags_r) temp_list = [[] for lil in range(0, number_of_months)] total_requests_in_each_month = [[] for lil in range(0, number_of_months)] maxima_each_month = [[] for lil in range(0, number_of_months)] new_list_normalized = [[] for lil in range(0, number_of_months)] for month in range(0, number_of_months): for ciao in range(0, len(list_of_ranges_r), 1): temp_list[month].append(variable_r[ciao][month]) # change second index to change the month: 0,1,2,...23 for month in range(0, number_of_months): total_requests_in_each_month[month] = float(sum(temp_list[month])) #print("total bytes requested in month 0: %f" % total_requests_in_each_month[0]) # list of maxima for each month for month in range(0, number_of_months): maxima_each_month[month] = max(temp_list[month]) #print("maxima for the first month: %d", maxima_each_month[0]) for month in range(0, number_of_months): for zeta in temp_list[month]: new_list_normalized[month].append((zeta/total_requests_in_each_month[month])*100) return new_list_normalized
5,352,063
def mark_as_possible_cluster_member(g, possible_cluster_member, cluster, confidence, system, uri_ref=None): """ Mark an entity or event as a possible member of a cluster. :param rdflib.graph.Graph g: The underlying RDF model :param rdflib.term.URIRef possible_cluster_member: The entity or event to mark as a possible member of the specified cluster :param rdflib.term.URIRef cluster: The cluster to associate with the possible cluster member :param float confidence: The confidence with which to mark the cluster membership :param rdflib.term.URIRef system: The system object for the system which marked the specified cluster :param str uri_ref: A string URI representation of the cluster member (Default is None) :returns: The cluster membership assertion :rtype: rdflib.term.BNode """ cluster_member_assertion = _make_aif_resource(g, uri_ref, AIDA_ANNOTATION.ClusterMembership, system) g.add((cluster_member_assertion, AIDA_ANNOTATION.cluster, cluster)) g.add((cluster_member_assertion, AIDA_ANNOTATION.clusterMember, possible_cluster_member)) mark_confidence(g, cluster_member_assertion, confidence, system) return cluster_member_assertion
5,352,064
def test_thing_action_run(consumed_exposed_pair): """Actions can be invoked on ConsumedThings using the map-like interface.""" consumed_thing = consumed_exposed_pair.pop("consumed_thing") exposed_thing = consumed_exposed_pair.pop("exposed_thing") @tornado.gen.coroutine def test_coroutine(): action_name = next(six.iterkeys(consumed_thing.td.actions)) input_value = Faker().pystr() result = yield consumed_thing.actions[action_name].invoke(input_value) result_expected = yield exposed_thing.invoke_action(action_name, input_value) assert result == result_expected run_test_coroutine(test_coroutine)
5,352,065
def create_xlsx_for_all_recordings(root_directory: str = ".") -> None: """Traverses subdirectories to analyze multiple recordings. Assumes that any folder with an H5 file in it has only H5 files from a single recording. For simple usage, navigate to the root folder you want to analyze, and run: ``python3 -c "from curibio.sdk import check_if_latest_version, create_xlsx_for_all_recordings; check_if_latest_version(); create_xlsx_for_all_recordings()"`` Args: root_directory: where to start the search. Output excel files will all be created in this folder """ list_of_dirs: List[str] = list() for root, _, files in os.walk(root_directory): if any(file_name.endswith(".h5") for file_name in files): list_of_dirs.append(root) list_of_dirs = sorted(list_of_dirs) # ensure deterministic ordering for test suite total_recording_count = len(list_of_dirs) log_text = f"Analysis of the directory {os.path.abspath(root_directory)} completed. {total_recording_count} total recording directories located." logger.info(log_text) for idx, iter_dir in enumerate(list_of_dirs): log_text = f"Analyzing recording {idx+1} of {total_recording_count}: {os.path.abspath(iter_dir)}" logger.info(log_text) iter_recording = PlateRecording.from_directory(iter_dir) for recording in iter_recording: recording.write_xlsx(root_directory) # pylint:disable=protected-access # TODO (Eli 3/19/21): There were odd errors in Windows CI about files not being closed in the temp directory and so they couldn't be deleted, so temporarily putting in this patch for iter_h5_file in recording._files: iter_h5_file._h5_file.close() # del iter_recording # Eli (3/19/21): Resolve windows error with closing file when it is still open
5,352,066
def twodcontourplot(tadata_nm, tadata_timedelay, tadata_z_corr):
    """
    make contour plot

    Args:
        tadata_nm: wavelength array
        tadata_timedelay: time delay array
        tadata_z_corr: matrix of z values
    """
    timedelayi, nmi = np.meshgrid(tadata_timedelay, tadata_nm)

    # find the maximum and minimum; these are used for the color bar
    z_min = np.amin(np.amin(tadata_z_corr, axis=1))
    z_max = np.amax(np.amax(tadata_z_corr, axis=1))

    return [nmi, timedelayi, z_min, z_max]
5,352,067
def make_example_dags(module_path):
    """Loads DAGs from a module for test."""
    dagbag = DagBag(module_path)
    return dagbag.dags
5,352,068
def test_ict2(): """Is the ict calculated correctly?""" model_embedding = [{0: 1}] real_embedding = [{1: 1}] context = np.array([[0, 4], [500, 4]]) # calculate Euclidean distance matrix distance_matrix = calc_euclidean(context) # calc d for embeddings vocab_len = len(context) d_model = _calc_d(model_embedding, vocab_len) d_real = _calc_d(real_embedding, vocab_len) assert calc_ict(d_model[0], d_real[0], distance_matrix) == pytest.approx(500)
5,352,069
def texLatticeDeformContext(q=1,e=1,ev="float",ex=1,ch=1,i1="string",i2="string",i3="string",lc="uint",lr="uint",n="string",smm=1,spm=1,ubr=1): """ http://help.autodesk.com/cloudhelp/2019/ENU/Maya-Tech-Docs/CommandsPython/texLatticeDeformContext.html ----------------------------------------- texLatticeDeformContext is undoable, queryable, and editable. This command creates a context which may be used to deform UV maps with lattice manipulator. This context only works in the texture UV editor. ----------------------------------------- Return Value: int Number of column divisions, when querying the latticeColumns flag. int Number of row divisions, when querying the latticeRows flag. float Value of the deform envelope, when querying the envelope flag. boolean Whether snapping to pixels is on, when querying the snapPixelMode flag. boolean Whether the bounding rectangle is to be used for deforemation, when querying the useBoundingRect flag. In query mode, return type is based on queried flag. ----------------------------------------- Flags: ----------------------------------------- ev : envelope [float] ['query', 'edit'] Specifies the influence of the lattice. ----------------------------------------- ex : exists [boolean] [] Returns true or false depending upon whether the specified object exists. Other flags are ignored. ----------------------------------------- ch : history [boolean] [] If this is a tool command, turn the construction history on for the tool in question. ----------------------------------------- i1 : image1 [string] ['query', 'edit'] First of three possible icons representing the tool associated with the context. ----------------------------------------- i2 : image2 [string] ['query', 'edit'] Second of three possible icons representing the tool associated with the context. ----------------------------------------- i3 : image3 [string] ['query', 'edit'] Third of three possible icons representing the tool associated with the context. ----------------------------------------- lc : latticeColumns [uint] ['query', 'edit'] Specifies the number column points the lattice contains. The maximum size lattice is restricted to 8 columns. ----------------------------------------- lr : latticeRows [uint] ['query', 'edit'] Specifies the number of rows the lattice contains. The maximum size lattice is restricted to 8 rows. ----------------------------------------- n : name [string] [] If this is a tool command, name the tool appropriately. ----------------------------------------- smm : showMoveManipulator [boolean] ['query', 'edit'] Specifies whether show move manipulator in UV Editor ----------------------------------------- spm : snapPixelMode [boolean] ['query', 'edit'] Specifies the influenced uv points should be snapped to a pixel center or corner. ----------------------------------------- ubr : useBoundingRect [boolean] When constructing the lattice use the bounding box of the selected UVs for the extents of the lattice. When this is disabled the extents of the marquee selections are used as the extents for the lattice. """
5,352,070
def count_good_deals(df):
    """
    7. Counts the number of profitable deals
    :param df: - dataframe with a '<DEAL_RESULT>' column
    :return: - number of profitable deals
    """
    # http://stackoverflow.com/questions/27140860/count-occurrences-of-number-by-column-in-pandas-data-frame?rq=1
    return (df['<DEAL_RESULT>'] > 0).sum()
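# Usage sketch (not part of the original snippet): a tiny, hypothetical trades
# DataFrame showing that only strictly positive results are counted.
import pandas as pd

trades = pd.DataFrame({'<DEAL_RESULT>': [120.5, -40.0, 0.0, 15.2]})
count_good_deals(trades)  # -> 2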
5,352,071
def match_lines_by_hausdorff(target_features, match_features, distance_tolerance, azimuth_tolerance=None, length_tolerance=0, match_features_sindex=None, match_fields=False, match_stats=False, field_suffixes=('', '_match'), match_strings=None, constrain_target_features=False, target_features_sindex=None, match_vectors=False, expand_target_features=False, closest_match=False, closest_target=False, verbose=False): """Conflate attributes between line features based on Hausdorff distance. target_features : :class:`geopandas.GeoDataFrame` Features to which ``match_features`` will be matched. Must have LineString geometries. All ``target_features`` will be included in output, with or without a match. match_features : :class:`geopandas.GeoDataFrame` Features to be matched to ``target_features``. Must have LineString geometries. Only successfully matched features will be included in output. Multiple ``match_features`` may be matched to a single target feature. Must have the same spatial reference as ``target_features``. distance_tolerance : :obj:`float` Maximum Hausdorff distance between each target feature and candidate ``match_features`` Because directed Hausdorff distances are calculated from target to match and match to target, ``distance_tolerance`` will be assessed based on the smaller of these two values. If feature segments are matched (e.g., 1:n, m:1, or m:n), Hausdorff distances are calculated for each segment. In spatial unit of ``target_features``. azimuth_tolerance : :obj:`float`, optional, default = ``None`` Maximum azimuth difference (in degrees) between target feature and potential match features. Feature azimuths are calculated as the azimuth of the feature's "major axis" (the longest axis of the feature's minimum bounding rectangle). If feature segments are matched (e.g., 1:n, m:1, or m:n), azimuths are calculated for each segment. length_tolerance : :obj:`float`, optional, default = 0 Proportion of target feature length required for potential match features. For example, 0.25 specifies that a match candidates must be at least 25% as long as target features to be viable matches. Must be between 0 and 1. If target and match features are split, length proportions are calculated between split segments, not original features. match_features_sindex : :class:`rtree.index.Index`, optional, default = ``None`` Spatial index for ``match_features``. If provided, will not have to be constructed for each function call. match_fields : :obj:`bool`, optional, default = ``False`` * ``True``: Fields from match features will be included in output. * ``False``: Only row indices for match features will be included in output. match_stats : :obj:`bool`, optional, default = ``False`` * ``True``: Statistics related to tolerances will be included in output. * ``False``: No match statistics will be included in ouput. field_suffixes : :obj:`tuple`, optional, default = ``('', '_match')`` Suffixes to be appended to output field names for ``target_features`` and ``match_features``, respectively. Only used if ``match_stats=True``. match_strings : :obj:`tuple`, optional, default = ``None`` Fields used to compute fuzzy string comparisions. Typically, these are street name fields for the ``target_features`` and ``match_features``, respectively. String comparisions do not affect matches, but can be post-processed to help assess match quality. 
constrain_target_features : :obj:`bool`, optional, default = ``False`` * ``True``: Extents of ``match_features``, plus a ``distance_tolerance`` buffer, will be used to select relevent ``target_features`` prior to matching. When the extent or number of ``match_features`` is small relative to ``target_features``, this dramatically improves performance because fewer ``target_features`` are analyzed for potential matches. * ``False``: All ``target_features`` are analyzed for potential matches. target_features_sindex : :class:`rtree.index.Index`, optional, default = ``None`` If ``constrain_target_features=True``, a spatial index for the ``target_features`` will be computed unless one is provided. If the same ``target_features`` are specified in multiple function calls, pre-computing a spatial index will improve performance. If ``constrain_target_features=False``, ``target_features_sindex`` is unnecessary. match_vectors : :obj:`bool`, optional, default = ``False`` * ``True``: Constructs LineStrings between midpoint of ``target_features`` and the closest points along matched ``match_features``. Useful for vizualizing match results. expand_target_features : :obj:`bool`, optional, default = ``False`` * ``True`` : Target features that match to multiple ``match_features`` will be expanded into multiple segments, each corresponding to a single match feature. Each target feature segment will be output as a seperate record with an index field identifying original row-wise indices from ``target_features``. closest_match : :obj:`bool`, optional, default = ``False`` * ``True`` : Only the closest available match feature will be matched to each target feature, based on Hausdorff distance * ``False`` : All available match features will match to each target feature closest_target : :obj:`bool`, optional, default = ``False`` * ``True`` : A target feature will only match with a match feature if it is the closest available target, based on Hausdorff distance * ``False`` : A target feature will match with all available match features, regardless of whether it has also matched with other target features verbose : :obj:`bool`, optional, default = ``False`` * ``True`` : Reports status by printing to standard output """ # Copy input features to the function doesn't modify the originals target_features = target_features.copy() match_features = match_features.copy() original_target_feature_columns = target_features.columns original_crs = target_features.crs if verbose: start = time() length = len(target_features) counter = 0 # Constrain target features to those near available match features if constrain_target_features: if not target_features_sindex: target_features_sindex = target_features.sindex nearby_target_idx = [] for match_feature in match_features.geometry: nearby_target_idx.extend( list(target_features_sindex.intersection( match_feature.buffer(distance_tolerance).bounds))) nearby_target_idx = list(set(nearby_target_idx)) operating_target_features = target_features[['geometry']].iloc[nearby_target_idx].copy() else: operating_target_features = target_features[['geometry']].copy() # Make a spatial index for match features, if one isn't supplied if not match_features_sindex: match_features_sindex = match_features.sindex # Initiate lists to store match results match_indices = [] match_types = [] h_tms_matches = [] t_props_matches = [] t_segs_matches = [] t_linrefs_matches = [] h_mts_matches = [] m_props_matches = [] m_segs_matches = [] m_linrefs_matches = [] if match_vectors: match_vectors = [] # Iterate through 
target features: for i, target in enumerate(operating_target_features.geometry): # Initiate lists to store matches m_ids = [] m_types = [] h_tms = [] t_props = [] t_segs = [] t_linrefs = [] h_mts = [] m_props = [] m_segs = [] m_linrefs = [] # Only analyze targets with length if target.length > 0: # Roughly filter candidates with a spatial index search_area = target.buffer(distance_tolerance).bounds candidate_IDs = list(match_features_sindex.intersection(search_area)) candidates = match_features[['geometry']].iloc[candidate_IDs].reset_index() # Calculate Hausdorff distances from feature to each candidate (h_fc) h_tm_list = [directed_hausdorff(target, candidate) for candidate in candidates.geometry] candidates['h_tm'] = pd.Series(h_tm_list) # Calculate Hausdorff distances from each candidate to feature (h_cf) h_mt_list = [directed_hausdorff(candidate, target) for candidate in candidates.geometry] candidates['h_mt'] = pd.Series(h_mt_list) # Define function to compare major axis azimuths def azimuth_match(target, candidate, azimuth_tolerance): if azimuth_tolerance: target_azimuth = major_axis_azimuth(target) candidate_azimuth = major_axis_azimuth(candidate) azimuth_difference_ = azimuth_difference(target_azimuth, candidate_azimuth, directional=False) if azimuth_difference_ <= azimuth_tolerance: return True else: return False else: return True # Examine each candidate's relationship to the target feature for candidate in candidates.itertuples(): # Only analyze candidates with length if candidate.geometry.length > 0: # Initialize default match values m_type = None h_tm = None t_prop = None t_seg = None t_linref = None h_mt = None m_prop = None m_seg = None m_linref = None # 1:1 if ( (candidate.h_tm <= distance_tolerance) and (candidate.h_mt <= distance_tolerance) and # Check that azimuth is acceptable azimuth_match(target, candidate.geometry, azimuth_tolerance) and # Check relative length (abs(candidate.geometry.length - target.length) < (1- length_tolerance) * target.length)): # Whole target matches candidate h_tm = candidate.h_tm t_prop = 1 t_seg = target t_linref = (0, target.length) # Whole candidate matches target h_mt = candidate.h_mt m_prop = 1 m_seg = candidate.geometry m_linref = (0, candidate.geometry.length) m_type = '1:1' # m:1 elif ( (candidate.h_tm <= distance_tolerance) and (candidate.h_mt > distance_tolerance)): # Find the candidate segment matching the target candidate_seg = find_parallel_segment(target, candidate.geometry) if (candidate_seg and candidate_seg.length > 0 and azimuth_match(target, candidate_seg, azimuth_tolerance) and # Check relative length (abs(candidate_seg.length - target.length) < (1- length_tolerance) * target.length)): # Whole target matches candidate h_tm = directed_hausdorff(target, candidate_seg) t_prop = 1 t_seg = target t_linref = (0, target.length) # Calculate proportion of candidate included in segment h_mt = directed_hausdorff(candidate_seg, target) m_prop = candidate_seg.length / candidate.geometry.length m_seg = candidate_seg m_linref = segment_linear_reference(candidate.geometry, candidate_seg) m_type = 'm:1' # 1:n elif ( (candidate.h_tm > distance_tolerance) and (candidate.h_mt <= distance_tolerance)): # Find the target segment matching the candidate target_seg = find_parallel_segment( candidate.geometry, target, snap_distance=distance_tolerance) if (target_seg and target_seg.length > 0 and azimuth_match(target_seg, candidate.geometry, azimuth_tolerance) and # Check relative length (abs(candidate.geometry.length - target_seg.length) < (1- 
length_tolerance) * target_seg.length)): # Calculate proportion of target included in segment h_tm = directed_hausdorff(target_seg, candidate.geometry) t_prop = target_seg.length / target.length t_seg = target_seg t_linref = segment_linear_reference(target, target_seg) # Whole candidate matches target h_mt = directed_hausdorff(candidate.geometry, target_seg) m_prop = 1 m_seg = candidate.geometry m_linref = (0, candidate.geometry.length) m_type = '1:n' # potential m:n elif ( (candidate.h_tm > distance_tolerance) and (candidate.h_mt > distance_tolerance)): # See if parallel segments can be identified target_seg = find_parallel_segment( candidate.geometry, target, snap_distance=distance_tolerance) candidate_seg = find_parallel_segment( target, candidate.geometry) # Measure hausdorff distance (non-directed) between parallel segments if target_seg and candidate_seg: h_tm_seg = directed_hausdorff(target_seg, candidate_seg) h_mt_seg = directed_hausdorff(candidate_seg, target_seg) if ((h_tm_seg <= distance_tolerance) and (h_mt_seg <= distance_tolerance) and target_seg.length > 0 and candidate_seg.length > 0 and azimuth_match(target_seg, candidate_seg, azimuth_tolerance) and # Check relative length (abs(candidate_seg.length - target_seg.length) < (1- length_tolerance) * target_seg.length)): h_tm = h_tm_seg t_prop = target_seg.length / target.length t_seg = target_seg t_linref = segment_linear_reference(target, target_seg) h_mt = h_mt_seg m_prop = candidate_seg.length / candidate.geometry.length m_seg = candidate_seg m_linref = segment_linear_reference(candidate.geometry, candidate_seg) m_type = 'm:n' if t_prop is not None: m_ids.append(candidate.index) m_types.append(m_type) h_tms.append(h_tm) t_props.append(t_prop) t_segs.append(t_seg) t_linrefs.append(t_linref) h_mts.append(h_mt) m_props.append(m_prop) m_segs.append(m_seg) m_linrefs.append(m_linref) # Record match stats match_indices.append(m_ids) match_types.append(m_types) h_tms_matches.append(h_tms) t_props_matches.append(t_props) t_segs_matches.append(t_segs) t_linrefs_matches.append(t_linrefs) h_mts_matches.append(h_mts) m_props_matches.append(m_props) m_segs_matches.append(m_segs) m_linrefs_matches.append(m_linrefs) # Construct match vector if isinstance(match_vectors, list): vectors = [] for t_seg, m_seg in zip(t_segs_matches, m_segs_matches): if t_seg and m_seg: vectors.append(LineString([midpoint(t_seg), midpoint(m_seg)])) match_vectors.append(vectors) # Report status if verbose: if counter % round(length / 10) == 0 and counter > 0: percent_complete = (counter // round(length / 10)) * 10 minutes = (time()-start) / 60 print('{}% ({} segments) complete after {:04.2f} minutes'.format(percent_complete, counter, minutes)) counter += 1 # Merge joined data with target features operating_target_features['match_index'] = pd.Series( match_indices, index=operating_target_features.index) operating_target_features['match_type'] = pd.Series( match_types, index=operating_target_features.index) operating_target_features['h_tm'] = pd.Series( h_tms_matches, index=operating_target_features.index) operating_target_features['t_prop'] = pd.Series( t_props_matches, index=operating_target_features.index) operating_target_features['t_seg'] = pd.Series( t_segs_matches, index=operating_target_features.index) operating_target_features['t_linref'] = pd.Series( t_linrefs_matches, index=operating_target_features.index) operating_target_features['h_mt'] = pd.Series( h_mts_matches, index=operating_target_features.index) operating_target_features['m_prop'] = pd.Series( 
m_props_matches, index=operating_target_features.index) operating_target_features['m_seg'] = pd.Series( m_segs_matches, index=operating_target_features.index) operating_target_features['m_linref'] = pd.Series( m_linrefs_matches, index=operating_target_features.index) if isinstance(match_vectors, list): operating_target_features['match_vectors'] = pd.Series( match_vectors, index=operating_target_features.index) # Store original target feature IDs operating_target_features = operating_target_features.reset_index().rename(columns={'index': 'target_index'}) # Expand targets with more than one match # Look for lists of match IDs in each row expanded_targets = [] for i, target in enumerate(operating_target_features.itertuples()): if isinstance(target.match_index, list): # Make duplicate rows for each match ID with respective attributes for j, match in enumerate(target.match_index): new_row = target._asdict() new_row.pop('Index', None) for key, value in target._asdict().items(): if isinstance(value, list): new_row[key] = value[j] # Append new row to end of dataframe operating_target_features = operating_target_features.append(new_row, ignore_index=True) # Mark original row for deletion expanded_targets.append(i) # Delete expanded targets operating_target_features = operating_target_features.drop(expanded_targets) # Only analyze matches if there are any if len(operating_target_features) > 0: # Identify and add records for unmatched portions of target features # Get target records that have unmatched portions unmatched_segments = operating_target_features.copy() unmatched_segments = unmatched_segments[ (unmatched_segments['t_prop'].notnull()) & (unmatched_segments['t_prop'] < 1)] new_target_records = [] # Iterate through groups of target records for target_index, target_group in unmatched_segments.groupby('target_index'): # Get the linref intervals associated with each of the matched segments matched_linrefs = target_group['t_linref'].tolist() # Combine the intervals matched_linrefs_merged = merge_intervals(matched_linrefs) # Get the original target geometry orig_target_geometry = target_group.iloc[0]['geometry'] # Construct linref intervals for the unmatched parts geometry_extents = [0, orig_target_geometry.length] matched_linrefs_list = [linref for tup in matched_linrefs_merged for linref in tup] all_linrefs_list = sorted(geometry_extents + matched_linrefs_list) unmatched_linrefs = [ (all_linrefs_list[i], all_linrefs_list[i + 1]) for i in range(0, len(all_linrefs_list), 2)] unmatched_lines = [ split_line_at_dists(orig_target_geometry, pair)[1] for pair in unmatched_linrefs] # For each unmatched line, make a new target record for unmatched_line, unmatched_linref in zip(unmatched_lines, unmatched_linrefs): if unmatched_line.length > 1: # Get all the attributes associated with the original target record new_target_record = target_group.iloc[0].to_dict() # Modify the match attributes new_target_record['match_index'] = np.nan new_target_record['match_type'] = np.nan new_target_record['h_tm'] = np.nan new_target_record['t_prop'] = np.nan new_target_record['t_seg'] = unmatched_line new_target_record['t_linref'] = unmatched_linref new_target_record['h_mt'] = np.nan new_target_record['m_prop'] = np.nan new_target_record['m_seg'] = np.nan new_target_record['m_linref'] = np.nan new_target_record['geometry'] = orig_target_geometry new_target_records.append(new_target_record) # Add new target records to operating features new_target_records = gpd.GeoDataFrame(new_target_records, geometry='geometry') 
operating_target_features = pd.concat([operating_target_features, new_target_records]) # Replace target geometries with target segments (if not NaN) ##### This appears to be duplicated below; not sure if it needs to happen twice operating_target_features['geometry'] = operating_target_features.apply( lambda row: row['t_seg'] if isinstance(row['t_seg'], LineString) else row['geometry'], axis=1) # For each unique target geometry, delete all matches except the closest one # (expanded targets are deleted if they don't have the closest match) # Required if 'closest_target' if closest_match or closest_target: # Identify sets of records with identical targets equivalent_target_sets = [d for _, d in operating_target_features.groupby( ['target_index','t_linref']) if len(d) > 1] # Identify which of these records has the closest match equivalent_record_ids = [] closest_records = gpd.GeoDataFrame(crs=operating_target_features.crs) for equivalent_target_set in equivalent_target_sets: # Keep track of IDs for equivalent records equivalent_record_ids.extend(equivalent_target_set.index.tolist()) # Identify minimum tc and ct distances and associated indices h_tm_min_idx = equivalent_target_set['h_tm'].astype(float).idxmin() h_tm_min = equivalent_target_set['h_tm'].astype(float).min() h_mt_min_idx = equivalent_target_set['h_mt'].astype(float).idxmin() h_mt_min = equivalent_target_set['h_mt'].astype(float).min() # Identify overall closest match min_idx = h_tm_min_idx if h_tm_min < h_mt_min else h_mt_min_idx closest_records = closest_records.append( operating_target_features.loc[[min_idx]], ignore_index=True) # Drop equivalent records operating_target_features = operating_target_features.drop( equivalent_record_ids) # Add back those with the closest match operating_target_features = operating_target_features.append( closest_records, ignore_index=True) # Ensure that each match feature is only matched to one, closest target feature # (No targets are deleted, but matches are removed if a given target isn't closest) if closest_target: # Identify sets of records with the same match id match_id_sets = [d for _, d in operating_target_features.groupby( 'match_index') if len(d) > 1] # Within these sets, identify sets with overlapping linear references for match_id_set in match_id_sets: # Get ID for match feature match_id = match_id_set.iloc[0]['match_index'] # Get raw geometry for match feature match_geom = match_features.loc[match_id]['geometry'] # Find overlapping linear reference ranges among the original matches lin_ref_ranges = merge_intervals(match_id_set['m_linref'].tolist()) # Identify sets of records within each range lin_ref_sets = [match_id_set[match_id_set['m_linref'].apply( lambda x: True if (x[0] >= lower and x[1] <= upper) else False)] for lower, upper in lin_ref_ranges] # Analyze each set of targets with overlapping matches for lin_ref_set, lin_ref_range in zip(lin_ref_sets, lin_ref_ranges): # Get the portion of the raw match feature within the linear reference range _, range_match_geom, _ = split_line_at_dists(match_geom, lin_ref_range) # Split the linear reference feature into segments parallel to match features t_seg_endpoints = [x for t_seg in lin_ref_set['t_seg'] for x in endpoints(t_seg)] t_seg_endpoint_lin_refs = [range_match_geom.project(x) for x in t_seg_endpoints] range_match_segments = split_line_at_dists(range_match_geom, t_seg_endpoint_lin_refs) # For each segment, see which target feature is closest based on hausdorff distance closest_targets = [ nearest_neighbor( segment, 
GeoDataFrame(geometry=lin_ref_set['t_seg']), hausdorff_distance=True ).index[0] for segment in range_match_segments] # Group adjacent segments with the same target groups = [list(group) for _, group in itertools.groupby( zip(closest_targets, range_match_segments), key=lambda x: x[0])] closest_targets = [group[0][0] for group in groups] match_segments = [[x[1] for x in group] for group in groups] match_segments = [sh.ops.linemerge(x) for x in match_segments] # Only move forward if there are match LineString match segments to work with if LineString in [type(x) for x in match_segments]: # Remove any non-LineString geometries (e.g., GeometryCollection) try: match_segments, closest_targets = zip( *[(segment, idx) for segment, idx in zip(match_segments, closest_targets) if isinstance(segment, LineString)]) except: match_segment_types = [type(x) for x in match_segments] closest_target_types = [type(x) for x in closest_targets] print('match segments: {}, {}'.format(str(match_segment_types), str(match_segments))) print('closest_targets: {}, {}'.format(str(closest_target_types), str(closest_targets))) # Calculate the match prop and lin_ref bounds for the grouped match segments match_props = [x.length/match_geom.length for x in match_segments] match_lin_refs = [tuple([match_geom.project(point) for point in endpoints(segment)]) for segment in match_segments] # Update match info for the chosen target for idx, match_prop, match_segment, match_lin_ref in zip( closest_targets, match_props, match_segments, match_lin_refs): # lin_ref_set.at[idx, 'match_index'] = match_id lin_ref_set.at[idx, 'm_prop'] = match_prop lin_ref_set.at[idx, 'm_seg'] = match_segment lin_ref_set.at[idx, 'm_linref'] = match_lin_ref lin_ref_set.at[idx, 'h_tm'] = directed_hausdorff( lin_ref_set.at[idx, 't_seg'], match_segment) lin_ref_set.at[idx, 'h_mt'] = directed_hausdorff( match_segment, lin_ref_set.at[idx, 't_seg']) # Remove match info for other targets in set not_closest_targets = [x for x in lin_ref_set.index if x not in closest_targets] for idx in not_closest_targets: lin_ref_set.at[idx, 't_prop'] = np.nan # lin_ref_set.at[lin_ref_set_idx, 't_seg'] = np.nan ########### Maybe don't get rid of the t_seg? 
lin_ref_set.at[idx, 't_linref'] = np.nan lin_ref_set.at[idx, 'm_prop'] = np.nan lin_ref_set.at[idx, 'm_seg'] = np.nan lin_ref_set.at[idx, 'm_linref'] = np.nan lin_ref_set.at[idx, 'h_tm'] = np.nan lin_ref_set.at[idx, 'h_mt'] = np.nan lin_ref_set.at[idx, 'match_index'] = np.nan # Remove original lin_ref_set rows from the operating_target_features operating_target_features = operating_target_features.drop(lin_ref_set.index) # Append rows from lin_ref_set back onto operating_target_features operating_target_features = operating_target_features.append(lin_ref_set) # Gather values from fields of match features if match_fields and isinstance(match_fields, bool): match_fields = match_features.columns.tolist() match_fields.remove('geometry') elif isinstance(match_fields, list): match_fields = match_fields else: match_fields = [] if match_strings and (match_strings[1] not in match_fields): match_fields.append(match_strings[1]) # Join fields for matches operating_target_features = operating_target_features.merge( match_features[match_fields], how='left', left_on='match_index', right_index=True) # Join operating target features back onto all target features target_features = target_features.merge( operating_target_features.drop(columns=['geometry']), how='outer', left_index=True, right_on='target_index', suffixes=field_suffixes) # Sort by original index target_features = target_features.sort_values(['target_index']) # Convert empty lists to NaN target_features = target_features.applymap( lambda x: np.nan if x == [] else x) # Convert single-element lists to their sole elements target_features = target_features.applymap( lambda x: x[0] if (isinstance(x, list) and len(x) == 1) else x) # Calculate string matches, if specified if match_strings: def fuzzy_score(row, col_a, col_b): a = row[col_a] b = row[col_b] def standardize_and_score(a, b): a = standardize_streetname(str(a)) b = standardize_streetname(str(b)) return (fuzz.token_set_ratio(a, b) / 100) # Inputs could be lists, so make them lists if they aren't a_list = listify(a) b_list = listify(b) # Get fuzzy scores for each string combination scores = [] for a in a_list: for b in b_list: if (pd.notnull(a) and pd.notnull(b)): scores.append(standardize_and_score(a, b)) if len(scores) > 0: return scores else: return np.nan target_string, match_string = match_strings if match_string in original_target_feature_columns: target_string = target_string + field_suffixes[0] match_string = match_string + field_suffixes[1] target_features['match_strings'] = target_features.apply( fuzzy_score, args=(target_string, match_string), axis=1) # Replace geometry with t_seg if there is one available target_features['geometry'] = target_features.apply( lambda row: row['t_seg'] if isinstance(row['t_seg'], LineString) else row['geometry'], axis=1) # Drop stats columns if not specifically requested if not match_stats: target_features = target_features.drop( columns=['h_tm','t_prop','t_seg','t_linref','h_mt','m_prop','m_seg','m_linref']) # Move target index to front target_features = df_first_column(target_features, 'target_index') # Move the geometry column to the end target_features = df_last_column(target_features, 'geometry') # Reset the index target_features = target_features.reset_index(drop=True) # Ensure that crs is the same as original target_features.crs = original_crs # Report done if verbose: print('100% ({} segments) complete after {:04.2f} minutes'.format(counter, (time()-start) / 60)) return target_features
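# Usage sketch (not part of the original snippet) for match_lines_by_hausdorff,
# using two tiny illustrative street layers. The CRS, column names, and tolerance
# values are assumptions for the example only; distance_tolerance is interpreted
# in the units of the target layer's CRS, per the docstring.
import geopandas as gpd
from shapely.geometry import LineString

targets = gpd.GeoDataFrame(
    {'name': ['Main St']},
    geometry=[LineString([(0, 0), (100, 0)])], crs='EPSG:26910')
candidates = gpd.GeoDataFrame(
    {'name': ['Main Street']},
    geometry=[LineString([(0, 2), (100, 2)])], crs='EPSG:26910')

matched = match_lines_by_hausdorff(
    targets, candidates,
    distance_tolerance=10,   # metres, given the projected CRS above
    azimuth_tolerance=20,    # degrees
    match_fields=True,
    match_strings=('name', 'name'))
# `matched` keeps every target row, with match_index / match_type columns
# (and fuzzy name scores in 'match_strings') describing the conflation result.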
5,352,072
def git_rmtree(path: os.PathLike) -> None:
    """Remove the given path recursively.

    :note: we use shutil rmtree but adjust its behaviour to see whether files that
        couldn't be deleted are read-only. Windows will not remove them in that case"""

    def onerror(func: Callable, path: os.PathLike, _) -> None:
        # Is the error an access error ?
        os.chmod(path, stat.S_IWUSR)

        try:
            func(path)  # Will scream if still not possible to delete.
        except Exception:
            raise

    return shutil.rmtree(path, False, onerror)
5,352,073
def get_num_streams():
    """Force an offset so high that the payload is small and quick.
    In it, there will be a total number to base our reverse search from"""
    result = get_streams()
    logger.debug(result)
    if "error" in result:
        raise Exception("error in request: " + str(result))
    total = int(result.get('_total', 0))
    logger.info("Total live streams: %d", total)
    return total
5,352,074
def compute_corr_active(params):
    """
    Compute correlation only for active positions, i.e. where at least
    one of the two signal tracks is non-zero
    :param params:
    :return:
    """
    with pd.HDFStore(params['inputfilea'], 'r') as hdf:
        load_group = os.path.join(params['inputgroupa'], params['chrom'])
        data = hdf[load_group].values
        dataset1 = np.ma.masked_where(data > 0, data)
    with pd.HDFStore(params['inputfileb'], 'r') as hdf:
        load_group = os.path.join(params['inputgroupb'], params['chrom'])
        data = hdf[load_group].values
        dataset2 = np.ma.masked_where(data > 0, data)
    comb_mask = np.ma.getmask(dataset1) & np.ma.getmask(dataset2)
    dataset1 = np.ma.array(dataset1.data, mask=comb_mask)
    dataset2 = np.ma.array(dataset2.data, mask=comb_mask)
    results = dict()
    for ms in params['measure']:
        corr_fun = get_corr_fun(params[ms], masked=True)
        res = corr_fun(dataset1, dataset2)
        try:
            corr, pv = res
        except (ValueError, TypeError):
            corr, pv = res, -1
        infos = {'stat': corr, 'pv': pv.data}
        results[ms] = infos
    return params['chrom'], results
5,352,075
def test_same_seed():
    """
    Test same output for random points method with same seed
    """
    data, cluster_borders, _ = gen_acceptable_data()
    k = 2
    model = kmeans_py.kmeans(data=data, K=k)
    model.initialize_centers(method='rp', seed=1234)
    model2 = kmeans_py.kmeans(data=data, K=k)
    model2.initialize_centers(method='rp', seed=1234)
    assert np.array_equal(model.initial_values, model2.initial_values)
5,352,076
def test_image_lat_profile():
    """Tests GLAT profile with image of 1s of known size and shape."""
    image = SkyImage.empty_like(FermiGalacticCenter.counts(), fill=1.)
    coordinates = image.coordinates()
    l = coordinates.data.lon
    b = coordinates.data.lat
    lons, lats = l.degree, b.degree
    counts = SkyImage.empty_like(FermiGalacticCenter.counts(), fill=1.)
    mask = np.zeros_like(image.data)
    # Select Full Image
    lat = [lats.min(), lats.max()]
    lon = [lons.min(), lons.max()]
    # Pick minimum valid binning
    binsz = 0.5
    mask_array = np.zeros_like(image.data, dtype='bool')
    # Test output
    lat_profile1 = image_profile('lat', image.to_image_hdu(), lat, lon,
                                 binsz, errors=True)
    # atol 0.1 is sufficient to check if correct number of pixels are included
    assert_allclose(lat_profile1['BIN_VALUE'].data.astype(float),
                    2000 * np.ones(39), rtol=1, atol=0.1)
    assert_allclose(lat_profile1['BIN_ERR'].data,
                    0.1 * lat_profile1['BIN_VALUE'].data)

    lat_profile2 = image_profile('lat', image.to_image_hdu(), lat, lon,
                                 binsz, counts.to_image_hdu(), errors=True)
    # atol 0.1 is sufficient to check if correct number of pixels are included
    assert_allclose(lat_profile2['BIN_ERR'].data,
                    44.721359549995796 * np.ones(39), rtol=1, atol=0.1)

    lat_profile3 = image_profile('lat', image.to_image_hdu(), lat, lon,
                                 binsz, counts.to_image_hdu(), mask_array, errors=True)
    assert_allclose(lat_profile3['BIN_VALUE'].data, np.zeros(39))
5,352,077
def create_anchors_3d_stride(grid_size,
                             voxel_size=[0.16, 0.16, 0.5],
                             coordinates_offsets=[0, -19.84, -2.5],
                             dtype=np.float32):
    """
    Args:
        grid_size: list [W, H, D] (xyz) number of anchor positions along each axis
        voxel_size: [x, y, z] stride between anchor centers
        coordinates_offsets: [x, y, z] origin offset applied to the center grid
        dtype: numpy dtype of the returned coordinates
    Returns:
        anchor centers: [N, 3] array of (x, y, z) center coordinates.
    """
    # almost 2x faster than v1
    x_stride, y_stride, z_stride = voxel_size
    x_offset, y_offset, z_offset = coordinates_offsets
    x_centers = np.arange(grid_size[0], dtype=dtype)
    y_centers = np.arange(grid_size[1], dtype=dtype)
    z_centers = np.arange(grid_size[2], dtype=dtype)
    z_centers = z_centers * z_stride + z_offset + 0.25
    y_centers = y_centers * y_stride + y_offset + 0.08
    x_centers = x_centers * x_stride + x_offset + 0.08
    xx, yy, zz = np.meshgrid(x_centers, y_centers, z_centers)
    sizes = np.stack((xx, yy, zz), axis=-1)
    sizes = np.reshape(sizes, [-1, 3])
    return sizes
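# Usage sketch (not part of the original snippet): anchor centers for a
# hypothetical 4 x 4 x 1 grid with the default stride and offsets.
import numpy as np

centers = create_anchors_3d_stride(grid_size=[4, 4, 1])
print(centers.shape)  # (16, 3) -- one (x, y, z) center per grid cell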
5,352,078
def combined_roidb(imdb_names):
    """
    Combine multiple roidbs
    """

    def get_roidb(imdb_name):
        imdb = get_imdb(imdb_name)
        print('Loaded dataset `{:s}` for training'.format(imdb.name))
        imdb.set_proposal_method("gt")
        print('Set proposal method: {:s}'.format("gt"))
        roidb = get_training_roidb(imdb)
        return roidb

    roidbs = [get_roidb(s) for s in imdb_names.split('+')]
    roidb = roidbs[0]
    if len(roidbs) > 1:
        for r in roidbs[1:]:
            roidb.extend(r)
        tmp = get_imdb(imdb_names.split('+')[1])
        imdb = imdb2(imdb_names, tmp.classes)
    else:
        imdb = get_imdb(imdb_names)
    return imdb, roidb
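# Usage sketch (not part of the original snippet), assuming the usual Faster
# R-CNN convention of '+'-joined dataset names registered with get_imdb; the
# names below are illustrative only.
imdb, roidb = combined_roidb('voc_2007_trainval+voc_2012_trainval')
print('{:d} roidb entries'.format(len(roidb)))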
5,352,079
def render_locations_profile(list_id, item_id, resource, rfields, record): """ Custom dataList item renderer for Locations on the Profile Page - UNUSED @param list_id: the HTML ID of the list @param item_id: the HTML ID of the item @param resource: the S3Resource to render @param rfields: the S3ResourceFields to render @param record: the record as dict """ record_id = record["gis_location.id"] item_class = "thumbnail" raw = record._row name = record["gis_location.name"] location_url = URL(c="gis", f="location", args=[record_id, "profile"]) # Placeholder to maintain style #logo = DIV(IMG(_class="media-object"), # _class="pull-left") # We don't Edit Locations # Edit Bar # permit = current.auth.s3_has_permission # table = current.db.gis_location # if permit("update", table, record_id=record_id): # vars = {"refresh": list_id, # "record": record_id, # } # f = current.request.function # if f == "organisation" and organisation_id: # vars["(organisation)"] = organisation_id # edit_btn = A(I(" ", _class="icon icon-edit"), # _href=URL(c="gis", f="location", # args=[record_id, "update.popup"], # vars=vars), # _class="s3_modal", # _title=current.response.s3.crud_strings.gis_location.title_update, # ) # else: # edit_btn = "" # if permit("delete", table, record_id=record_id): # delete_btn = A(I(" ", _class="icon icon-trash"), # _class="dl-item-delete", # ) # else: # delete_btn = "" # edit_bar = DIV(edit_btn, # delete_btn, # _class="edit-bar fright", # ) # Render the item item = DIV(DIV(DIV(#SPAN(A(name, # _href=location_url, # ), # _class="location-title"), #" ", #edit_bar, P(A(name, _href=location_url, ), _class="card_comments"), _class="span5"), # card-details _class="row", ), ) return item
5,352,080
def enable_plugins():
    """add all available Larch plugin paths
    """
    if 'larch_plugins' not in sys.modules:
        import larch
        sys.modules['larch_plugins'] = larch
    return sys.modules['larch_plugins']
5,352,081
def exception_log_and_respond(exception, logger, message, status_code):
    """Log an error and send a jsonified response."""
    logger.error(message, exc_info=True)
    return make_response(
        message,
        status_code,
        dict(exception_type=type(exception).__name__,
             exception_message=str(exception)),
    )
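# Usage sketch (not part of the original snippet), assuming Flask's
# make_response as used above; lookup_item is a hypothetical helper.
import logging

logger = logging.getLogger(__name__)

def get_item(item_id):
    try:
        return lookup_item(item_id)  # hypothetical helper that may raise KeyError
    except KeyError as exc:
        return exception_log_and_respond(exc, logger, "item not found", 404)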
5,352,082
def parse_cdhit_clusters(cluster_file):
    """
    Parses cdhit output into three collections in a named tuple:

        clusters: list of lists of gene ids.
        reps: list of representative gene for each cluster
        lookup: dict mapping from gene names to cluster index

    In this setup, cluster ids are the position in either of the first
    two lists.
    """
    # re-call with file-like object if we are given a path
    if isinstance(cluster_file, str):
        with open(cluster_file) as cluster_handle:
            return parse_cdhit_clusters(cluster_handle)

    # initialize final containers
    clusters = []
    cluster_reps = []
    cluster_lookup = {}

    # expression for parsing cluster line (captures gene name and alignment)
    gene_expr = re.compile(r"\s>(\S+)\.\.\.\s\s*(.+)\s*$")

    # loop over lines
    for line in cluster_file:
        if line.startswith(">"):
            # create a new cluster
            cluster = []
            cluster_id = len(clusters)
            clusters.append(cluster)
            continue
        # parse gene name from line
        gene, alignment = gene_expr.search(line).groups()
        if alignment.strip() == "*":
            cluster_reps.append(gene)
        cluster_lookup[gene] = cluster_id
        cluster.append(gene)

    # done
    return CdhitClusters(clusters, cluster_reps, cluster_lookup)
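# Usage sketch (not part of the original snippet) with a minimal, hypothetical
# CD-HIT .clstr fragment held in memory; CdhitClusters is the named tuple
# defined elsewhere in this module, so the fields are accessed by position here.
import io

example_clstr = io.StringIO(
    ">Cluster 0\n"
    "0\t120aa, >geneA... *\n"
    "1\t118aa, >geneB... at 98.3%\n"
    ">Cluster 1\n"
    "0\t95aa, >geneC... *\n"
)
parsed = parse_cdhit_clusters(example_clstr)
# parsed[0] -> [['geneA', 'geneB'], ['geneC']]   cluster membership
# parsed[1] -> ['geneA', 'geneC']                one representative per cluster
# parsed[2]['geneB'] -> 0                        gene name to cluster index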
5,352,083
def evaluate(model: nn.Module, dataloader: DataLoader) -> Scores:
    """
    Evaluate a model without gradient calculation
    :param model: instance of a model
    :param dataloader: dataloader to evaluate the model on
    :return: tuple of (accuracy, loss) values
    """
    score = 0
    loss = 0
    loss_func = nn.LogSoftmax(dim=1).to("cuda")
    for i, x in enumerate(dataloader):
        img = x[0]
        ans = x[1]
        ques = x[2]
        if torch.cuda.is_available():
            img = img.cuda()
            ans = ans.cuda()
            ques = ques.cuda()
        y_hat = model((img, ques))
        img = None
        ques = None
        nll = -loss_func(y_hat)
        score += train_utils.batch_accuracy(y_hat, ans.data).sum()
        ans = answer_norm(ans)
        loss += (nll * ans).sum(dim=1).mean()
    loss /= len(dataloader.dataset)
    score /= len(dataloader.dataset)
    score *= 100
    print("val loss = ", loss)
    return score, loss
5,352,084
def remote_judge_get_problem_info(problem_id: str, contest_id: int = -1, contest_problem_id: int = -1): """ { "code":0, "data":{ "isContest":"是否在比赛中", "problemData":{ "title":"题目名", "content":"题目内容", "background":"题目背景", "inputFormat":"输入格式", "outputFormat":'输出格式', "examples":[{"input":"样例输入","output":"样例输出"}], "createTime":"创建时间", "uploaderProfile":{ "uid":"用户ID", "username":"用户名" }, "remoteProblemID":"远程题目ID", "remoteOJ":{ "id":"远程OJID", "display":"远程OJ显示名", "availableLanguages":[ {"id":"0","display":"C++"} ] }, "public":"是否公开", "hint":"提示", "recentDiscussions":[ { "id":123, "title":"qw" } ], "acceptedCount":"", "submissionCount":"" }, "userData":{ "lastCode":"上次提交的代码", "lastLanguage":"上次选择的语言", "status":"qwq", "id":"", "accounts":{ "id":{ "username":"用户名", "oj":"OJ", "accountID":"ID" } } } } } """ # in_contest = contest_id != -1 contest: Contest = Contest.by_id(contest_id) if contest: # pass if not contest.running() and not permission_manager.has_permission(session.get("uid"), "contest.manage"): return make_response(-1, message="你没有权限查看此题目") print(contest_problem_id,"contest_problem_id") problem: Problem = db.session.query(Problem).filter( Problem.id == contest.problems[contest_problem_id]["id"]).one_or_none() else: problem: Problem = db.session.query(Problem).filter( Problem.id == problem_id).one_or_none() if not permission_manager.has_permission(session.get("uid"), "remote_judge.use") and problem.uploader_id != int(session.get("uid")): return make_response(-1, message="你没有权限查看该题目") if not problem: return make_response(-1, message="未知题目ID") if problem.problem_type != "remote_judge": return make_response(-1, message="此题目非远程评测题目") uploader: User = db.session.query(User.id, User.username).filter( User.id == problem.uploader_id).one() last_submission: Submission = db.session.query(Submission).filter(and_( Submission.problem_id == problem.id, Submission.uid == session.get("uid") )).order_by(Submission.score.desc()).order_by(Submission.id.desc()) last_code, last_language, submission_id, status = "", next(iter( config.REMOTE_JUDGE_OJS[problem.remote_judge_oj]["availableLanguages"].keys())), -1, None if last_submission.count(): last_submission = last_submission.first() last_code = last_submission.code last_language = last_submission.language status = last_submission.status submission_id = last_submission.id discussions = [ ] discussions_query = db.session.query(Discussion.id, Discussion.title).filter( Discussion.path == f"discussion.problem.{problem.id}").order_by(Discussion.id.desc()).limit(5) for item in discussions_query: discussions.append({ "id": item.id, "title": item.title }) accounts = {} for item in db.session.query(RemoteAccount.account_id, RemoteAccount.username, RemoteAccount.oj).filter( and_( RemoteAccount.uid == session.get("uid", -1), RemoteAccount.oj == problem.remote_judge_oj ) ): accounts[item.account_id] = { "username": item.username, "oj": config.REMOTE_JUDGE_OJS[item.oj]["display"], "accountID": item.account_id } return make_response(0, data={ "isContest": contest is not None, "problemData": { "title": problem.title, "content": problem.content, "background": problem.background, "inputFormat": problem.input_format, "outputFormat": problem.output_format, "examples": problem.example, "createTime": problem.create_time, "uploaderProfile": { "uid": uploader.id, "username": uploader.username } if not contest else None, "remoteProblemID": problem.remote_problem_id if not contest else None, "remoteOJ": { "id": problem.remote_judge_oj, 
**config.REMOTE_JUDGE_OJS[problem.remote_judge_oj] }, "public": problem.public if not contest else None, "hint": problem.hint, "recentDiscussions": discussions if not contest else None, "acceptedCount": db.session.query(Submission).filter(Submission.problem_id == problem.id).filter(Submission.status == "accepted").count() if not contest else None, "submissionCount": db.session.query(Submission).filter(Submission.problem_id == problem.id).count() if not contest else None, "id": problem.id }, "userData": { "lastCode": last_code, "lastLanguage": last_language, "status": status, "id": submission_id, "managable": permission_manager.has_permission( session.get("uid", None), "problem.manage"), "accounts": accounts } })
5,352,085
def project_directory(packaged_scene, package_root, source_scene):
    """
    Project directory for packaged scene

    Args:
        packaged_scene (str): Packaged scene path
        package_root (str): Package root path
        source_scene (str): Source scene path

    Returns:
        str
    """
    raise NotImplementedError()
5,352,086
def can_write(obj, user):
    """
    Takes an article or an article-related model. Checks whether the user can write the article.
    """
    return obj.can_write(user)
5,352,087
def process_image(msg_cont): """ Processes the message container, loading the image from the message and forwarding the predictions. :param msg_cont: the message container to process :type msg_cont: MessageContainer """ config = msg_cont.params.config try: start_time = datetime.now() image = Image.open(io.BytesIO(msg_cont.message['data'])) image = remove_alpha_channel(image) image_array = image_to_numpyarray(image) detection = inference_detector(model, image_array) assert isinstance(config.class_names, (tuple, list)) if isinstance(detection, tuple): bbox_result, segm_result = detection else: bbox_result, segm_result = detection, None bboxes = np.vstack(bbox_result) labels = [np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(bbox_result)] labels = np.concatenate(labels) objs = [] for index in range(len(bboxes)): x0, y0, x1, y1, score = bboxes[index] label = labels[index] label_str = config.class_names[label] # Ignore this roi if the score is less than the provided threshold if score < config.score_threshold: continue # Translate roi coordinates into original image coordinates (before combining) x0n = x0 / image.width y0n = y0 / image.height x1n = x1 / image.width y1n = y1 / image.height px = None py = None if segm_result is not None: px = [] py = [] segms = mmcv.concat_list(segm_result) if isinstance(segms, tuple): mask = segms[0][index] score = segms[1][index] else: mask = segms[index] mask = maskUtils.decode(mask).astype(np.int) poly = mask_to_polygon(mask, config.mask_threshold, mask_nth=config.mask_nth, view=(x0, y0, x1, y1), view_margin=config.view_margin, fully_connected=config.fully_connected) if len(poly) > 0: px, py = polygon_to_lists(poly[0], swap_x_y=True, normalize=False) pxn, pyn = polygon_to_lists(poly[0], swap_x_y=True, normalize=True, img_width=image.width, img_height=image.height) if config.bbox_as_fallback >= 0: if len(px) >= 3: p_x0n, p_y0n, p_x1n, p_y1n = polygon_to_bbox(lists_to_polygon(pxn, pyn)) p_area = (p_x1n - p_x0n) * (p_y1n - p_y0n) b_area = (x1n - x0n) * (y1n - y0n) if (b_area > 0) and (p_area / b_area < config.bbox_as_fallback): px = [float(i) for i in [x0, x1, x1, x0]] py = [float(i) for i in [y0, y0, y1, y1]] else: px = [float(i) for i in [x0, x1, x1, x0]] py = [float(i) for i in [y0, y0, y1, y1]] if config.fit_bbox_to_polygon: if len(px) >= 3: x0, y0, x1, y1 = polygon_to_bbox(lists_to_polygon(px, py)) bbox = BBox(left=int(x0), top=int(y0), right=int(x1), bottom=int(y1)) p = [] if px is None: px = [x0, x1, x1, x0] py = [y0, y0, y1, y1] for i in range(len(px)): p.append([int(px[i]), int(py[i])]) poly = Polygon(points=p) pred = ObjectPrediction(label=label_str, score=float(score), bbox=bbox, polygon=poly) objs.append(pred) preds = ObjectPredictions(id=str(start_time), timestamp=str(start_time), objects=objs) msg_cont.params.redis.publish(msg_cont.params.channel_out, preds.to_json_string()) if config.verbose: log("process_images - predictions string published: %s" % msg_cont.params.channel_out) end_time = datetime.now() processing_time = end_time - start_time processing_time = int(processing_time.total_seconds() * 1000) log("process_images - finished processing image: %d ms" % processing_time) except KeyboardInterrupt: msg_cont.params.stopped = True except: log("process_images - failed to process: %s" % traceback.format_exc())
5,352,088
def create_external_question(url: str, height: int) -> str:
    """Create XML for an MTurk ExternalQuestion."""
    return unparse({
        'ExternalQuestion': {
            '@xmlns': 'http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd',
            'ExternalURL': url,
            'FrameHeight': height
        }
    }, full_document=False)
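# Usage sketch (not part of the original snippet), assuming `unparse` is
# xmltodict.unparse as the call signature suggests; full_document=False omits
# the XML declaration.
xml = create_external_question('https://example.com/hit-form', 600)
# Expected shape of the result (a single line, wrapped here for readability):
# <ExternalQuestion xmlns="http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd">
#   <ExternalURL>https://example.com/hit-form</ExternalURL><FrameHeight>600</FrameHeight>
# </ExternalQuestion>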
5,352,089
def credentials_batch_account_key_secret_id(config):
    # type: (dict) -> str
    """Get Batch account key KeyVault Secret Id
    :param dict config: configuration object
    :rtype: str
    :return: keyvault secret id
    """
    try:
        secid = config['credentials']['batch']['account_key_keyvault_secret_id']
        if util.is_none_or_empty(secid):
            raise KeyError()
    except KeyError:
        return None
    return secid
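# Usage sketch (not part of the original snippet) with a minimal, hypothetical
# config dict; assumes util.is_none_or_empty treats a non-empty string as set.
config = {
    'credentials': {
        'batch': {
            'account_key_keyvault_secret_id':
                'https://myvault.vault.azure.net/secrets/batchkey'
        }
    }
}
credentials_batch_account_key_secret_id(config)  # -> the secret id string
credentials_batch_account_key_secret_id({})      # -> None (key missing)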
5,352,090
def is_C2D(lname):
    """Check whether a layer name matches the Conv2D pattern (anchored at the start of the name)."""
    import re
    pattns = ['Conv2D']
    return any([bool(re.match(t, lname)) for t in pattns])
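# Usage sketch (not part of the original snippet): re.match anchors at the start
# of the string, so only names that begin with "Conv2D" are recognized.
assert is_C2D('Conv2D_3')
assert not is_C2D('block1/Conv2D')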
5,352,091
def add_help_attribute(functions: Dict[str, Callable[..., Any]]) -> None:
    """Given a dict whose content is of the form {function_name_string: function_obj}
    it adds custom `help` and `__rich_repr__` attributes for all such function objects.
    """
    for name in functions:
        if name not in short_description:
            debug_helper.log(f"Missing description for {name}.")
            continue
        func = functions[name]
        setattr(func, "help", short_description[name])  # noqa
        setattr(func, "__rich_repr__", lambda func=func: (func.help(),))
5,352,092
def p_caseWhenStmt(t):
    """caseWhenStmt : R_WHEN expresion R_THEN plInstructions"""
    t[0] = CaseWhen(t[2], t[4], t.slice[1].lineno, t.slice[1].lexpos)
    repGrammar.append(t.slice)
5,352,093
def encounter_media(instance, filename):
    """Return an upload file path for an encounter media attachment."""
    if not instance.encounter.id:
        instance.encounter.save()
    return 'encounter/{0}/{1}'.format(instance.encounter.source_id, filename)
5,352,094
def findFonts(pattern, lazy=True):
    """Answers a list of Font instances where the pattern fits the font path.
    If pattern is a list, all parts should have a match.
    # TODO: make case insensitive
    """
    """
    >>> findFonts('Roboto-Thi')
    [<Font Roboto-Thin>, <Font Roboto-ThinItalic>]
    >>> # Select on family and name parts.
    >>> findFonts(('Robo', 'Ita', 'Thi'))
    [<Font Roboto-ThinItalic>]
    >>> # Select on style parts only.
    >>> findFonts(('Ita', 'Bol', 'Con'))
    [<Font RobotoCondensed-BoldItalic>]
    """
    fontPaths = getFontPaths()
    fonts = []
    if not isinstance(pattern, (list, tuple)):
        pattern = [pattern]
    for fontPath in fontPaths:
        found = True
        for match in pattern:
            if not match in fontPath:
                found = False
                break
        if found:
            fonts.append(findFont(fontPath, lazy=lazy))
    return fonts
5,352,095
def time_range_cutter_at_time(local, time_range, time_cut=(0, 0, 0)):
    """
    Given a range, return a list of DateTimes that match the time_cut
    between start and end.

    :param local: if False [default] use UTC datetime. If True use localtz
    :param time_range: the TimeRange object
    :param time_cut: HH:MM:SS of when to cut. eg: (0,0,0) for midnight
    """
    (start, end) = time_range.get(local)
    index = start.replace(
        hour=time_cut[0],
        minute=time_cut[1],
        second=time_cut[2]
    )
    cuts = []
    index += datetime.timedelta(days=1)
    while index < end:
        cuts.append(index)
        index += datetime.timedelta(days=1)
        if local:
            index = time_range.normalize(index)
    return cuts
5,352,096
def _apply_nat_dns_host_resolver():
    """ This will make the Dusty VM always use the host's DNS resolver for lookups.
    It solves an issue we were seeing where the VM's resolving settings would get
    out of date when a laptop was moved between routers with different settings,
    resulting in DNS lookup failures on the VM. """
    check_and_log_output_and_error_demoted(
        ['VBoxManage', 'modifyvm', constants.VM_MACHINE_NAME, '--natdnshostresolver1', 'on'],
        quiet_on_success=True)
5,352,097
def _darknet_conv(
    x: np.ndarray, filters: int, size: int, strides: int = 1, batch_norm: bool = True
) -> tf.Tensor:
    """create 1 layer with [padding], conv2d, [bn and relu]"""
    if strides == 1:
        padding = "same"
    else:
        x = ZeroPadding2D(((1, 0), (1, 0)))(x)  # top left half-padding
        padding = "valid"
    x = Conv2D(
        filters=filters,
        kernel_size=size,
        strides=strides,
        padding=padding,
        use_bias=not batch_norm,
        kernel_regularizer=l2(0.0005),
    )(x)
    if batch_norm:
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.1)(x)
    return x
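# Usage sketch (not part of the original snippet), assuming the same
# tensorflow.keras layers imported above plus Input and Model.
from tensorflow.keras import Input, Model

inputs = Input(shape=(416, 416, 3))
features = _darknet_conv(inputs, filters=32, size=3)               # stride 1 -> "same" padding
features = _darknet_conv(features, filters=64, size=3, strides=2)  # stride 2 -> zero-pad + "valid"
model = Model(inputs, features)  # 416x416x3 -> 208x208x64 feature map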
5,352,098
def _build_topic_to_consumer_topic_state_map(watermarks):
    """Builds a topic_to_consumer_topic_state_map from a kafka get_topics_watermarks response"""
    return {
        topic: ConsumerTopicState({
            partition: int((marks.highmark + marks.lowmark) / 2)
            for partition, marks in watermarks_map.items()
        }, None)
        for topic, watermarks_map in watermarks.items()
    }
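# Usage sketch (not part of the original snippet) with hypothetical watermark
# objects; the real ones come from get_topics_watermarks and only need
# .highmark and .lowmark here.
from collections import namedtuple

Marks = namedtuple('Marks', ['highmark', 'lowmark'])
watermarks = {'my.topic': {0: Marks(highmark=100, lowmark=0),
                           1: Marks(highmark=51, lowmark=1)}}
state_map = _build_topic_to_consumer_topic_state_map(watermarks)
# state_map['my.topic'] wraps the midpoint offsets {0: 50, 1: 26} in a
# ConsumerTopicState with no last-seen schema id.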
5,352,099