hexsha stringlengths 40..40 | size int64 5..2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3..251 | max_stars_repo_name stringlengths 4..130 | max_stars_repo_head_hexsha stringlengths 40..78 | max_stars_repo_licenses listlengths 1..10 | max_stars_count int64 1..191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24..24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24..24 ⌀ | max_issues_repo_path stringlengths 3..251 | max_issues_repo_name stringlengths 4..130 | max_issues_repo_head_hexsha stringlengths 40..78 | max_issues_repo_licenses listlengths 1..10 | max_issues_count int64 1..116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24..24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24..24 ⌀ | max_forks_repo_path stringlengths 3..251 | max_forks_repo_name stringlengths 4..130 | max_forks_repo_head_hexsha stringlengths 40..78 | max_forks_repo_licenses listlengths 1..10 | max_forks_count int64 1..105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24..24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24..24 ⌀ | content stringlengths 1..1.05M | avg_line_length float64 1..1.02M | max_line_length int64 3..1.04M | alphanum_fraction float64 0..1 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e6cd191f4e7eeaa1d075d528c9e2ada0827d674f | 4,618 | py | Python | HW2/dbsys-hw2/Database.py | yliu120/dbsystem | d1b008f411929058a34a1dd2c44c9ee2cf899865 | ["Apache-2.0"] | null | null | null | HW2/dbsys-hw2/Database.py | yliu120/dbsystem | d1b008f411929058a34a1dd2c44c9ee2cf899865 | ["Apache-2.0"] | null | null | null | HW2/dbsys-hw2/Database.py | yliu120/dbsystem | d1b008f411929058a34a1dd2c44c9ee2cf899865 | ["Apache-2.0"] | null | null | null |
import json, io, os, os.path
from Catalog.Schema import DBSchema, DBSchemaEncoder, DBSchemaDecoder
from Query.Plan import PlanBuilder
from Storage.StorageEngine import StorageEngine
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31.848276 | 107 | 0.707016 |
e6cda14ca91ba1556d929a926bfc87a16ab1f726 | 371 | py | Python | tests/test_arr_add_value.py | dboyliao/TaipeiPy-pybind11-buffer-array | 22e764d9fbf605950c0de10e3a341de36bc9bf89 | ["MIT"] | 1 | 2022-03-17T10:01:45.000Z | 2022-03-17T10:01:45.000Z | tests/test_arr_add_value.py | dboyliao/TaipeiPy-pybind11-buffer-array | 22e764d9fbf605950c0de10e3a341de36bc9bf89 | ["MIT"] | null | null | null | tests/test_arr_add_value.py | dboyliao/TaipeiPy-pybind11-buffer-array | 22e764d9fbf605950c0de10e3a341de36bc9bf89 | ["MIT"] | null | null | null |
import numpy as np
import mylib
| 28.538462 | 80 | 0.638814 |
e6ce056f0a84e4b655921e3c42a24774c81e07e4 | 619 | py | Python | moderngl_window/resources/data.py | DavideRuzza/moderngl-window | e9debc6ed4a1899aa83c0da2320e03b0c2922b80 | ["MIT"] | 142 | 2019-11-11T23:14:28.000Z | 2022-03-29T08:37:03.000Z | moderngl_window/resources/data.py | DavideRuzza/moderngl-window | e9debc6ed4a1899aa83c0da2320e03b0c2922b80 | ["MIT"] | 107 | 2019-10-31T20:31:45.000Z | 2022-03-23T15:01:41.000Z | moderngl_window/resources/data.py | DavideRuzza/moderngl-window | e9debc6ed4a1899aa83c0da2320e03b0c2922b80 | ["MIT"] | 36 | 2019-12-12T16:14:10.000Z | 2022-01-18T22:58:21.000Z |
"""
Registry for general data files
"""
from typing import Any
from moderngl_window.resources.base import BaseRegistry
from moderngl_window.meta import DataDescription
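# NOTE: the DataFiles class body is truncated in this excerpt. A minimal stub
# consistent with the imports above is assumed here so that the module-level
# instance below resolves; the real implementation may differ.
class DataFiles(BaseRegistry):
    """Registry for general data files."""
    settings_attr = "DATA_LOADERS"

    def load(self, meta: DataDescription) -> Any:
        """Load a data file using the loader described by ``meta``."""
        return super().load(meta)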
data = DataFiles()
| 23.807692 | 99 | 0.678514 |
e6cea1b013c7155bc06629fbf31e017bbe14f52f | 658 | py | Python | tests/test_units/test_mapper_str.py | frewsxcv/routes | 7690fc1016e56739855435fb54c96acccfa29009 | ["MIT"] | 1 | 2015-11-08T12:58:16.000Z | 2015-11-08T12:58:16.000Z | tests/test_units/test_mapper_str.py | frewsxcv/routes | 7690fc1016e56739855435fb54c96acccfa29009 | ["MIT"] | null | null | null | tests/test_units/test_mapper_str.py | frewsxcv/routes | 7690fc1016e56739855435fb54c96acccfa29009 | ["MIT"] | null | null | null |
import unittest
from routes import Mapper
| 34.631579 | 91 | 0.582067 |
e6cebeadc3ade385a017e0f9c9ce037d2f450345 | 2,106 | py | Python | quarkchain/tools/config_slave.py | HAOYUatHZ/pyquarkchain | b2c7c02e4415aa26917c2cbb5e7571c9fef16c5b | ["MIT"] | 1 | 2018-10-23T05:48:42.000Z | 2018-10-23T05:48:42.000Z | quarkchain/tools/config_slave.py | skji/pyquarkchain | 090f9981b89b8873daaed36171a9bc9f27b10473 | ["MIT"] | 3 | 2020-03-12T18:09:40.000Z | 2021-02-26T02:33:09.000Z | quarkchain/tools/config_slave.py | skji/pyquarkchain | 090f9981b89b8873daaed36171a9bc9f27b10473 | ["MIT"] | null | null | null |
"""
python config_slave.py 127.0.0.1 38000 38006 127.0.0.2 18999 18002
will generate 4 slave server configs accordingly. It will be used in deployment automation to configure a cluster.
usage: python config_slave.py <host1> <port1> <port2> <host2> <port3> ...
"""
import argparse
import collections
import json
import os
FILE = "../../testnet/2/cluster_config_template.json"
if "QKC_CONFIG" in os.environ:
FILE = os.environ["QKC_CONFIG"]
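# NOTE: the original main() is truncated in this excerpt. The sketch below is
# an assumed reconstruction of what the module docstring describes: group each
# host with the ports listed after it, then emit one slave entry per port.
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("hostports", nargs="+",
                        help="<host1> <port1> <port2> <host2> <port3> ...")
    args = parser.parse_args()
    host_to_ports = collections.defaultdict(list)
    current_host = None
    for token in args.hostports:
        if token.isdigit():  # ports are numeric, hosts are not
            host_to_ports[current_host].append(int(token))
        else:
            current_host = token
    with open(FILE) as f:
        config = json.load(f)
    # Writing host_to_ports into config["SLAVE_LIST"] and saving the JSON back
    # is omitted here; the exact schema lives in the template file above.
    print(json.dumps(host_to_ports, indent=2))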
if __name__ == "__main__":
main()
| 28.459459 | 111 | 0.597816 |
e6cfd0714854720779418d4a80b8997e25e611e3 | 3,227 | py | Python | python-function-files-dictionaries/week4-assignment1.py | MauMendes/python3-programming-specialization | 8bd259f0ac559c6004baa0e759b6ec4bc25e1320 | ["MIT"] | null | null | null | python-function-files-dictionaries/week4-assignment1.py | MauMendes/python3-programming-specialization | 8bd259f0ac559c6004baa0e759b6ec4bc25e1320 | ["MIT"] | null | null | null | python-function-files-dictionaries/week4-assignment1.py | MauMendes/python3-programming-specialization | 8bd259f0ac559c6004baa0e759b6ec4bc25e1320 | ["MIT"] | null | null | null |
#1) Write a function, sublist, that takes in a list of numbers as the parameter. In the function, use a while loop to return a sublist of the input list.
# The sublist should contain the same values of the original list up until it reaches the number 5 (it should not contain the number 5).
#2) Write a function called check_nums that takes a list as its parameter, and contains a while loop that only stops once the element of the
# list is the number 7. What is returned is a list of all of the numbers up until it reaches 7. (Signature: def check_nums(input_lst).)
#3) Write a function, sublist, that takes in a list of strings as the parameter. In the function, use a while loop to return a sublist of the input list.
# The sublist should contain the same values of the original list up until it reaches the string STOP (it should not contain the string STOP).
#4) Write a function called stop_at_z that iterates through a list of strings. Using a while loop, append each string to a new list until the string that
# appears is z. The function should return the new list. (A worked example follows below.)
#5) Below is a for loop that works. Underneath the for loop, rewrite the problem so that it does the same thing, but using a while loop instead of a for loop.
# Assign the accumulated total in the while loop code to the variable sum2. Once complete, sum2 should equal sum1.
lst = [65, 78, 21, 33]
length = len(lst)
i = 0
sum2 = 0
while i < length:
    sum2 += lst[i]
    i += 1
#6) Challenge: Write a function called beginning that takes a list as input and contains a while loop that only stops once the element of the list is the string bye.
# What is returned is a list that contains up to the first 10 strings, regardless of where the loop stops. (i.e., if it stops on the 32nd element, the first 10 are
# returned. If bye is the 5th element, the first 4 are returned.) If you want to make this even more of a challenge, do this without slicing
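# As a worked example of the pattern these exercises ask for (added here for
# illustration; not part of the original assignment file), problem 4 can be
# solved like this:
def stop_at_z(input_list):
    result = []
    i = 0
    # Keep copying until we run out of items or hit the string 'z'.
    while i < len(input_list) and input_list[i] != 'z':
        result.append(input_list[i])
        i += 1
    return result
# e.g. stop_at_z(['a', 'b', 'z', 'c']) == ['a', 'b']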
| 37.091954 | 168 | 0.664084 |
e6d14bad54d6d5d7401435412b7045fd99c1fc0a | 25,605 | py | Python | saas/backend/apps/group/views.py | Canway-shiisa/bk-iam-saas | 73c3770d9647c9cc8d515427cd1d053d8af9d071 | ["MIT"] | null | null | null | saas/backend/apps/group/views.py | Canway-shiisa/bk-iam-saas | 73c3770d9647c9cc8d515427cd1d053d8af9d071 | ["MIT"] | null | null | null | saas/backend/apps/group/views.py | Canway-shiisa/bk-iam-saas | 73c3770d9647c9cc8d515427cd1d053d8af9d071 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making -(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from functools import wraps
from typing import List
from django.shortcuts import get_object_or_404
from django.utils.translation import gettext as _
from drf_yasg.utils import swagger_auto_schema
from pydantic.tools import parse_obj_as
from rest_framework import serializers, status, views
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet, mixins
from backend.account.permissions import RolePermission, role_perm_class
from backend.apps.application.serializers import ConditionCompareSLZ, ConditionTagSLZ
from backend.apps.group import tasks # noqa
from backend.apps.group.models import Group
from backend.apps.policy.serializers import PolicyDeleteSLZ, PolicySLZ, PolicySystemSLZ
from backend.apps.template.models import PermTemplatePolicyAuthorized
from backend.audit.audit import audit_context_setter, view_audit_decorator
from backend.biz.group import GroupBiz, GroupCheckBiz, GroupMemberExpiredAtBean
from backend.biz.policy import PolicyBean, PolicyOperationBiz, PolicyQueryBiz
from backend.biz.policy_tag import ConditionTagBean, ConditionTagBiz
from backend.biz.role import RoleBiz, RoleListQuery, RoleObjectRelationChecker
from backend.biz.template import TemplateBiz
from backend.common.error_codes import error_codes
from backend.common.filters import NoCheckModelFilterBackend
from backend.common.serializers import SystemQuerySLZ
from backend.common.time import PERMANENT_SECONDS
from backend.service.constants import PermissionCodeEnum, RoleType, SubjectType
from backend.service.models import Subject
from backend.trans.group import GroupTrans
from .audit import (
GroupCreateAuditProvider,
GroupDeleteAuditProvider,
GroupMemberCreateAuditProvider,
GroupMemberDeleteAuditProvider,
GroupMemberRenewAuditProvider,
GroupPolicyDeleteAuditProvider,
GroupPolicyUpdateAuditProvider,
GroupTemplateCreateAuditProvider,
GroupTransferAuditProvider,
GroupUpdateAuditProvider,
)
from .constants import OperateEnum
from .filters import GroupFilter, GroupTemplateSystemFilter
from .serializers import (
GroupAddMemberSLZ,
GroupAuthoriedConditionSLZ,
GroupAuthorizationSLZ,
GroupCreateSLZ,
GroupDeleteMemberSLZ,
GroupIdSLZ,
GroupMemberUpdateExpiredAtSLZ,
GroupPolicyUpdateSLZ,
GroupSLZ,
GroupTemplateDetailSchemaSLZ,
GroupTemplateDetailSLZ,
GroupTemplateSchemaSLZ,
GroupTemplateSLZ,
GroupTransferSLZ,
GroupUpdateSLZ,
MemberSLZ,
SearchMemberSLZ,
)
permission_logger = logging.getLogger("permission")
def check_readonly_group(operation):
    """Block the operation when the target user group is read-only.
    (The decorator body is truncated in this excerpt; a pass-through
    reconstruction is assumed here.)"""

    def decorate(func):
        @wraps(func)
        def wrapper(view, request, *args, **kwargs):
            return func(view, request, *args, **kwargs)

        return wrapper

    return decorate
| 36.216407 | 117 | 0.696348 |
e6d16a8a093216b78956e0c3642e48c0a64c8778 | 5,188 | py | Python | towers.py | fillest/7drl2013 | 96d291dce08a85d3871713c99f3a036de482d6ca | ["MIT"] | 1 | 2015-05-19T08:12:49.000Z | 2015-05-19T08:12:49.000Z | towers.py | fillest/7drl2013 | 96d291dce08a85d3871713c99f3a036de482d6ca | ["MIT"] | null | null | null | towers.py | fillest/7drl2013 | 96d291dce08a85d3871713c99f3a036de482d6ca | ["MIT"] | null | null | null |
import util
import libtcodpy as tcod
import enemies
import operator
| 24.018519 | 92 | 0.664418 |
e6d351ce6a88251c74a7d12532c34a2b0ba6f8b1 | 795 | py | Python | python/mandelbrot.py | lukasjoc/random | 5be080b424f02491fb219634902fc0cc192aff6c | ["0BSD"] | 1 | 2020-11-09T19:32:43.000Z | 2020-11-09T19:32:43.000Z | python/mandelbrot.py | lukasjoc/random | 5be080b424f02491fb219634902fc0cc192aff6c | ["0BSD"] | null | null | null | python/mandelbrot.py | lukasjoc/random | 5be080b424f02491fb219634902fc0cc192aff6c | ["0BSD"] | null | null | null |
#!/usr/bin/python3
from PIL import Image
from numpy import array
from tqdm import tqdm
import colorsys
W=512
#W=142
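# NOTE: the mandelbrot() definition is truncated in this excerpt. The
# escape-time sketch below is an assumed stand-in that returns an RGB tuple,
# mapping iteration count to a hue via the colorsys import above.
def mandelbrot(x, y, max_iter=80):
    c = complex(x, y)
    z = 0j
    for i in range(max_iter):
        if abs(z) > 2.0:
            r, g, b = colorsys.hsv_to_rgb(i / max_iter, 1.0, 1.0)
            return (int(r * 255), int(g * 255), int(b * 255))
        z = z * z + c
    return (0, 0, 0)  # treat non-escaping points as inside the set (black)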
if __name__ == "__main__":
img = Image.new("RGB", (W, int(W / 2)))
pixels = img.load()
for x in tqdm(range(img.size[0])):
for y in tqdm(range(img.size[1])):
xx = (x - (0.75 * W)) / (W / 4)
yy = (y - (W / 4)) / (W / 4)
pixels[x, y] = mandelbrot(xx, yy)
img.show()
img.save("mandelbrot.jpg")
| 22.714286 | 69 | 0.52956 |
e6d3938d66694895ff110b11b2560698b6722338 | 9,672 | py | Python | tests/unit/commands/test_deploy.py | tonyreina/mlt | ee490ebdeb5aa6924dbfc0a067a0653754c470f4 | ["Apache-2.0"] | 1 | 2021-11-29T10:35:20.000Z | 2021-11-29T10:35:20.000Z | tests/unit/commands/test_deploy.py | tonyreina/mlt | ee490ebdeb5aa6924dbfc0a067a0653754c470f4 | ["Apache-2.0"] | null | null | null | tests/unit/commands/test_deploy.py | tonyreina/mlt | ee490ebdeb5aa6924dbfc0a067a0653754c470f4 | ["Apache-2.0"] | 1 | 2020-02-22T01:04:15.000Z | 2020-02-22T01:04:15.000Z |
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: EPL-2.0
#
from __future__ import print_function
import uuid
import pytest
from mock import call, MagicMock
from mlt.commands.deploy import DeployCommand
from test_utils.io import catch_stdout
def deploy(no_push, skip_crd_check, interactive, extra_config_args, retries=5):
deploy = DeployCommand(
{'deploy': True, '--no-push': no_push,
'--skip-crd-check': skip_crd_check,
'--interactive': interactive, '--retries': retries,
'--logs':False})
deploy.config = {'name': 'app', 'namespace': 'namespace'}
deploy.config.update(extra_config_args)
with catch_stdout() as caught_output:
deploy.action()
output = caught_output.getvalue()
return output
def verify_successful_deploy(output, did_push=True, interactive=False):
"""assert pushing, deploying, then objs created, then pushed"""
pushing = output.find('Pushing ')
push_skip = output.find('Skipping image push')
deploying = output.find('Deploying ')
inspecting = output.find('Inspect created objects by running:\n')
pushed = output.find('Pushed to ')
pod_connect = output.find('Connecting to pod...')
if did_push:
assert all(var >= 0 for var in (
deploying, inspecting, pushing, pushed))
        assert deploying < inspecting and pushing < pushed
else:
assert all(var == -1 for var in (pushing, pushed))
assert all(var >= 0 for var in (deploying, inspecting, push_skip))
        assert push_skip < deploying and deploying < inspecting
if interactive:
assert pod_connect > inspecting
| 34.791367 | 85 | 0.652089 |
e6d61cff66c7d3846169dfff6eca952a90b72ddd | 1,940 | py | Python | packages/mccomponents/tests/mccomponentsbpmodule/sample/Broadened_E_Q_Kernel_TestCase.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | ["BSD-3-Clause"] | 5 | 2017-01-16T03:59:47.000Z | 2020-06-23T02:54:19.000Z | packages/mccomponents/tests/mccomponentsbpmodule/sample/Broadened_E_Q_Kernel_TestCase.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | ["BSD-3-Clause"] | 293 | 2015-10-29T17:45:52.000Z | 2022-01-07T16:31:09.000Z | packages/mccomponents/tests/mccomponentsbpmodule/sample/Broadened_E_Q_Kernel_TestCase.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | ["BSD-3-Clause"] | 1 | 2019-05-25T00:53:31.000Z | 2019-05-25T00:53:31.000Z |
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2006-2010 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
standalone = True
import unittestX as unittest
import journal
debug = journal.debug( "Broadened_E_Q_Kernel_TestCase" )
warning = journal.warning( "Broadened_E_Q_Kernel_TestCase" )
import mcni
from mccomposite import mccompositebp
from mccomponents import mccomponentsbp
if __name__ == "__main__":
main()
# version
__id__ = "$Id: TestCase.py 696 2010-11-09 06:23:06Z linjiao $"
# End of file
| 23.373494 | 80 | 0.491753 |
e6d6837b46baf712793275d6754e0dab0bf209be | 602 | py | Python | baseline/ns-vqa/reason/options/test_options.py | robinzixuan/Video-Question-Answering-HRI | ae68ffee1e6fc1eb13229e457e3b8e3bc3a11579 | ["MIT"] | 52 | 2019-12-04T22:26:56.000Z | 2022-03-31T17:04:15.000Z | reason/options/test_options.py | guxiwuruo/VCML | 5a0f01a0baba238cef2f63131fccd412e3d7822b | ["MIT"] | 6 | 2020-08-25T07:35:14.000Z | 2021-09-09T04:57:09.000Z | reason/options/test_options.py | guxiwuruo/VCML | 5a0f01a0baba238cef2f63131fccd412e3d7822b | ["MIT"] | 5 | 2020-02-10T07:39:24.000Z | 2021-06-23T02:53:42.000Z |
from .base_options import BaseOptions
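# The remainder of this file is truncated in this excerpt. Options modules in
# this style usually extend BaseOptions roughly as sketched below (assumed,
# not the original implementation; flag names are hypothetical):
class TestOptions(BaseOptions):
    def initialize(self):
        super().initialize()
        self.parser.add_argument('--split', default='val', type=str,
                                 help='dataset split to evaluate on')
        self.is_train = False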
| 43 | 107 | 0.699336 |
e6d751bc3f23bc91c2716777ca9ac12139d4b799 | 6,325 | py | Python | Model_setup/NEISO_data_file/downsampling_generators_v1.py | keremakdemir/ISONE_UCED | 11ce34c5ac5d34dcab771640f41c0d2ce4ab21f9 | ["MIT"] | null | null | null | Model_setup/NEISO_data_file/downsampling_generators_v1.py | keremakdemir/ISONE_UCED | 11ce34c5ac5d34dcab771640f41c0d2ce4ab21f9 | ["MIT"] | null | null | null | Model_setup/NEISO_data_file/downsampling_generators_v1.py | keremakdemir/ISONE_UCED | 11ce34c5ac5d34dcab771640f41c0d2ce4ab21f9 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 24 18:45:34 2020
@author: kakdemi
"""
import pandas as pd
#importing generators
all_generators = pd.read_excel('generators2.xlsx', sheet_name='NEISO generators (dispatch)')
#getting all oil generators
all_oil = all_generators[all_generators['typ']=='oil'].copy()
#getting all generators in every zone
CT_oil = all_oil[all_oil['zone']=='CT'].copy()
ME_oil = all_oil[all_oil['zone']=='ME'].copy()
NEMA_oil = all_oil[all_oil['zone']=='NEMA'].copy()
NH_oil = all_oil[all_oil['zone']=='NH'].copy()
RI_oil = all_oil[all_oil['zone']=='RI'].copy()
SEMA_oil = all_oil[all_oil['zone']=='SEMA'].copy()
VT_oil = all_oil[all_oil['zone']=='VT'].copy()
WCMA_oil = all_oil[all_oil['zone']=='WCMA'].copy()
#defining zones
zones = ['CT','ME','NEMA','NH','RI','SEMA','VT','WCMA']
#getting all slack generators
all_slack = all_generators[all_generators['typ']=='slack'].copy()
#getting generators other than slack and oil
all_other = all_generators[(all_generators['typ']!='oil') & (all_generators['typ']!='slack')].copy()
#defining a function to downsample oil generators
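#NOTE: the oil_downsampler definition is truncated in this excerpt. The sketch
#below is an assumed reconstruction: it collapses one zone's oil units into a
#single aggregate generator row (numeric columns summed; the original may differ).
def oil_downsampler(zone):
    zone_oil = globals()[zone + '_oil']
    agg_row = zone_oil.iloc[[0]].copy()
    for col in zone_oil.select_dtypes('number').columns:
        agg_row[col] = zone_oil[col].sum()
    return agg_row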
#downsampling oil generators in every zone by using the defined function
for z in zones:
globals()[z+'_agg_oil_df'] = oil_downsampler(z)
#adding downsampled oil generators to create a complete list of generators
final_generators = pd.concat([all_other, CT_agg_oil_df, ME_agg_oil_df, NEMA_agg_oil_df,
NH_agg_oil_df, RI_agg_oil_df, SEMA_agg_oil_df, VT_agg_oil_df,
WCMA_agg_oil_df, all_slack], ignore_index=True)
#exporting the generators as an Excel file
final_generators.to_excel('generators.xlsx', sheet_name='NEISO generators (dispatch)', index=False)
| 47.201493 | 100 | 0.68253 |
e6d7ef175de941485b4682919229774de09d58bb | 307 | py | Python | GUI1.py | otmanabdoun/IHM-Python | 624e961c2f6966b98bf2c1bc4dd276b812954ba1 | ["Apache-2.0"] | 3 | 2021-12-08T10:34:55.000Z | 2022-01-17T21:02:40.000Z | GUI1.py | otmanabdoun/IHM-Python | 624e961c2f6966b98bf2c1bc4dd276b812954ba1 | ["Apache-2.0"] | null | null | null | GUI1.py | otmanabdoun/IHM-Python | 624e961c2f6966b98bf2c1bc4dd276b812954ba1 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 16 19:47:41 2021
@author: User
"""
import tkinter as tk
racine = tk.Tk()
label = tk.Label(racine, text="J'adore Python !")
bouton = tk.Button(racine, text="Quitter", command=racine.destroy)
label.pack()
bouton.pack()
racine.mainloop()  # enter the Tk event loop so the window stays open
| 23.615385 | 80 | 0.579805 |
e6d83253f8c1c21cef502fbe86bb43dc1f2be4ac | 2,579 | py | Python | app/routes/v1/endpoints/clickup.py | ertyurk/bugme | 5a3ef3e089e0089055074c1c896c3fdc76600e93 | ["MIT"] | null | null | null | app/routes/v1/endpoints/clickup.py | ertyurk/bugme | 5a3ef3e089e0089055074c1c896c3fdc76600e93 | ["MIT"] | null | null | null | app/routes/v1/endpoints/clickup.py | ertyurk/bugme | 5a3ef3e089e0089055074c1c896c3fdc76600e93 | ["MIT"] | null | null | null |
from fastapi import APIRouter, status, Body, HTTPException
from fastapi.encoders import jsonable_encoder
from starlette.responses import JSONResponse
from app.models.common import *
from app.models.clickup import *
from app.database.crud.clickup import *
router = APIRouter()
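# The endpoint implementations are truncated in this excerpt. A minimal
# illustrative route in the style of the imports above (assumed, not the
# original handlers):
@router.get("/health", status_code=status.HTTP_200_OK)
async def healthcheck():
    return JSONResponse(content=jsonable_encoder({"status": "ok"}))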
| 31.839506 | 88 | 0.6867 |
e6d9219a9f3da8435460a41632a908023dbaa338 | 2,668 | py | Python | cellfinder_core/main.py | npeschke/cellfinder-core | 7a86a7d2c879c94da529ec6140f7e5c3f02bf288 | ["BSD-3-Clause"] | 5 | 2021-01-22T11:40:01.000Z | 2021-09-10T07:16:05.000Z | cellfinder_core/main.py | npeschke/cellfinder-core | 7a86a7d2c879c94da529ec6140f7e5c3f02bf288 | ["BSD-3-Clause"] | 38 | 2021-01-22T11:50:29.000Z | 2022-03-11T11:04:06.000Z | cellfinder_core/main.py | npeschke/cellfinder-core | 7a86a7d2c879c94da529ec6140f7e5c3f02bf288 | ["BSD-3-Clause"] | 12 | 2021-06-18T09:57:24.000Z | 2022-03-06T13:03:18.000Z |
"""
N.B imports are within functions to prevent tensorflow being imported before
its warnings are silenced
"""
import os
import logging
from imlib.general.logging import suppress_specific_logs
tf_suppress_log_messages = [
"multiprocessing can interact badly with TensorFlow"
]
def suppress_tf_logging(tf_suppress_log_messages):
"""
Prevents many lines of logs such as:
"2019-10-24 16:54:41.363978: I tensorflow/stream_executor/platform/default
/dso_loader.cc:44] Successfully opened dynamic library libcuda.so.1"
"""
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
for message in tf_suppress_log_messages:
suppress_specific_logs("tensorflow", message)
| 25.653846 | 78 | 0.664168 |
e6d9b9257b4bb7dd1463fcb578829bc893311e39 | 1,378 | py | Python | server.py | rezist-ro/rezistenta.tv | 0c0dfa4842061baf2b575688588c5d77cfdba427 | ["MIT"] | null | null | null | server.py | rezist-ro/rezistenta.tv | 0c0dfa4842061baf2b575688588c5d77cfdba427 | ["MIT"] | null | null | null | server.py | rezist-ro/rezistenta.tv | 0c0dfa4842061baf2b575688588c5d77cfdba427 | ["MIT"] | null | null | null |
# coding=utf-8
import dateutil.parser
import flask
import json
import os
import time
import urllib
import yaml
EPISODES = yaml.safe_load(open("episodes.yaml").read())
app = flask.Flask(__name__,
static_path="/assets",
static_folder="assets")
app.jinja_env.filters["strftime"] = \
lambda str, fmt: dateutil.parser.parse(str).strftime(fmt)
app.jinja_env.filters["quote_plus"] = lambda u: urllib.quote_plus(u)
ASSETS = os.path.join(app.root_path, "assets")
| 25.054545 | 68 | 0.592163 |
e6dbab8094c7c2aea35411b5ea545eabb3be8db0 | 273 | py | Python | problem020.py | mazayus/ProjectEuler | 64aebd5d80031fab2f0ef3c44c3a1118212ab613 | ["MIT"] | null | null | null | problem020.py | mazayus/ProjectEuler | 64aebd5d80031fab2f0ef3c44c3a1118212ab613 | ["MIT"] | null | null | null | problem020.py | mazayus/ProjectEuler | 64aebd5d80031fab2f0ef3c44c3a1118212ab613 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
from functools import *
import operator
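# The helper definitions are truncated in this excerpt; the reconstructions
# below are assumed, but they are consistent with the functools (reduce) and
# operator (mul) imports above.
def factorial(number):
    return reduce(operator.mul, range(1, number + 1), 1)

def digits(number):
    return (int(digit) for digit in str(number))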
print(sum(digits(factorial(100))))
| 19.5 | 52 | 0.70696 |
e6dce6f716b933d2a36c1e77462d5b0eb2326793 | 5,449 | py | Python | transformer.py | ghafran/KerasPersonLab | fcd80b62247aee8bd1d41ff91e31c822950f561e | ["MIT"] | null | null | null | transformer.py | ghafran/KerasPersonLab | fcd80b62247aee8bd1d41ff91e31c822950f561e | ["MIT"] | null | null | null | transformer.py | ghafran/KerasPersonLab | fcd80b62247aee8bd1d41ff91e31c822950f561e | ["MIT"] | null | null | null |
import numpy as np
from math import cos, sin, pi
import cv2
import random
from config import config, TransformationParams
from data_prep import map_coco_to_personlab
class Transformer:
| 41.915385 | 142 | 0.599193 |
e6ddfeb2d231878165ecef38a814ab51e23d6978 | 412 | py | Python | enan/__init__.py | mizuno-group/enan | 3c9dbe60bebf98e384e858db56980928b5897775 | ["MIT"] | null | null | null | enan/__init__.py | mizuno-group/enan | 3c9dbe60bebf98e384e858db56980928b5897775 | ["MIT"] | null | null | null | enan/__init__.py | mizuno-group/enan | 3c9dbe60bebf98e384e858db56980928b5897775 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 25 15:46:32 2019
@author: tadahaya
"""
from .binom import BT
from .connect import Connect
from .fet import FET
from .gsea import GSEA
from .ssgsea import ssGSEA
__copyright__ = 'Copyright (C) 2020 MIZUNO Tadahaya'
__version__ = '1.0.3'
__license__ = 'MIT'
__author__ = 'MIZUNO Tadahaya'
__author_email__ = '[email protected]'
| 22.888889 | 56 | 0.662621 |
e6de80977f40faa2f17ffea735e4529c245402b4 | 320 | py | Python | app/helpers/__init__.py | Hacker-1202/Selfium | 7e798c23c9f24aacab6f6a485d6355f1045bc65c | ["MIT"] | 14 | 2021-11-05T11:27:25.000Z | 2022-02-28T02:04:32.000Z | app/helpers/__init__.py | CssHammer/Selfium | 7e798c23c9f24aacab6f6a485d6355f1045bc65c | ["MIT"] | 2 | 2022-01-24T22:00:44.000Z | 2022-01-31T13:13:27.000Z | app/helpers/__init__.py | CssHammer/Selfium | 7e798c23c9f24aacab6f6a485d6355f1045bc65c | ["MIT"] | 5 | 2022-01-02T13:33:17.000Z | 2022-02-26T13:09:50.000Z |
"""
Selfium Helper Files
~~~~~~~~~~~~~~~~~~~
All Helper Files used in Selfium project;
:copyright: (c) 2021 - Caillou and ZeusHay;
:license: MIT, see LICENSE for more details.
"""
from .getUser import *
from .getGuild import *
from .params import *
from .notify import *
from .sendEmbed import *
from .isStaff import *
| 21.333333 | 44 | 0.6875 |
e6dee5544a49eb20feb56cbcfdbdf81cda6aae63 | 10,859 | py | Python | NLP/UNIMO/src/finetune/visual_entailment.py | zhangyimi/Research | 866f91d9774a38d205d6e9a3b1ee6293748261b3 | ["Apache-2.0"] | 1,319 | 2020-02-14T10:42:07.000Z | 2022-03-31T15:42:18.000Z | NLP/UNIMO/src/finetune/visual_entailment.py | green9989/Research | 94519a72e7936c77f62a31709634b72c09aabf74 | ["Apache-2.0"] | 192 | 2020-02-14T02:53:34.000Z | 2022-03-31T02:25:48.000Z | NLP/UNIMO/src/finetune/visual_entailment.py | green9989/Research | 94519a72e7936c77f62a31709634b72c09aabf74 | ["Apache-2.0"] | 720 | 2020-02-14T02:12:38.000Z | 2022-03-31T12:21:15.000Z |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for visual_entailment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import time
import numpy as np
import paddle.fluid as fluid
from model.unimo_finetune import UNIMOModel
from eval import glue_eval
from collections import OrderedDict
from utils.utils import print_eval_log
def kl_divergence_with_logits(q_logits, p_logits):
"""
symmetric KL-divergence (See SMART, Sec 3.1)
q_logits: logits
p_logits: delta_logits
"""
q = fluid.layers.softmax(input=q_logits)
p = fluid.layers.softmax(input=p_logits)
kl_qp = fluid.layers.reduce_sum(q * (fluid.layers.log(q) - fluid.layers.log(p)), -1)
kl_pq = fluid.layers.reduce_sum(p * (fluid.layers.log(p) - fluid.layers.log(q)), -1)
vat_loss = fluid.layers.mean(x=kl_qp+kl_pq)
return vat_loss
def create_model(args, config, pyreader_name="train_reader", is_train=True):
"""create_model"""
shapes = [[-1, args.max_seq_len, 1], # src_ids
[-1, args.max_seq_len, 1], # pos_ids
[-1, args.max_seq_len, 1], # sent_ids
[-1, args.max_img_len + args.max_seq_len, args.max_img_len + args.max_seq_len], # input_mask
[-1, args.max_img_len, 1], # v_mask
[-1, args.max_seq_len, 1], # t_mask
[-1, args.max_img_len, config["image_embedding_size"]], # image_embedding
[-1, args.max_img_len, 5], # image_loc
[-1, 1] # labels
]
dtypes = ['int64', 'int64', 'int64', 'float32', 'float32', 'float32', 'float32','float32', 'int64']
lod_levels = [0, 0, 0, 0, 0, 0, 0, 0, 0]
pyreader = fluid.layers.py_reader(
capacity=70,
shapes=shapes,
dtypes=dtypes,
lod_levels=lod_levels,
name=pyreader_name,
use_double_buffer=True)
(src_ids, pos_ids, sent_ids, input_mask, v_mask, t_mask, image_embedding, image_loc, labels) \
= fluid.layers.read_file(pyreader)
emb_ids = {"word_embedding": src_ids, "sent_embedding": sent_ids, "pos_embedding": pos_ids}
image_input = {"image_embedding": image_embedding, "loc_embedding": image_loc}
adv_step, adv_lr, norm_type, adv_max_norm, adv_init_mag = \
args.adv_step, args.adv_lr, args.norm_type, args.adv_max_norm, args.adv_init_mag
assert adv_step > 0 and adv_init_mag > 0
if is_train:
text_emb_shape = [-1, args.max_seq_len, config['hidden_size']]
text_delta = init_delta(src_ids, t_mask, text_emb_shape, name='text')
image_emb_shape = [-1, args.max_img_len, config['image_embedding_size']]
image_delta = init_delta(image_embedding, v_mask, image_emb_shape, name='img')
else:
text_delta, image_delta = None, None
loss = None
for iter in range(adv_step):
vl_pure = UNIMOModel(
emb_ids=emb_ids,
input_mask=input_mask,
config=config,
image_input=image_input,
weight_sharing=args.weight_sharing
)
vl_text = UNIMOModel(
text_adv_delta=text_delta,
emb_ids=emb_ids,
input_mask=input_mask,
config=config,
image_input=image_input,
weight_sharing=args.weight_sharing
)
vl_image = UNIMOModel(
image_adv_delta=image_delta,
emb_ids=emb_ids,
input_mask=input_mask,
config=config,
image_input=image_input,
weight_sharing=args.weight_sharing
)
h_pure_text, h_pure_image = vl_pure.get_pooled_output()
h_text_text, h_text_image = vl_text.get_pooled_output()
h_image_text, h_image_image = vl_image.get_pooled_output()
loss_pure, logit_pure, probs_pure = get_loss_and_logits(h_pure_text, h_pure_image)
loss_text, logit_text, probs_text = get_loss_and_logits(h_text_text, h_text_image)
loss_image, logit_image, probs_image = get_loss_and_logits(h_image_text, h_image_image)
if is_train:
text_delta = pgd_with_l2(loss_text, text_delta)
image_delta = pgd_with_l2(loss_image, image_delta)
kl_adv_text_loss = kl_divergence_with_logits(logit_pure, logit_text)
kl_adv_image_loss = kl_divergence_with_logits(logit_pure, logit_image)
cur_loss = loss_pure + loss_text + loss_image + kl_adv_text_loss + kl_adv_image_loss
loss = cur_loss if loss is None else loss + cur_loss
num_seqs = fluid.layers.create_tensor(dtype='int64')
accuracy = fluid.layers.accuracy(input=probs_pure, label=labels, total=num_seqs)
graph_vars = {
"loss": loss,
"probs": probs_pure,
"accuracy": accuracy,
"labels": labels,
"num_seqs": num_seqs
}
for k, v in graph_vars.items():
v.persistable = False
return pyreader, graph_vars
def evaluate(args, exe, test_pyreader, graph_vars, eval_phase, dev_count=1, gpu_id=0):
"""evaluate"""
all_mat = []
test_pyreader.start()
time_begin = time.time()
fetch_list = [graph_vars["probs"].name, graph_vars["labels"].name]
while True:
try:
np_probs, np_labels = exe.run(fetch_list=fetch_list)
np_preds = np.argmax(np_probs, axis=1).reshape((-1, 1))
np_labels = np_labels.reshape((-1, 1))
mat = np.concatenate([np_preds, np_labels], axis=1)
all_mat.extend(mat.tolist())
except fluid.core.EOFException:
test_pyreader.reset()
break
all_mat = np.array(all_mat)
time_end = time.time()
save_file = "%s/%s.trainers_%d.part_%d.npy" % (args.eval_dir, eval_phase, dev_count, gpu_id)
np.save(save_file, all_mat)
tmp_file = "%s/%s.trainers_%d.part_%d.finish" % (args.eval_dir, eval_phase, dev_count, gpu_id)
tmp_writer = open(tmp_file, "w")
tmp_writer.close()
if gpu_id == 0:
while True:
ret = os.popen('find %s -maxdepth 1 -name "%s.trainers_%d.part_*.finish"' %
(args.eval_dir, eval_phase, dev_count)).readlines()
if len(ret) != dev_count:
time.sleep(1)
continue
else:
break
all_mats = []
save_files = glob.glob("%s/%s.trainers_%d.part_*.npy" % (args.eval_dir, eval_phase, dev_count))
for cur_save_file in save_files:
mat = np.load(cur_save_file).tolist()
all_mats.extend(mat)
all_mats = np.array(all_mats)
cur_time = str(int(time.time()))
os.system("mkdir %s/%s" % (args.eval_dir, cur_time))
os.system("mv %s/%s.trainers_%d.* %s/%s" % (args.eval_dir, eval_phase, dev_count, args.eval_dir, cur_time))
ret = OrderedDict()
ret['phase'] = eval_phase
ret['loss'] = -1
ret['data_num'] = all_mats.shape[0]
ret['used_time'] = round(time_end - time_begin, 4)
metrics = OrderedDict()
metrics["simple_accuracy"] = glue_eval.simple_accuracy
if args.eval_mertrics in metrics:
ret_metric = metrics[args.eval_mertrics](all_mats[:, 0], all_mats[:, 1])
ret.update(ret_metric)
print_eval_log(ret)
else:
raise ValueError('unsupported metric {}'.format(args.eval_mertrics))
return ret
else:
return None
| 38.644128 | 115 | 0.634773 |
e6df360b607f7a2f24c1ab6bf355ca5d23eb73f0 | 856 | py | Python | src/records.py | oth-datapipeline/ingestion-scripts | 48eecf63b0bf06200aa59be63de6839599ec51df | ["Apache-2.0"] | null | null | null | src/records.py | oth-datapipeline/ingestion-scripts | 48eecf63b0bf06200aa59be63de6839599ec51df | ["Apache-2.0"] | 4 | 2022-03-31T16:41:33.000Z | 2022-03-31T22:58:11.000Z | src/records.py | oth-datapipeline/ingestion-scripts | 48eecf63b0bf06200aa59be63de6839599ec51df | ["Apache-2.0"] | null | null | null |
from faust import Record
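# The record definitions are truncated in this excerpt. Faust models are
# declared by subclassing Record; a hypothetical example in that style
# (the field names below are assumptions, not the original schema):
class RssFeedEntry(Record, serializer="json"):
    title: str
    link: str
    published: str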
| 18.608696 | 46 | 0.627336 |
e6df6e5deaed8c701c0957596bd842d1b7c2b65f | 923 | py | Python | leetcode/102-Medium-Binary-Tree-Level-Order-Traversal/answer.py | vaishali-bariwal/Practice-Coding-Questions | 747bfcb1cb2be5340daa745f2b9938f0ee87c9ac | ["Unlicense"] | 25 | 2018-05-22T15:18:50.000Z | 2022-01-08T02:41:46.000Z | leetcode/102-Medium-Binary-Tree-Level-Order-Traversal/answer.py | vaishali-bariwal/Practice-Coding-Questions | 747bfcb1cb2be5340daa745f2b9938f0ee87c9ac | ["Unlicense"] | 1 | 2019-05-24T16:55:27.000Z | 2019-05-24T16:55:27.000Z | leetcode/102-Medium-Binary-Tree-Level-Order-Traversal/answer.py | vaishali-bariwal/Practice-Coding-Questions | 747bfcb1cb2be5340daa745f2b9938f0ee87c9ac | ["Unlicense"] | 18 | 2018-09-20T15:39:26.000Z | 2022-03-02T21:38:22.000Z |
#!/usr/bin/python3
#------------------------------------------------------------------------------
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
#------------------------------------------------------------------------------
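# The original solution body is truncated in this excerpt; below is a
# standard breadth-first traversal (assumed, not necessarily the author's
# exact version):
class Solution:
    def levelOrder(self, root):
        if not root:
            return []
        result, level = [], [root]
        while level:
            result.append([node.val for node in level])
            # Gather the next level's children, left to right.
            level = [child for node in level
                     for child in (node.left, node.right) if child]
        return result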
#Testing
| 26.371429 | 79 | 0.40195 |
e6dfce648d291a8622a4863d4137f07d19b0910a | 320 | py | Python | setup.py | shirayu/fitbit-dumper | 21cee614e294d84204ad06d81dae9adf9853a135 | ["Apache-2.0"] | null | null | null | setup.py | shirayu/fitbit-dumper | 21cee614e294d84204ad06d81dae9adf9853a135 | ["Apache-2.0"] | null | null | null | setup.py | shirayu/fitbit-dumper | 21cee614e294d84204ad06d81dae9adf9853a135 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
from setuptools import setup, find_packages
setup(
name="",
version="0.01",
packages=find_packages(),
install_requires=[
"fitbit"
],
dependency_links=[
],
extras_require={
"tests": [
"flake8",
"autopep8",
]
}
)
| 15.238095 | 43 | 0.515625 |
e6dfe1b17aaabced195ba909adb862f6d72a3bd2 | 214 | py | Python | src/main.py | mtnmunuklu/SigmaToExcel | 7d11fda19c0075122928ff5f1dbaab7775d30fe9 | ["MIT"] | 10 | 2021-05-26T11:24:27.000Z | 2022-01-14T16:42:25.000Z | src/main.py | mtnmunuklu/SigmaToExcel | 7d11fda19c0075122928ff5f1dbaab7775d30fe9 | ["MIT"] | null | null | null | src/main.py | mtnmunuklu/SigmaToExcel | 7d11fda19c0075122928ff5f1dbaab7775d30fe9 | ["MIT"] | null | null | null |
import sys
sys.path.append("../")
from src.app.sigma import SigmaConverter
if __name__ == "__main__":
sigmaconverter = SigmaConverter()
sigmaconverter.read_from_file()
sigmaconverter.write_to_excel()
| 21.4 | 40 | 0.742991 |
e6e002827d5c227b7c36fcd9b7c86eda019324e4 | 449 | py | Python | server/processes/migrations/0132_auto_20201108_0540.py | CloudReactor/task_manager | 464ca74371064fabb9a21b1f5bacba30360932ab | ["Fair"] | null | null | null | server/processes/migrations/0132_auto_20201108_0540.py | CloudReactor/task_manager | 464ca74371064fabb9a21b1f5bacba30360932ab | ["Fair"] | 6 | 2021-11-01T01:35:40.000Z | 2022-02-11T03:33:06.000Z | server/processes/migrations/0132_auto_20201108_0540.py | CloudReactor/task_manager | 464ca74371064fabb9a21b1f5bacba30360932ab | ["Fair"] | null | null | null |
# Generated by Django 2.2.14 on 2020-11-08 05:40
from django.db import migrations
| 24.944444 | 134 | 0.623608 |
e6e0a15a9ec84da1c3d497af8bd4ec8d117edbbd | 4,291 | py | Python | sparsely_lstmvae_main.py | pengkangzaia/usad | 937a29c24632cfa31e0c626cd5b058b3af74ef94 | ["BSD-3-Clause"] | null | null | null | sparsely_lstmvae_main.py | pengkangzaia/usad | 937a29c24632cfa31e0c626cd5b058b3af74ef94 | ["BSD-3-Clause"] | null | null | null | sparsely_lstmvae_main.py | pengkangzaia/usad | 937a29c24632cfa31e0c626cd5b058b3af74ef94 | ["BSD-3-Clause"] | null | null | null |
from model.sparsely_lstm_vae import *
import torch.utils.data as data_utils
from sklearn import preprocessing
from utils.eval_methods import *
device = get_default_device()
# Read data
# normal = pd.read_csv("data/SWaT_Dataset_Normal_v1.csv") # , nrows=1000)
normal = pd.read_csv("data/SWaT/SWaT_Dataset_Normal_v1.csv", nrows=10000) # , nrows=1000)
normal = normal.drop(["Timestamp", "Normal/Attack"], axis=1)
# normal.shape
# Transform all columns into float64
for i in list(normal):
normal[i] = normal[i].apply(lambda x: str(x).replace(",", "."))
normal = normal.astype(float)
# Min-max scale the data to [0, 1]
min_max_scaler = preprocessing.MinMaxScaler()
x = normal.values
x_scaled = min_max_scaler.fit_transform(x)
normal = pd.DataFrame(x_scaled)
# Read data
# attack = pd.read_csv("data/SWaT_Dataset_Attack_v0.csv", sep=";") # , nrows=1000)
attack = pd.read_csv("data/SWaT/SWaT_Dataset_Attack_v0.csv", sep=";", nrows=10000) # , nrows=1000)
labels = [float(label != 'Normal') for label in attack["Normal/Attack"].values]
attack = attack.drop(["Timestamp", "Normal/Attack"], axis=1)
# Transform all columns into float64
for i in list(attack):
attack[i] = attack[i].apply(lambda x: str(x).replace(",", "."))
attack = attack.astype(float)
x = attack.values
x_scaled = min_max_scaler.transform(x)
attack = pd.DataFrame(x_scaled)
############## windows ###################
window_size = 12
# np.arange(window_size)[None, :] has shape (1, 12): column indices (0, 1, ..., 11)
# np.arange(normal.shape[0] - window_size)[:, None] has shape (N - 12, 1): row offsets
# Broadcasting their sum gives an (N - 12, 12) matrix of indices, one sliding window per row
windows_normal = normal.values[np.arange(window_size)[None, :] + np.arange(normal.shape[0] - window_size)[:, None]]
windows_attack = attack.values[np.arange(window_size)[None, :] + np.arange(attack.shape[0] - window_size)[:, None]]
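# For example, with window_size = 3 and 5 rows, the summed index matrix is
#   [[0, 1, 2],
#    [1, 2, 3]]
# so values[...] stacks one length-3 window per row (shape (5 - 3, 3)).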
############## training ###################
# BATCH_SIZE = 7919
BATCH_SIZE = 200
N_EPOCHS = 100
hidden_size = 100
latent_size = 40
# w_size = windows_normal.shape[1] * windows_normal.shape[2] # window_size * feature_size
# z_size = windows_normal.shape[1] * hidden_size # window_size * hidden_size
windows_normal_train = windows_normal[:int(np.floor(.8 * windows_normal.shape[0]))]
windows_normal_val = windows_normal[int(np.floor(.8 * windows_normal.shape[0])):int(np.floor(windows_normal.shape[0]))]
train_loader = torch.utils.data.DataLoader(data_utils.TensorDataset(
torch.from_numpy(windows_normal_train).float().view(([windows_normal_train.shape[0], windows_normal_train.shape[1], windows_normal_train.shape[2]]))
), batch_size=BATCH_SIZE, shuffle=False, num_workers=0)
val_loader = torch.utils.data.DataLoader(data_utils.TensorDataset(
torch.from_numpy(windows_normal_val).float().view(([windows_normal_val.shape[0], windows_normal_train.shape[1], windows_normal_train.shape[2]]))
), batch_size=BATCH_SIZE, shuffle=False, num_workers=0)
test_loader = torch.utils.data.DataLoader(data_utils.TensorDataset(
torch.from_numpy(windows_attack).float().view(([windows_attack.shape[0], windows_attack.shape[1], windows_attack.shape[2]]))
), batch_size=BATCH_SIZE, shuffle=False, num_workers=0)
model = SparselyLstmVae(BATCH_SIZE, window_size, windows_normal.shape[2], hidden_size, latent_size, former_step=3)
model = to_device(model, device)
val_loss, train_loss = training(N_EPOCHS, model, train_loader, val_loader)
plot_simple_history(val_loss)
plot_train_loss(train_loss)
torch.save({'ae': model.state_dict()}, "saved_model/model.pth")
############ testing #################
checkpoint = torch.load("saved_model/model.pth")
model.load_state_dict(checkpoint['ae'])
# Merge the per-batch test results into a single result list
results = testing(model, test_loader)
windows_labels = []
for i in range(len(labels) - window_size):
windows_labels.append(list(np.int_(labels[i:i + window_size])))
# Label a window 1 if any point inside it is anomalous
y_test = [1.0 if (np.sum(window) > 0) else 0 for window in windows_labels]
# Concatenate the per-batch anomaly scores into a single array
y_pred = np.concatenate(
[torch.stack(results[:-1]).flatten().detach().cpu().numpy(),
results[-1].flatten().detach().cpu().numpy()])
y_pred = (y_pred - y_pred.min()) / (y_pred.max() - y_pred.min())
threshold = ROC(y_test, y_pred)
t, th = bf_search(y_pred, y_test, start=0, end=1, step_num=1000, display_freq=50)
| 41.660194 | 152 | 0.723141 |
e6e20c3e769f1a5e89011c872f7f4c1dc10d94e8 | 542 | py | Python | src/demo/tasks.py | MexsonFernandes/AsynchronousTasks-Django-Celery-RabbitMQ-Redis | b64b31cec4ccf8e0dca2cfe9faba40da647b94f7 | ["Apache-2.0"] | 1 | 2019-01-17T09:16:06.000Z | 2019-01-17T09:16:06.000Z | src/demo/tasks.py | MexsonFernandes/Asynchronous_Tasks-Django-Celery-RabbitMQ-Redis | b64b31cec4ccf8e0dca2cfe9faba40da647b94f7 | ["Apache-2.0"] | 7 | 2019-10-20T18:47:34.000Z | 2022-02-10T07:42:18.000Z | src/demo/tasks.py | MexsonFernandes/AsynchronousTasks-Django-Celery-RabbitMQ-Redis | b64b31cec4ccf8e0dca2cfe9faba40da647b94f7 | ["Apache-2.0"] | 2 | 2019-10-20T18:47:59.000Z | 2022-03-02T12:31:54.000Z |
from __future__ import absolute_import, unicode_literals
from dcs.celeryconf import app
import time
from django.core.mail import EmailMessage
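# The task definitions are truncated in this excerpt. A representative task
# consistent with the imports above (assumed; names are hypothetical):
@app.task(bind=True)
def demo_email_task(self, subject, body, recipient):
    time.sleep(5)  # simulate a long-running job handed off to the worker
    EmailMessage(subject, body, to=[recipient]).send()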
| 25.809524 | 91 | 0.680812 |
e6e370a3613328a0a9c46c0e262a69e05fcae601 | 355 | py | Python | pytorch_translate/models/__init__.py | Ayansam1152/translate | 33d397fc25fb1072abd2975c77c602a2d031c6c4 | ["BSD-3-Clause"] | 748 | 2018-05-02T17:12:53.000Z | 2022-03-26T04:44:44.000Z | pytorch_translate/models/__init__.py | Ayansam1152/translate | 33d397fc25fb1072abd2975c77c602a2d031c6c4 | ["BSD-3-Clause"] | 352 | 2018-05-02T19:05:59.000Z | 2022-02-25T16:54:27.000Z | pytorch_translate/models/__init__.py | Ayansam1152/translate | 33d397fc25fb1072abd2975c77c602a2d031c6c4 | ["BSD-3-Clause"] | 193 | 2018-05-02T17:14:56.000Z | 2022-02-24T21:10:56.000Z |
#!/usr/bin/env python3
import importlib
import os
# automatically import any Python files in the models/ directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
model_name = file[: file.find(".py")]
importlib.import_module("pytorch_translate.models." + model_name)
| 29.583333 | 73 | 0.712676 |
e6e3cdee410d18c73bf42cae95012d7ea773e4ae | 808 | py | Python | app/config/secure.py | mapeimapei/awesome-flask-webapp | d0474f447a41e9432a14f9110989166c6595f0fa | ["MIT"] | 2 | 2020-05-08T15:58:44.000Z | 2020-05-09T19:36:34.000Z | app/config/secure.py | mapeimapei/awesome-flask-webapp | d0474f447a41e9432a14f9110989166c6595f0fa | ["MIT"] | null | null | null | app/config/secure.py | mapeimapei/awesome-flask-webapp | d0474f447a41e9432a14f9110989166c6595f0fa | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
__author__ = ''
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:[email protected]:3306/awesome'
SECRET_KEY = '\x88D\xf09\x91\x07\x98\x89\x87\x96\xa0A\xc68\xf9\xecJ:U\x17\xc5V\xbe\x8b\xef\xd7\xd8\xd3\xe6\x98*4'
# Email
MAIL_SERVER = 'smtp.exmail.qq.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USE_TSL = False
MAIL_USERNAME = '[email protected]'
MAIL_PASSWORD = 'Bmwzy1314520'
MAIL_SUBJECT_PREFIX = '[]'
MAIL_SENDER = ' <[email protected]>'
# Record queries so slow ones can be logged
SQLALCHEMY_RECORD_QUERIES = True
# Threshold (in seconds) above which a query counts as slow
DATABASE_QUERY_TIMEOUT = 0.5
SQLALCHEMY_TRACK_MODIFICATIONS = True
WTF_CSRF_CHECK_DEFAULT = False
SQLALCHEMY_ECHO = True
from datetime import timedelta
REMEMBER_COOKIE_DURATION = timedelta(days=30)
PROXY_API = 'http://ip.yushu.im/get'
# PERMANENT_SESSION_LIFETIME = 3600
| 22.444444 | 113 | 0.762376 |
e6e519c34806df836f150fb2649703603da92026 | 1,580 | py | Python | src/users/migrations/0014_auto_20200801_1008.py | aliharby12/Simple-vezeeta-project | feb6df8b354ac284edc645059bea17021169dcfa | ["MIT"] | null | null | null | src/users/migrations/0014_auto_20200801_1008.py | aliharby12/Simple-vezeeta-project | feb6df8b354ac284edc645059bea17021169dcfa | ["MIT"] | 5 | 2021-03-19T12:06:16.000Z | 2022-02-10T11:44:27.000Z | src/users/migrations/0014_auto_20200801_1008.py | aliharby12/Simple-vezeeta-project | feb6df8b354ac284edc645059bea17021169dcfa | ["MIT"] | null | null | null |
# Generated by Django 2.2 on 2020-08-01 08:08
from django.db import migrations, models
import django.db.models.deletion
| 49.375 | 444 | 0.599367 |
e6e54d8b26245cebf1276442b433cc49edf1fc78 | 762 | py | Python | caller_v3/app/api/v1/docker.py | tienthegainz/pipeline_executor_docker_call | b2b9478056e4b818f5963b0b266375fe6d39627a | ["MIT"] | null | null | null | caller_v3/app/api/v1/docker.py | tienthegainz/pipeline_executor_docker_call | b2b9478056e4b818f5963b0b266375fe6d39627a | ["MIT"] | null | null | null | caller_v3/app/api/v1/docker.py | tienthegainz/pipeline_executor_docker_call | b2b9478056e4b818f5963b0b266375fe6d39627a | ["MIT"] | null | null | null |
from typing import Any, List, Callable
from fastapi import APIRouter, HTTPException, status, BackgroundTasks
from app import schemas
from app.core import docker_client
import json
from copy import deepcopy
router = APIRouter()
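# The route implementations are truncated in this excerpt. A representative
# endpoint in the style of the imports above (assumed; it presumes a
# docker-py style client exposed by app.core.docker_client):
@router.get("/images", status_code=status.HTTP_200_OK)
async def list_images() -> List[Any]:
    try:
        return [image.tags for image in docker_client.images.list()]
    except Exception as err:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(err))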
| 28.222222 | 99 | 0.732283 |
e6e829827c4e2ffcbb07be400f025860fb9ae813 | 10,409 | py | Python | keras/models.py | kalyc/keras-apache-mxnet | 5497ebd50a45ccc446b8944ebbe11fb7721a5533 | ["MIT"] | 300 | 2018-04-04T05:01:21.000Z | 2022-02-25T18:56:04.000Z | keras/models.py | kalyc/keras-apache-mxnet | 5497ebd50a45ccc446b8944ebbe11fb7721a5533 | ["MIT"] | 163 | 2018-04-03T17:41:22.000Z | 2021-09-03T16:44:04.000Z | keras/models.py | kalyc/keras-apache-mxnet | 5497ebd50a45ccc446b8944ebbe11fb7721a5533 | ["MIT"] | 72 | 2018-04-21T06:42:30.000Z | 2021-12-26T06:02:42.000Z |
"""Model-related utilities.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import backend as K
from .utils.generic_utils import has_arg
from .utils.generic_utils import to_list
from .engine.input_layer import Input
from .engine.input_layer import InputLayer
from .engine.training import Model
from .engine.sequential import Sequential
from .engine.saving import save_model
from .engine.saving import load_model
from .engine.saving import model_from_config
from .engine.saving import model_from_yaml
from .engine.saving import model_from_json
from .engine.saving import save_mxnet_model
try:
import h5py
except ImportError:
h5py = None
def _clone_functional_model(model, input_tensors=None):
"""Clone a functional `Model` instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
# Arguments
model: Instance of `Model`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
# Returns
An instance of `Model` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
# Raises
ValueError: in case of invalid `model` argument value.
"""
if not isinstance(model, Model):
raise ValueError('Expected `model` argument '
'to be a `Model` instance, got ', model)
if isinstance(model, Sequential):
raise ValueError('Expected `model` argument '
'to be a functional `Model` instance, '
'got a `Sequential` instance instead:', model)
layer_map = {} # Cache for created layers.
tensor_map = {} # Map {reference_tensor: (corresponding_tensor, mask)}
if input_tensors is None:
# Create placeholders to build the model on top of.
input_layers = []
input_tensors = []
for layer in model._input_layers:
input_tensor = Input(batch_shape=layer.batch_input_shape,
dtype=layer.dtype,
sparse=layer.sparse,
name=layer.name)
input_tensors.append(input_tensor)
# Cache newly created input layer.
newly_created_input_layer = input_tensor._keras_history[0]
layer_map[layer] = newly_created_input_layer
for _original, _cloned in zip(model._input_layers, input_layers):
layer_map[_original] = _cloned
else:
# Make sure that all input tensors come from a Keras layer.
# If tensor comes from an input layer: cache the input layer.
input_tensors = to_list(input_tensors)
_input_tensors = []
for i, x in enumerate(input_tensors):
if not K.is_keras_tensor(x):
name = model._input_layers[i].name
input_tensor = Input(tensor=x,
name='input_wrapper_for_' + name)
_input_tensors.append(input_tensor)
# Cache newly created input layer.
original_input_layer = x._keras_history[0]
newly_created_input_layer = input_tensor._keras_history[0]
layer_map[original_input_layer] = newly_created_input_layer
else:
_input_tensors.append(x)
input_tensors = _input_tensors
for x, y in zip(model.inputs, input_tensors):
tensor_map[x] = (y, None) # tensor, mask
# Iterated over every node in the reference model, in depth order.
depth_keys = list(model._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
for depth in depth_keys:
nodes = model._nodes_by_depth[depth]
for node in nodes:
# Recover the corresponding layer.
layer = node.outbound_layer
# Get or create layer.
if layer not in layer_map:
# Clone layer.
new_layer = layer.__class__.from_config(layer.get_config())
layer_map[layer] = new_layer
layer = new_layer
else:
# Reuse previously cloned layer.
layer = layer_map[layer]
# Don't call InputLayer multiple times.
if isinstance(layer, InputLayer):
continue
# Gather inputs to call the new layer.
reference_input_tensors = node.input_tensors
reference_output_tensors = node.output_tensors
# If all previous input tensors are available in tensor_map,
# then call node.inbound_layer on them.
computed_data = [] # List of tuples (input, mask).
for x in reference_input_tensors:
if x in tensor_map:
computed_data.append(tensor_map[x])
if len(computed_data) == len(reference_input_tensors):
# Call layer.
if node.arguments:
kwargs = node.arguments
else:
kwargs = {}
if len(computed_data) == 1:
computed_tensor, computed_mask = computed_data[0]
if has_arg(layer.call, 'mask'):
if 'mask' not in kwargs:
kwargs['mask'] = computed_mask
output_tensors = to_list(
layer(computed_tensor, **kwargs))
output_masks = to_list(
layer.compute_mask(computed_tensor,
computed_mask))
computed_tensors = [computed_tensor]
computed_masks = [computed_mask]
else:
computed_tensors = [x[0] for x in computed_data]
computed_masks = [x[1] for x in computed_data]
if has_arg(layer.call, 'mask'):
if 'mask' not in kwargs:
kwargs['mask'] = computed_masks
output_tensors = to_list(
layer(computed_tensors, **kwargs))
output_masks = to_list(
layer.compute_mask(computed_tensors,
computed_masks))
# Update tensor_map.
for x, y, mask in zip(reference_output_tensors,
output_tensors,
output_masks):
tensor_map[x] = (y, mask)
# Check that we did compute the model outputs,
# then instantiate a new model from inputs and outputs.
output_tensors = []
for x in model.outputs:
assert x in tensor_map, 'Could not compute output ' + str(x)
tensor, _ = tensor_map[x]
output_tensors.append(tensor)
return Model(input_tensors, output_tensors, name=model.name)
def _clone_sequential_model(model, input_tensors=None):
"""Clone a `Sequential` model instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
# Arguments
model: Instance of `Sequential`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
# Returns
An instance of `Sequential` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
# Raises
ValueError: in case of invalid `model` argument value.
"""
if not isinstance(model, Sequential):
raise ValueError('Expected `model` argument '
'to be a `Sequential` model instance, '
'but got:', model)
    def clone(layer):
        # Build a fresh copy of the layer from its config (new weights).
        return layer.__class__.from_config(layer.get_config())

    layers = [clone(layer) for layer in model.layers]
if input_tensors is None:
return Sequential(layers=layers, name=model.name)
else:
if len(to_list(input_tensors)) != 1:
raise ValueError('To clone a `Sequential` model, we expect '
' at most one tensor '
'as part of `input_tensors`.')
x = to_list(input_tensors)[0]
if K.is_keras_tensor(x):
origin_layer = x._keras_history[0]
if isinstance(origin_layer, InputLayer):
return Sequential(layers=[origin_layer] + layers,
name=model.name)
else:
raise ValueError('Cannot clone a `Sequential` model on top '
'of a tensor that comes from a Keras layer '
'other than an `InputLayer`. '
'Use the functional API instead.')
input_tensor = Input(tensor=x,
name='input_wrapper_for_' + str(x.name))
input_layer = input_tensor._keras_history[0]
return Sequential(layers=[input_layer] + layers, name=model.name)
def clone_model(model, input_tensors=None):
"""Clone any `Model` instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
# Arguments
model: Instance of `Model`
(could be a functional model or a Sequential model).
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
# Returns
An instance of `Model` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
# Raises
ValueError: in case of invalid `model` argument value.
"""
if isinstance(model, Sequential):
return _clone_sequential_model(model, input_tensors=input_tensors)
else:
return _clone_functional_model(model, input_tensors=input_tensors)
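# Example usage (illustrative only, not part of the original module):
#
#   from keras.models import Sequential, clone_model
#   from keras.layers import Dense
#
#   model = Sequential([Dense(4, input_shape=(8,))])
#   fresh = clone_model(model)              # same architecture, new weights
#   fresh.set_weights(model.get_weights())  # copy weights explicitly if needed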
| 41.142292 | 77 | 0.593525 |
e6e833fb51a1ec7a1130669c82455b2f1f57a22e | 53,602 | py | Python | pythonFiles/tests/testing_tools/adapter/test_functional.py | erinxocon/vscode-python | e53f9061d16467a9ae2d8995a9a5f3cfa0f444e1 | ["MIT"] | null | null | null | pythonFiles/tests/testing_tools/adapter/test_functional.py | erinxocon/vscode-python | e53f9061d16467a9ae2d8995a9a5f3cfa0f444e1 | ["MIT"] | null | null | null | pythonFiles/tests/testing_tools/adapter/test_functional.py | erinxocon/vscode-python | e53f9061d16467a9ae2d8995a9a5f3cfa0f444e1 | ["MIT"] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from __future__ import unicode_literals
import json
import os
import os.path
import subprocess
import sys
import unittest
import pytest
from ...__main__ import TESTING_TOOLS_ROOT
CWD = os.getcwd()
DATA_DIR = os.path.join(os.path.dirname(__file__), '.data')
SCRIPT = os.path.join(TESTING_TOOLS_ROOT, 'run_adapter.py')
def fix_path(nodeid):
    # Helper assumed for this excerpt (the original definition is not shown):
    # normalize '/'-separated test IDs to the host OS path separator.
    return nodeid.replace('/', os.path.sep)
COMPLEX = {
'root': None,
'rootid': '.',
'parents': [
#
{'id': fix_path('./tests'),
'kind': 'folder',
'name': 'tests',
'parentid': '.',
},
# +++
{'id': fix_path('./tests/test_42-43.py'),
'kind': 'file',
'name': 'test_42-43.py',
'parentid': fix_path('./tests'),
},
# +++
{'id': fix_path('./tests/test_42.py'),
'kind': 'file',
'name': 'test_42.py',
'parentid': fix_path('./tests'),
},
# +++
{'id': fix_path('./tests/test_doctest.txt'),
'kind': 'file',
'name': 'test_doctest.txt',
'parentid': fix_path('./tests'),
},
# +++
{'id': fix_path('./tests/test_foo.py'),
'kind': 'file',
'name': 'test_foo.py',
'parentid': fix_path('./tests'),
},
# +++
{'id': fix_path('./tests/test_mixed.py'),
'kind': 'file',
'name': 'test_mixed.py',
'parentid': fix_path('./tests'),
},
{'id': fix_path('./tests/test_mixed.py::MyTests'),
'kind': 'suite',
'name': 'MyTests',
'parentid': fix_path('./tests/test_mixed.py'),
},
{'id': fix_path('./tests/test_mixed.py::TestMySuite'),
'kind': 'suite',
'name': 'TestMySuite',
'parentid': fix_path('./tests/test_mixed.py'),
},
# +++
{'id': fix_path('./tests/test_pytest.py'),
'kind': 'file',
'name': 'test_pytest.py',
'parentid': fix_path('./tests'),
},
{'id': fix_path('./tests/test_pytest.py::TestEggs'),
'kind': 'suite',
'name': 'TestEggs',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam'),
'kind': 'suite',
'name': 'TestParam',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam::test_param_13'),
'kind': 'function',
'name': 'test_param_13',
'parentid': fix_path('./tests/test_pytest.py::TestParam'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll'),
'kind': 'suite',
'name': 'TestParamAll',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13'),
'kind': 'function',
'name': 'test_param_13',
'parentid': fix_path('./tests/test_pytest.py::TestParamAll'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13'),
'kind': 'function',
'name': 'test_spam_13',
'parentid': fix_path('./tests/test_pytest.py::TestParamAll'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam'),
'kind': 'suite',
'name': 'TestSpam',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam::TestHam'),
'kind': 'suite',
'name': 'TestHam',
'parentid': fix_path('./tests/test_pytest.py::TestSpam'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam::TestHam::TestEggs'),
'kind': 'suite',
'name': 'TestEggs',
'parentid': fix_path('./tests/test_pytest.py::TestSpam::TestHam'),
},
{'id': fix_path('./tests/test_pytest.py::test_fixture_param'),
'kind': 'function',
'name': 'test_fixture_param',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_01'),
'kind': 'function',
'name': 'test_param_01',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_11'),
'kind': 'function',
'name': 'test_param_11',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13'),
'kind': 'function',
'name': 'test_param_13',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_markers'),
'kind': 'function',
'name': 'test_param_13_markers',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_repeat'),
'kind': 'function',
'name': 'test_param_13_repeat',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_skipped'),
'kind': 'function',
'name': 'test_param_13_skipped',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13'),
'kind': 'function',
'name': 'test_param_23_13',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_raises'),
'kind': 'function',
'name': 'test_param_23_raises',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33'),
'kind': 'function',
'name': 'test_param_33',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33_ids'),
'kind': 'function',
'name': 'test_param_33_ids',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_fixture'),
'kind': 'function',
'name': 'test_param_fixture',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_mark_fixture'),
'kind': 'function',
'name': 'test_param_mark_fixture',
'parentid': fix_path('./tests/test_pytest.py'),
},
# +++
{'id': fix_path('./tests/test_pytest_param.py'),
'kind': 'file',
'name': 'test_pytest_param.py',
'parentid': fix_path('./tests'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll'),
'kind': 'suite',
'name': 'TestParamAll',
'parentid': fix_path('./tests/test_pytest_param.py'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13'),
'kind': 'function',
'name': 'test_param_13',
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13'),
'kind': 'function',
'name': 'test_spam_13',
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll'),
},
{'id': fix_path('./tests/test_pytest_param.py::test_param_13'),
'kind': 'function',
'name': 'test_param_13',
'parentid': fix_path('./tests/test_pytest_param.py'),
},
# +++
{'id': fix_path('./tests/test_unittest.py'),
'kind': 'file',
'name': 'test_unittest.py',
'parentid': fix_path('./tests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests'),
'kind': 'suite',
'name': 'MyTests',
'parentid': fix_path('./tests/test_unittest.py'),
},
{'id': fix_path('./tests/test_unittest.py::OtherTests'),
'kind': 'suite',
'name': 'OtherTests',
'parentid': fix_path('./tests/test_unittest.py'),
},
##
{'id': fix_path('./tests/v'),
'kind': 'folder',
'name': 'v',
'parentid': fix_path('./tests'),
},
## +++
{'id': fix_path('./tests/v/test_eggs.py'),
'kind': 'file',
'name': 'test_eggs.py',
'parentid': fix_path('./tests/v'),
},
{'id': fix_path('./tests/v/test_eggs.py::TestSimple'),
'kind': 'suite',
'name': 'TestSimple',
'parentid': fix_path('./tests/v/test_eggs.py'),
},
## +++
{'id': fix_path('./tests/v/test_ham.py'),
'kind': 'file',
'name': 'test_ham.py',
'parentid': fix_path('./tests/v'),
},
## +++
{'id': fix_path('./tests/v/test_spam.py'),
'kind': 'file',
'name': 'test_spam.py',
'parentid': fix_path('./tests/v'),
},
##
{'id': fix_path('./tests/w'),
'kind': 'folder',
'name': 'w',
'parentid': fix_path('./tests'),
},
## +++
{'id': fix_path('./tests/w/test_spam.py'),
'kind': 'file',
'name': 'test_spam.py',
'parentid': fix_path('./tests/w'),
},
## +++
{'id': fix_path('./tests/w/test_spam_ex.py'),
'kind': 'file',
'name': 'test_spam_ex.py',
'parentid': fix_path('./tests/w'),
},
##
{'id': fix_path('./tests/x'),
'kind': 'folder',
'name': 'x',
'parentid': fix_path('./tests'),
},
###
{'id': fix_path('./tests/x/y'),
'kind': 'folder',
'name': 'y',
'parentid': fix_path('./tests/x'),
},
####
{'id': fix_path('./tests/x/y/z'),
'kind': 'folder',
'name': 'z',
'parentid': fix_path('./tests/x/y'),
},
#####
{'id': fix_path('./tests/x/y/z/a'),
'kind': 'folder',
'name': 'a',
'parentid': fix_path('./tests/x/y/z'),
},
##### +++
{'id': fix_path('./tests/x/y/z/a/test_spam.py'),
'kind': 'file',
'name': 'test_spam.py',
'parentid': fix_path('./tests/x/y/z/a'),
},
#####
{'id': fix_path('./tests/x/y/z/b'),
'kind': 'folder',
'name': 'b',
'parentid': fix_path('./tests/x/y/z'),
},
##### +++
{'id': fix_path('./tests/x/y/z/b/test_spam.py'),
'kind': 'file',
'name': 'test_spam.py',
'parentid': fix_path('./tests/x/y/z/b'),
},
#### +++
{'id': fix_path('./tests/x/y/z/test_ham.py'),
'kind': 'file',
'name': 'test_ham.py',
'parentid': fix_path('./tests/x/y/z'),
},
],
'tests': [
##########
{'id': fix_path('./tests/test_42-43.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_42-43.py:2'),
'markers': [],
'parentid': fix_path('./tests/test_42-43.py'),
},
#####
{'id': fix_path('./tests/test_42.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_42.py:2'),
'markers': [],
'parentid': fix_path('./tests/test_42.py'),
},
#####
{'id': fix_path('./tests/test_doctest.txt::test_doctest.txt'),
'name': 'test_doctest.txt',
'source': fix_path('./tests/test_doctest.txt:1'),
'markers': [],
'parentid': fix_path('./tests/test_doctest.txt'),
},
#####
{'id': fix_path('./tests/test_foo.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_foo.py:3'),
'markers': [],
'parentid': fix_path('./tests/test_foo.py'),
},
#####
{'id': fix_path('./tests/test_mixed.py::test_top_level'),
'name': 'test_top_level',
'source': fix_path('./tests/test_mixed.py:5'),
'markers': [],
'parentid': fix_path('./tests/test_mixed.py'),
},
{'id': fix_path('./tests/test_mixed.py::test_skipped'),
'name': 'test_skipped',
'source': fix_path('./tests/test_mixed.py:9'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_mixed.py'),
},
{'id': fix_path('./tests/test_mixed.py::TestMySuite::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_mixed.py:16'),
'markers': [],
'parentid': fix_path('./tests/test_mixed.py::TestMySuite'),
},
{'id': fix_path('./tests/test_mixed.py::MyTests::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_mixed.py:22'),
'markers': [],
'parentid': fix_path('./tests/test_mixed.py::MyTests'),
},
{'id': fix_path('./tests/test_mixed.py::MyTests::test_skipped'),
'name': 'test_skipped',
'source': fix_path('./tests/test_mixed.py:25'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_mixed.py::MyTests'),
},
#####
{'id': fix_path('./tests/test_pytest.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_pytest.py:6'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_failure'),
'name': 'test_failure',
'source': fix_path('./tests/test_pytest.py:10'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_runtime_skipped'),
'name': 'test_runtime_skipped',
'source': fix_path('./tests/test_pytest.py:14'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_runtime_failed'),
'name': 'test_runtime_failed',
'source': fix_path('./tests/test_pytest.py:18'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_raises'),
'name': 'test_raises',
'source': fix_path('./tests/test_pytest.py:22'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_skipped'),
'name': 'test_skipped',
'source': fix_path('./tests/test_pytest.py:26'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_maybe_skipped'),
'name': 'test_maybe_skipped',
'source': fix_path('./tests/test_pytest.py:31'),
'markers': ['skip-if'],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_known_failure'),
'name': 'test_known_failure',
'source': fix_path('./tests/test_pytest.py:36'),
'markers': ['expected-failure'],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_warned'),
'name': 'test_warned',
'source': fix_path('./tests/test_pytest.py:41'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_custom_marker'),
'name': 'test_custom_marker',
'source': fix_path('./tests/test_pytest.py:46'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_multiple_markers'),
'name': 'test_multiple_markers',
'source': fix_path('./tests/test_pytest.py:51'),
'markers': ['expected-failure', 'skip', 'skip-if'],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_dynamic_1'),
'name': 'test_dynamic_1',
'source': fix_path('./tests/test_pytest.py:62'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_dynamic_2'),
'name': 'test_dynamic_2',
'source': fix_path('./tests/test_pytest.py:62'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_dynamic_3'),
'name': 'test_dynamic_3',
'source': fix_path('./tests/test_pytest.py:62'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_pytest.py:70'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestSpam'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam::test_skipped'),
'name': 'test_skipped',
'source': fix_path('./tests/test_pytest.py:73'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py::TestSpam'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam::TestHam::TestEggs::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_pytest.py:81'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestSpam::TestHam::TestEggs'),
},
{'id': fix_path('./tests/test_pytest.py::TestEggs::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_pytest.py:93'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestEggs'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_01[]'),
'name': 'test_param_01[]',
'source': fix_path('./tests/test_pytest.py:103'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_01'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_11[x0]'),
'name': 'test_param_11[x0]',
'source': fix_path('./tests/test_pytest.py:108'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_11'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13[x0]'),
'name': 'test_param_13[x0]',
'source': fix_path('./tests/test_pytest.py:113'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13[x1]'),
'name': 'test_param_13[x1]',
'source': fix_path('./tests/test_pytest.py:113'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13[x2]'),
'name': 'test_param_13[x2]',
'source': fix_path('./tests/test_pytest.py:113'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_repeat[x0]'),
'name': 'test_param_13_repeat[x0]',
'source': fix_path('./tests/test_pytest.py:118'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_repeat'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_repeat[x1]'),
'name': 'test_param_13_repeat[x1]',
'source': fix_path('./tests/test_pytest.py:118'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_repeat'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_repeat[x2]'),
'name': 'test_param_13_repeat[x2]',
'source': fix_path('./tests/test_pytest.py:118'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_repeat'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33[1-1-1]'),
'name': 'test_param_33[1-1-1]',
'source': fix_path('./tests/test_pytest.py:123'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33[3-4-5]'),
'name': 'test_param_33[3-4-5]',
'source': fix_path('./tests/test_pytest.py:123'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33[0-0-0]'),
'name': 'test_param_33[0-0-0]',
'source': fix_path('./tests/test_pytest.py:123'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33_ids[v1]'),
'name': 'test_param_33_ids[v1]',
'source': fix_path('./tests/test_pytest.py:128'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33_ids'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33_ids[v2]'),
'name': 'test_param_33_ids[v2]',
'source': fix_path('./tests/test_pytest.py:128'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33_ids'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33_ids[v3]'),
'name': 'test_param_33_ids[v3]',
'source': fix_path('./tests/test_pytest.py:128'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33_ids'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[1-1-z0]'),
'name': 'test_param_23_13[1-1-z0]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[1-1-z1]'),
'name': 'test_param_23_13[1-1-z1]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[1-1-z2]'),
'name': 'test_param_23_13[1-1-z2]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[3-4-z0]'),
'name': 'test_param_23_13[3-4-z0]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[3-4-z1]'),
'name': 'test_param_23_13[3-4-z1]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[3-4-z2]'),
'name': 'test_param_23_13[3-4-z2]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[0-0-z0]'),
'name': 'test_param_23_13[0-0-z0]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[0-0-z1]'),
'name': 'test_param_23_13[0-0-z1]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[0-0-z2]'),
'name': 'test_param_23_13[0-0-z2]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_markers[x0]'),
'name': 'test_param_13_markers[x0]',
'source': fix_path('./tests/test_pytest.py:140'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_markers'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_markers[???]'),
'name': 'test_param_13_markers[???]',
'source': fix_path('./tests/test_pytest.py:140'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_markers'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_markers[2]'),
'name': 'test_param_13_markers[2]',
'source': fix_path('./tests/test_pytest.py:140'),
'markers': ['expected-failure'],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_markers'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_skipped[x0]'),
'name': 'test_param_13_skipped[x0]',
'source': fix_path('./tests/test_pytest.py:149'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_skipped'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_skipped[x1]'),
'name': 'test_param_13_skipped[x1]',
'source': fix_path('./tests/test_pytest.py:149'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_skipped'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_skipped[x2]'),
'name': 'test_param_13_skipped[x2]',
'source': fix_path('./tests/test_pytest.py:149'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_skipped'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_raises[1-None]'),
'name': 'test_param_23_raises[1-None]',
'source': fix_path('./tests/test_pytest.py:155'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_raises'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_raises[1.0-None]'),
'name': 'test_param_23_raises[1.0-None]',
'source': fix_path('./tests/test_pytest.py:155'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_raises'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_raises[2-catch2]'),
'name': 'test_param_23_raises[2-catch2]',
'source': fix_path('./tests/test_pytest.py:155'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_raises'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_pytest.py:164'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParam'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam::test_param_13[x0]'),
'name': 'test_param_13[x0]',
'source': fix_path('./tests/test_pytest.py:167'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParam::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam::test_param_13[x1]'),
'name': 'test_param_13[x1]',
'source': fix_path('./tests/test_pytest.py:167'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParam::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam::test_param_13[x2]'),
'name': 'test_param_13[x2]',
'source': fix_path('./tests/test_pytest.py:167'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParam::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13[x0]'),
'name': 'test_param_13[x0]',
'source': fix_path('./tests/test_pytest.py:175'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13[x1]'),
'name': 'test_param_13[x1]',
'source': fix_path('./tests/test_pytest.py:175'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13[x2]'),
'name': 'test_param_13[x2]',
'source': fix_path('./tests/test_pytest.py:175'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13[x0]'),
'name': 'test_spam_13[x0]',
'source': fix_path('./tests/test_pytest.py:178'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13[x1]'),
'name': 'test_spam_13[x1]',
'source': fix_path('./tests/test_pytest.py:178'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13[x2]'),
'name': 'test_spam_13[x2]',
'source': fix_path('./tests/test_pytest.py:178'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_fixture'),
'name': 'test_fixture',
'source': fix_path('./tests/test_pytest.py:192'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_mark_fixture'),
'name': 'test_mark_fixture',
'source': fix_path('./tests/test_pytest.py:196'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_fixture[x0]'),
'name': 'test_param_fixture[x0]',
'source': fix_path('./tests/test_pytest.py:201'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_fixture[x1]'),
'name': 'test_param_fixture[x1]',
'source': fix_path('./tests/test_pytest.py:201'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_fixture[x2]'),
'name': 'test_param_fixture[x2]',
'source': fix_path('./tests/test_pytest.py:201'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_mark_fixture[x0]'),
'name': 'test_param_mark_fixture[x0]',
'source': fix_path('./tests/test_pytest.py:207'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_mark_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_mark_fixture[x1]'),
'name': 'test_param_mark_fixture[x1]',
'source': fix_path('./tests/test_pytest.py:207'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_mark_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_mark_fixture[x2]'),
'name': 'test_param_mark_fixture[x2]',
'source': fix_path('./tests/test_pytest.py:207'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_mark_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_fixture_param[spam]'),
'name': 'test_fixture_param[spam]',
'source': fix_path('./tests/test_pytest.py:216'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_fixture_param'),
},
{'id': fix_path('./tests/test_pytest.py::test_fixture_param[eggs]'),
'name': 'test_fixture_param[eggs]',
'source': fix_path('./tests/test_pytest.py:216'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_fixture_param'),
},
######
{'id': fix_path('./tests/test_pytest_param.py::test_param_13[x0]'),
'name': 'test_param_13[x0]',
'source': fix_path('./tests/test_pytest_param.py:8'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::test_param_13[x1]'),
'name': 'test_param_13[x1]',
'source': fix_path('./tests/test_pytest_param.py:8'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::test_param_13[x2]'),
'name': 'test_param_13[x2]',
'source': fix_path('./tests/test_pytest_param.py:8'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13[x0]'),
'name': 'test_param_13[x0]',
'source': fix_path('./tests/test_pytest_param.py:14'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13[x1]'),
'name': 'test_param_13[x1]',
'source': fix_path('./tests/test_pytest_param.py:14'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13[x2]'),
'name': 'test_param_13[x2]',
'source': fix_path('./tests/test_pytest_param.py:14'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13[x0]'),
'name': 'test_spam_13[x0]',
'source': fix_path('./tests/test_pytest_param.py:17'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13[x1]'),
'name': 'test_spam_13[x1]',
'source': fix_path('./tests/test_pytest_param.py:17'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13[x2]'),
'name': 'test_spam_13[x2]',
'source': fix_path('./tests/test_pytest_param.py:17'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13'),
},
######
{'id': fix_path('./tests/test_unittest.py::MyTests::test_dynamic_'),
'name': 'test_dynamic_',
'source': fix_path('./tests/test_unittest.py:54'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_failure'),
'name': 'test_failure',
'source': fix_path('./tests/test_unittest.py:34'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_known_failure'),
'name': 'test_known_failure',
'source': fix_path('./tests/test_unittest.py:37'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_maybe_not_skipped'),
'name': 'test_maybe_not_skipped',
'source': fix_path('./tests/test_unittest.py:17'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_maybe_skipped'),
'name': 'test_maybe_skipped',
'source': fix_path('./tests/test_unittest.py:13'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_unittest.py:6'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_skipped'),
'name': 'test_skipped',
'source': fix_path('./tests/test_unittest.py:9'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_skipped_inside'),
'name': 'test_skipped_inside',
'source': fix_path('./tests/test_unittest.py:21'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_with_nested_subtests'),
'name': 'test_with_nested_subtests',
'source': fix_path('./tests/test_unittest.py:46'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_with_subtests'),
'name': 'test_with_subtests',
'source': fix_path('./tests/test_unittest.py:41'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::OtherTests::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_unittest.py:61'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::OtherTests'),
},
###########
{'id': fix_path('./tests/v/test_eggs.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/v/spam.py:2'),
'markers': [],
'parentid': fix_path('./tests/v/test_eggs.py'),
},
{'id': fix_path('./tests/v/test_eggs.py::TestSimple::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/v/spam.py:8'),
'markers': [],
'parentid': fix_path('./tests/v/test_eggs.py::TestSimple'),
},
######
{'id': fix_path('./tests/v/test_ham.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/v/spam.py:2'),
'markers': [],
'parentid': fix_path('./tests/v/test_ham.py'),
},
{'id': fix_path('./tests/v/test_ham.py::test_not_hard'),
'name': 'test_not_hard',
'source': fix_path('./tests/v/spam.py:2'),
'markers': [],
'parentid': fix_path('./tests/v/test_ham.py'),
},
######
{'id': fix_path('./tests/v/test_spam.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/v/spam.py:2'),
'markers': [],
'parentid': fix_path('./tests/v/test_spam.py'),
},
{'id': fix_path('./tests/v/test_spam.py::test_simpler'),
'name': 'test_simpler',
'source': fix_path('./tests/v/test_spam.py:4'),
'markers': [],
'parentid': fix_path('./tests/v/test_spam.py'),
},
###########
{'id': fix_path('./tests/w/test_spam.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/w/test_spam.py:4'),
'markers': [],
'parentid': fix_path('./tests/w/test_spam.py'),
},
{'id': fix_path('./tests/w/test_spam_ex.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/w/test_spam_ex.py:4'),
'markers': [],
'parentid': fix_path('./tests/w/test_spam_ex.py'),
},
###########
{'id': fix_path('./tests/x/y/z/test_ham.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/x/y/z/test_ham.py:2'),
'markers': [],
'parentid': fix_path('./tests/x/y/z/test_ham.py'),
},
######
{'id': fix_path('./tests/x/y/z/a/test_spam.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/x/y/z/a/test_spam.py:11'),
'markers': [],
'parentid': fix_path('./tests/x/y/z/a/test_spam.py'),
},
{'id': fix_path('./tests/x/y/z/b/test_spam.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/x/y/z/b/test_spam.py:7'),
'markers': [],
'parentid': fix_path('./tests/x/y/z/b/test_spam.py'),
},
],
}
| 43.828291 | 96 | 0.491605 |
e6e86dd990b3c5cac611e5ac9c031855b2eafefb
| 2,223 |
py
|
Python
|
mmgp/kernels/wavelet_slice.py
|
axdahl/SC-MMGP
|
c6cd9d9de66bb7074925a4b6485f10a74bdd9f68
|
[
"Apache-2.0"
] | null | null | null |
mmgp/kernels/wavelet_slice.py
|
axdahl/SC-MMGP
|
c6cd9d9de66bb7074925a4b6485f10a74bdd9f68
|
[
"Apache-2.0"
] | null | null | null |
mmgp/kernels/wavelet_slice.py
|
axdahl/SC-MMGP
|
c6cd9d9de66bb7074925a4b6485f10a74bdd9f68
|
[
"Apache-2.0"
] | null | null | null |
'''
Wavelet kernel
slice allows kernel operation on feature subset
active_dims is iterable of feature dimensions to extract
input_dim must equal dimension defined by active_dims
'''
import numpy as np
import tensorflow as tf
from .. import util
from . import kernel
from .kernel_extras import *
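
# The kernel class itself was not captured in this snippet. A minimal sketch
# (assumption) of the slicing behaviour the docstring describes: extracting
# the `active_dims` feature columns before the wavelet kernel is applied.
#
#     def _slice(self, points1, points2):
#         # keep only the active feature dimensions of each input block
#         points1 = tf.gather(points1, self.active_dims, axis=-1)
#         points2 = tf.gather(points2, self.active_dims, axis=-1)
#         return points1, points2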
| 34.734375 | 113 | 0.597391 |
e6e91782ecbf3d082de6c4e80c1d94b9a36175e3
| 8,084 |
py
|
Python
|
transform.py
|
latenite4/python3
|
30e367471ba48e5fc0fb07327b636fcb9959e3e0
|
[
"Apache-2.0"
] | null | null | null |
transform.py
|
latenite4/python3
|
30e367471ba48e5fc0fb07327b636fcb9959e3e0
|
[
"Apache-2.0"
] | null | null | null |
transform.py
|
latenite4/python3
|
30e367471ba48e5fc0fb07327b636fcb9959e3e0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# program to parse PNG images and transform them
# cmd: python3 transform.py
# you must have local input/ and output/ directories
#
# name: R. Melton
# date: 12/27/20
# cmdline: python transform.py cmd show image='city.png' --ulx=1 --uly=2 --brx=0 --bry=9
# python transform.py show city.png
# python transform.py blur city.png
from image import Image
import numpy as np
import time, os, argparse, string
#from tkinter import *
import imghdr
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
#adjust the contrast by increasing the difference from a
#user-defined midpoint
# blur an image
# check for necessary parts of the runtime environment
# set up command line args and params
# optional args have --
# fixed (required args do not have --)
#def show_image(filename):
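
# Minimal reconstructions (assumptions) of the helpers the __main__ block
# calls; their definitions were not captured in this snippet. blur(),
# adjust_brightness(), adjust_contrast(), apply_kernel() and combine_images()
# are also referenced below, but their bodies cannot be recovered from context
# and are left out.
def arg_init():
    parser = argparse.ArgumentParser(description='parse and transform PNG images')
    parser.add_argument('cmd', help='command to run, e.g. show or blur')
    parser.add_argument('image', help='image filename, e.g. city.png')
    parser.add_argument('-v', action='store_true', help='print execution time')
    return parser.parse_args()

def check_env(filename):
    # per the header comment, local input/ and output/ directories must exist
    for d in ('input', 'output'):
        if not os.path.isdir(d):
            raise SystemExit(f'missing required directory: {d}/')

def show_image(filename):
    # display an image file with matplotlib (path handling is a guess)
    plt.imshow(mpimg.imread(filename))
    plt.axis('off')
    plt.show()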
if __name__ == '__main__':
args = arg_init()
check_env(args.image)
lake = Image(filename = 'lake.png')
city = Image(filename='city.png')
start_time = time.time()
# brightened_im = adjust_brightness(lake, 1.7)
# brightened_im.write_image('brightened.png')
# darkened_im = adjust_brightness(lake, 0.3)
# darkened_im.write_image('darkened.png')
# incr_contrast = adjust_contrast(lake, 2,0.5)
# incr_contrast.write_image('incr_contrast.png')
# decr_contrast = adjust_contrast(lake, 0.5,0.5)
# decr_contrast.write_image('decr_contrast.png')
# blur_3 = blur(city,3)
# blur_3.write_image('blur_k3.png')
# blur_15 = blur(city,15)
# blur_15.write_image('blur_k15.png')
# let's apply a sobel kernel on the x and y axis
# sobel_x = apply_kernel(city, np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]))
# sobel_x.write_image('edge_x.png')
# sobel_y = apply_kernel(city, np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]]))
# sobel_y.write_image('edge_y.png')
# # this will show x and y edges
# sobel_xy = combine_images(sobel_x, sobel_y)
# sobel_xy.write_image('edge_xy.png')
if args.cmd == "show" and args.image:
show_image(args.image)
if args.cmd == "blur" and args.image:
    blur_15 = blur(Image(filename=args.image), 15)
    blur_15.write_image(args.image + 'blur_k15.png')
    show_image(args.image + 'blur_k15.png')
if args.v:
print(f'total execution duration: {time.time() - start_time}s')
| 35.30131 | 162 | 0.671326 |
e6e98c6da8123831026901d34d51a2a66f9be3c8
| 4,563 |
py
|
Python
|
plugins/wyr.py
|
Jeglet/pcbot
|
89178d4982151adb2fadfacdc3080e46cda9e891
|
[
"MIT"
] | null | null | null |
plugins/wyr.py
|
Jeglet/pcbot
|
89178d4982151adb2fadfacdc3080e46cda9e891
|
[
"MIT"
] | null | null | null |
plugins/wyr.py
|
Jeglet/pcbot
|
89178d4982151adb2fadfacdc3080e46cda9e891
|
[
"MIT"
] | null | null | null |
""" Would you rather? This plugin includes would you rather functionality
"""
import asyncio
import random
import re
import discord
import bot
import plugins
from pcbot import Config
client = plugins.client # type: bot.Client
db = Config("would-you-rather", data=dict(timeout=10, responses=["**{name}** would **{choice}**!"], questions=[]),
pretty=True)
command_pattern = re.compile(r"(.+)(?:\s+or|\s*,)\s+([^?]+)\?*")
sessions = set() # All running would you rather's are in this set
def get_choice(choices: list, choice: str):
""" Get the chosen option. This accept 1 and 2 as numbers. """
if choice == "1":
return 0
if choice == "2":
return 1
choices = list(map(str.lower, choices))
words = list(map(str.split, choices))
# Go through all words in the given message, and find any words unique to a choice
for word in choice.lower().split():
if word in words[0] and word not in words[1]:
return 0
elif word in words[1] and word not in words[0]:
return 1
# Invalid choice
return None
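
# Illustrative examples (not part of the original module):
#   get_choice(["drink tea", "drink coffee"], "coffee")  -> 1  (word unique to choice 2)
#   get_choice(["drink tea", "drink coffee"], "drink")   -> None  (ambiguous)
#   get_choice(["drink tea", "drink coffee"], "1")       -> 0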
| 34.308271 | 114 | 0.601359 |
e6e9911a23d6bd5acc93e8e6fe7c90d813721358
| 5,690 |
py
|
Python
|
suit_tool/argparser.py
|
bergzand/suit-manifest-generator
|
da82651a8b02fd4d7261e826cc70b5c862dd94ea
|
[
"Apache-2.0"
] | 16 |
2018-03-16T23:56:47.000Z
|
2022-01-23T14:14:09.000Z
|
suit_tool/argparser.py
|
bergzand/suit-manifest-generator
|
da82651a8b02fd4d7261e826cc70b5c862dd94ea
|
[
"Apache-2.0"
] | 23 |
2018-06-05T14:30:23.000Z
|
2021-02-15T20:53:09.000Z
|
suit_tool/argparser.py
|
bergzand/suit-manifest-generator
|
da82651a8b02fd4d7261e826cc70b5c862dd94ea
|
[
"Apache-2.0"
] | 10 |
2018-03-16T23:56:52.000Z
|
2020-07-21T16:36:46.000Z
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright 2019-2020 ARM Limited or its affiliates
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import sys, argparse, os
from suit_tool import __version__
from suit_tool import keygen
from suit_tool import get_pubkey
import json
import re
| 55.784314 | 142 | 0.649561 |
e6e9b0500db4a76f7cfddf89a8acd023b1673bdb
| 437 |
py
|
Python
|
python/process/process_pool.py
|
y2ghost/study
|
c5278611b0a732fe19e3d805c0c079e530b1d3b2
|
[
"MIT"
] | null | null | null |
python/process/process_pool.py
|
y2ghost/study
|
c5278611b0a732fe19e3d805c0c079e530b1d3b2
|
[
"MIT"
] | null | null | null |
python/process/process_pool.py
|
y2ghost/study
|
c5278611b0a732fe19e3d805c0c079e530b1d3b2
|
[
"MIT"
] | null | null | null |
import random
import time
from multiprocessing import Pool
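

# Reconstruction (assumption): the worker function mapped over the pool below
# was not captured in this snippet; a minimal sleep-and-print stand-in.
def worker(process_name):
    sleep_time = random.uniform(0.5, 2.0)
    print(f'{process_name} sleeping for {sleep_time:.2f} seconds')
    time.sleep(sleep_time)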
if __name__ == '__main__':
process_names = [f'computer_{i}' for i in range(15)]
pool = Pool(processes=5)
pool.map(worker, process_names)
# pool.terminate()
| 24.277778 | 61 | 0.686499 |
e6e9e879bcf76ce5cfbee781823873ae94cc9222
| 45,541 |
py
|
Python
|
Project/Support-NotSourced/generic_pydicom_ns.py
|
mazalgarab-git/OSICpypy
|
003fb0b146c9ed711f05475e6cc7563bf549f230
|
[
"CC0-1.0"
] | 1 |
2020-12-18T14:39:24.000Z
|
2020-12-18T14:39:24.000Z
|
Project/Support-NotSourced/generic_pydicom_ns.py
|
mazalgarab-git/OSICpypy
|
003fb0b146c9ed711f05475e6cc7563bf549f230
|
[
"CC0-1.0"
] | null | null | null |
Project/Support-NotSourced/generic_pydicom_ns.py
|
mazalgarab-git/OSICpypy
|
003fb0b146c9ed711f05475e6cc7563bf549f230
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 11:48:59 2020
@author: mazal
"""
"""
=========================================
Support functions of pydicom (Not sourced)
=========================================
Purpose: Create support functions for the pydicom project
"""
"""
Test mode 1 | Basics
testMode = True
reportMode = False
Test mode 2 | Function Report
testMode = False
reportMode = True
Commissioning mode
testMode = False
reportMode = False
"""
testMode = False
reportMode = False
"""
=========================================
Function 1: Aleatory Sampling
=========================================
Purpose: Build a random (aleatory) sample of a Kaggle competition train dataset, given a sample size
Raw code reference (see Tester.py): Test 5
"""
if testMode == True:
samplingSize = 5
resultFunction1 = trainDatasetSampler(samplingSize,testMode,reportMode)
print("=========================================")
print("Population dataset:")
print("=========================================")
print(resultFunction1[0])
print("=========================================")
print("Population dataset:")
print("=========================================")
print(resultFunction1[1])
print("=========================================")
print("Test result Function 1: Success")
print("=========================================")
"""
=========================================
Function 2: Submission Builder
=========================================
Purpose: Build a submission CSV file
Raw code reference (see Tester.py): Test 8
"""
if testMode == True:
ProductType = 'population'
filename = 'submissionRawFile_2020_09_19.csv'
resultFunction2 = SubmissionBuilder(ProductType,filename,testMode)
print("=========================================")
print("Product Type:")
print("=========================================")
print(ProductType)
print("=========================================")
print("Submission File saved as:")
print("=========================================")
print(resultFunction2[1])
print("=========================================")
print("Test result Function 2: Success")
print("=========================================")
"""
=========================================
Function 3: Dataset builder (Stacking solution case) to process with ML models
=========================================
Purpose: Build an input dataset to be processed with a stacking solution
Raw code reference (see Tester.py): Test 15
"""
if testMode == True:
ProductType = 'prototype'
PydicomMode = True
reportMode = False
resultFunction3 = stacking_Dataset_Builder(ProductType, PydicomMode, reportMode, testMode)
print("=========================================")
print("Function Report")
print("=========================================")
print("DataFrame")
print("=========================================")
print(resultFunction3[0])
print("=========================================")
print("=========================================")
print("Product Type: ", ProductType)
print("=========================================")
print("Pydicom Mode: ", PydicomMode)
print("=========================================")
print("Location of Input File:", resultFunction3[1])
print("=========================================")
print("Input File saved as:", resultFunction3[2])
print("=========================================")
print("Data type of the dataset")
print("=========================================")
print(resultFunction3[0].dtypes)
print("=========================================")
print("Test result Function 3: Success")
print("=========================================")
"""
=========================================
Function 4: Submission dataset builder (Stacking solution case) after ML outcome
=========================================
Purpose: Build a submission CSV file (Stacking solution case)
Raw code reference (see Tester.py): Test 17
About the Shape Parameter: It amounts to c = 0.12607421874999922 for every instance in the object of concern. The c value has been
computed under the following data-fitting scope: (1) Data: FVC predictions; (2) Probability density function (the statistical
function known in scipy as scipy.stats.loglaplace): loglaplace.pdf(x, c, loc=0, scale=1).
"""
if testMode == True:
# Set Product type
ProductType = 'prototype'
# ShapeParameter_Dataframe
example = False
if (example == True):
import pandas as pd
shapeParameter_IDList = ['ID00419637202311204720264','ID00421637202311550012437','ID00422637202311677017371','ID00423637202312137826377','ID00426637202313170790466']
c_List1 = [3,3,3,3,3]
c_List2 = [3,3,3,3,3]
c_List3 = [3,3,3,3,3]
c_List4 = [3,3,3,3,3]
shapeParameter_dictionary = {'Random Forest':c_List1, 'Lasso':c_List2, 'Gradient Boosting':c_List3, 'Stacking Regressor':c_List4}
shapeParameter_DataFrame = pd.DataFrame(data = shapeParameter_dictionary, index = shapeParameter_IDList)
else:
shapeParameter_DataFrame = []
# Set Pydicom mode
pydicomMode = True
resultFunction4 = Stacking_Submission_Dataset_Builder(ProductType,shapeParameter_DataFrame,pydicomMode,testMode)
print("=========================================")
print("Shape Parameter - Laplace Log Likelihood:")
print("=========================================")
print(resultFunction4[1])
print("Standard Deviation Clipped - Laplace Log Likelihood:")
print("=========================================")
print(resultFunction4[2])
print("=========================================")
print("Test result Function 4: Success")
print("=========================================")
"""
=========================================
Function 5: Get parameters given the required use of a log-laplace distribution (i.e. Laplace Log Likelihood)
=========================================
Purpose: Get shape parameter visualization for loglaplace
Raw code reference (see Tester.py): Test 17
"""
if testMode == True:
# Set Product type
ProductType = 'prototype'
# ShapeParameter_Dataframe
resultFunction5 = shapeParameter_visualizer(ProductType, testMode = True)
print("=========================================")
print("Shape Parameter - Laplace Log Likelihood:")
print("=========================================")
print(resultFunction5)
print("=========================================")
print("Test result Function 4: Success")
print("=========================================")
# """
# =========================================
# Function : Dataset builder 2 (Stacking solution case) to process with ML models
# =========================================
# Purpose: Build an input dataset to be processed with a stacking solution, but including the Pydicom image-processing solution
# Raw code reference (see Tester.py): 15
# """
# def stacking_Dataset_Builder_PydicomSolution(productType, testMode):
# # Set Product Type and its corresponding path
# if ProductType == 'population':
# path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
# if ProductType == 'prototype':
# path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
# if ProductType == 'sampling':
# path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
| 41.973272 | 186 | 0.603215 |
e6e9ffb5e0649025342ebb242012d9b21913b192
| 8,378 |
py
|
Python
|
paperscraper/scrapers/keywords.py
|
ahmed-shariff/scraper
|
52bed967db7e08e438daaa8dfa8d9338567ad7c2
|
[
"MIT"
] | 1 |
2021-11-19T02:56:22.000Z
|
2021-11-19T02:56:22.000Z
|
paperscraper/scrapers/keywords.py
|
ahmed-shariff/scraper
|
52bed967db7e08e438daaa8dfa8d9338567ad7c2
|
[
"MIT"
] | 1 |
2021-11-19T03:42:58.000Z
|
2022-03-29T16:32:16.000Z
|
paperscraper/scrapers/keywords.py
|
ahmed-shariff/scraper
|
52bed967db7e08e438daaa8dfa8d9338567ad7c2
|
[
"MIT"
] | 1 |
2021-11-19T02:56:28.000Z
|
2021-11-19T02:56:28.000Z
|
import re
regex = re.compile(r'[\n\r\t]')
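

def normalize_keyword(keyword):
    # Minimal sketch (assumption): the scraper functions that used this regex
    # were not captured in this snippet; collapsing the matched control
    # characters out of a scraped keyword string is its evident purpose.
    return regex.sub(' ', keyword).strip()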
| 38.608295 | 247 | 0.62509 |
e6ea376dac46236ea3d4ce92ad3215d1dbffb660
| 6,642 |
py
|
Python
|
topobank/publication/models.py
|
ContactEngineering/TopoBank
|
12710c24cc158801db20f030c3e0638060e24a0e
|
[
"MIT",
"BSD-3-Clause"
] | 3 |
2021-12-03T19:11:07.000Z
|
2021-12-27T17:14:39.000Z
|
topobank/publication/models.py
|
ContactEngineering/TopoBank
|
12710c24cc158801db20f030c3e0638060e24a0e
|
[
"MIT",
"BSD-3-Clause"
] | 268 |
2021-03-19T13:57:00.000Z
|
2022-03-31T20:58:26.000Z
|
topobank/publication/models.py
|
ContactEngineering/TopoBank
|
12710c24cc158801db20f030c3e0638060e24a0e
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.conf import settings
MAX_LEN_AUTHORS_FIELD = 512
CITATION_FORMAT_FLAVORS = ['html', 'ris', 'bibtex', 'biblatex']
DEFAULT_KEYWORDS = ['surface', 'topography']
| 36.696133 | 107 | 0.579645 |
e6ea40233a3bb49f837e23e4f39a0fd85da9fe09
| 489 |
py
|
Python
|
vendor/migrations/0003_store_password.py
|
rayhu-osu/vcube
|
ff1af048adb8a9f1007368150a78b309b4d821af
|
[
"MIT"
] | 1 |
2019-02-20T18:47:04.000Z
|
2019-02-20T18:47:04.000Z
|
vendor/migrations/0003_store_password.py
|
rayhu-osu/vcube
|
ff1af048adb8a9f1007368150a78b309b4d821af
|
[
"MIT"
] | null | null | null |
vendor/migrations/0003_store_password.py
|
rayhu-osu/vcube
|
ff1af048adb8a9f1007368150a78b309b4d821af
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-24 19:36
from __future__ import unicode_literals
from django.db import migrations, models
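
# The Migration class body was not captured in this snippet. A sketch
# (assumption) consistent with the filename `0003_store_password`, i.e.
# adding a password field to the vendor Store model, would look like:
#
#     class Migration(migrations.Migration):
#         dependencies = [('vendor', '<previous vendor migration>')]
#         operations = [
#             migrations.AddField(
#                 model_name='store',
#                 name='password',
#                 field=models.CharField(default='', max_length=128),
#             ),
#         ]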
| 22.227273 | 61 | 0.609407 |
e6eadd6e5aefadc0d052f84f6f0acadbd4bc7e84
| 440 |
py
|
Python
|
lec2.py
|
widnerlr/isat252
|
4196a8b1c6f4c75c3f5d8f64164014103b695077
|
[
"MIT"
] | null | null | null |
lec2.py
|
widnerlr/isat252
|
4196a8b1c6f4c75c3f5d8f64164014103b695077
|
[
"MIT"
] | null | null | null |
lec2.py
|
widnerlr/isat252
|
4196a8b1c6f4c75c3f5d8f64164014103b695077
|
[
"MIT"
] | null | null | null |
"""
Your module description
"""
"""
this is my second py code
for my second lecture
"""
#print ('hello world') # this is a single line comment
# this is my second line comment
#print(type("123."))
#print ("Hello World".upper())
#print("Hello World".lower())
#print("hello" + "world" + ".")
#print(2**3)
#my_str = "hello world"
#print(my_str)
#my_str = "Tom"
#print(my_str)
my_int = 2
my_float = 3.0
print(my_int + my_float)
| 12.941176 | 56 | 0.638636 |
e6eb31b711fe08af2de8afcc37c668f59c3bdd16
| 1,579 |
py
|
Python
|
day_22_b.py
|
Gyaha/AOC2020
|
fbabae9acd7d274b84bc0c64f2665dfba9f008ca
|
[
"MIT"
] | null | null | null |
day_22_b.py
|
Gyaha/AOC2020
|
fbabae9acd7d274b84bc0c64f2665dfba9f008ca
|
[
"MIT"
] | null | null | null |
day_22_b.py
|
Gyaha/AOC2020
|
fbabae9acd7d274b84bc0c64f2665dfba9f008ca
|
[
"MIT"
] | null | null | null |
if __name__ == "__main__":
run_tests()
import time
time_start = time.perf_counter()
print(run())
time_end = time.perf_counter() - time_start
print(f"Time: {time_end:0.4f} sec")
| 17.544444 | 62 | 0.542115 |
e6ecb90ea1c9f175831984d63548bf549ba7937d
| 335 |
py
|
Python
|
Auth/Constants/LoginOpCode.py
|
sundayz/idewave-core
|
5bdb88892173c9c3e8c85f431cf9b5dbd9f23941
|
[
"Apache-2.0"
] | null | null | null |
Auth/Constants/LoginOpCode.py
|
sundayz/idewave-core
|
5bdb88892173c9c3e8c85f431cf9b5dbd9f23941
|
[
"Apache-2.0"
] | null | null | null |
Auth/Constants/LoginOpCode.py
|
sundayz/idewave-core
|
5bdb88892173c9c3e8c85f431cf9b5dbd9f23941
|
[
"Apache-2.0"
] | null | null | null |
from enum import Enum
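

class LoginOpCode(Enum):
    """Reconstruction (assumption): the enum body was not captured in this
    snippet; these are the classic auth-server login opcodes a module with
    this name usually defines, with illustrative values."""
    LOGIN_CHALL = 0x00
    LOGIN_PROOF = 0x01
    RECONNECT_CHALL = 0x02
    RECONNECT_PROOF = 0x03
    REALMLIST = 0x10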
| 18.611111 | 49 | 0.641791 |
e6ee19c46029883010bf024e3e8dd551854a83e8
| 80 |
py
|
Python
|
LINETOKEN/__init__.py
|
pratannaimjoi/tokenIpad
|
f03969c05427bc1804d05c42823a28725c7e38a0
|
[
"Apache-2.0"
] | null | null | null |
LINETOKEN/__init__.py
|
pratannaimjoi/tokenIpad
|
f03969c05427bc1804d05c42823a28725c7e38a0
|
[
"Apache-2.0"
] | null | null | null |
LINETOKEN/__init__.py
|
pratannaimjoi/tokenIpad
|
f03969c05427bc1804d05c42823a28725c7e38a0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from .LineApi import LINE
from .lib.Gen.ttypes import *
| 20 | 29 | 0.6625 |
e6ee404e9353e9098c1662d7447e96af2b5999cf
| 164 |
py
|
Python
|
main.py
|
seton-develops/PDF-Camelot-Folder-Executable
|
168b5c24afe8884cf121a4207d7d3cb3ee7cc626
|
[
"MIT"
] | null | null | null |
main.py
|
seton-develops/PDF-Camelot-Folder-Executable
|
168b5c24afe8884cf121a4207d7d3cb3ee7cc626
|
[
"MIT"
] | null | null | null |
main.py
|
seton-develops/PDF-Camelot-Folder-Executable
|
168b5c24afe8884cf121a4207d7d3cb3ee7cc626
|
[
"MIT"
] | null | null | null |
'''
Created on Jun 17, 2021
@author: Sean
'''
import PDF2CSV_GUI
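

def main():
    # Reconstruction (assumption): main() is called below but was not captured
    # in this snippet. PDF2CSV_GUI is this repo's own module, so the exact
    # entry point is unknown; a hypothetical run() function is shown here.
    PDF2CSV_GUI.run()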
if __name__ == "__main__":
main()
| 12.615385 | 34 | 0.591463 |
e6ee7c4e821041f353c4df40b51b9e9fed815d11
| 325 |
py
|
Python
|
Part1/bot_read.py
|
Mildlyoffbeat/RedditBot-1
|
f65c3c4d0f3d6d3a468069d4a009b44a20e33797
|
[
"MIT"
] | null | null | null |
Part1/bot_read.py
|
Mildlyoffbeat/RedditBot-1
|
f65c3c4d0f3d6d3a468069d4a009b44a20e33797
|
[
"MIT"
] | null | null | null |
Part1/bot_read.py
|
Mildlyoffbeat/RedditBot-1
|
f65c3c4d0f3d6d3a468069d4a009b44a20e33797
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import praw
reddit = praw.Reddit('mob-secondbot')
subreddit = reddit.subreddit("learnpython")
for submission in subreddit.hot(limit=5):
print("Title: ", submission.title)
print("Text: ", submission.selftext)
print("Score: ", submission.score)
print("---------------------------------\n")
| 25 | 48 | 0.618462 |
e6ee864c778e3c7bd05d01ccaa072084d9d7a6f7
| 1,052 |
py
|
Python
|
17/kazuate_liar.cpp.py
|
Siketyan/Programming-I
|
0749c1ae045d53cd8a67f0de7ab13c26030ddd74
|
[
"Apache-2.0"
] | null | null | null |
17/kazuate_liar.cpp.py
|
Siketyan/Programming-I
|
0749c1ae045d53cd8a67f0de7ab13c26030ddd74
|
[
"Apache-2.0"
] | null | null | null |
17/kazuate_liar.cpp.py
|
Siketyan/Programming-I
|
0749c1ae045d53cd8a67f0de7ab13c26030ddd74
|
[
"Apache-2.0"
] | null | null | null |
from subprocess import Popen, PIPE, call
name = "kazuate_liar.o"
src = """
#include <iostream>
#include <random>
using namespace std;
int main()
{
random_device rd;
mt19937 mt(rd());
uniform_int_distribution<int> randfive(0, 4);
uniform_int_distribution<int> randint(1, 100);
int count = 0;
int num = randint(mt);
while (1)
{
int i;
cout << " ";
cin >> i;
if (i < 1 || i > 100)
{
cout << "" << endl;
continue;
}
count++;
bool liar = randfive(mt) == 0;
if (i == num)
{
cout << " (" << count << " )" << endl;
break;
}
else if ((liar && i > num) || i < num)
{
cout << "" << endl;
}
else
{
cout << "" << endl;
}
}
return 0;
}
""";
proc = Popen(["g++", "-o", name, "-x", "c++", "-"], stdin = PIPE);
proc.communicate(src.encode());
call(["./" + name]);
| 17.533333 | 66 | 0.439163 |
e6eeea99216e21aebde0241d03385a480d8c6df4
| 649 |
py
|
Python
|
src/terrafort/main.py
|
silvercar/terrafort
|
bdf9cb5d7f58d10a0c295c01b3a5620fdcc2876c
|
[
"MIT"
] | 1 |
2019-06-18T00:40:40.000Z
|
2019-06-18T00:40:40.000Z
|
src/terrafort/main.py
|
silvercar/terrafort
|
bdf9cb5d7f58d10a0c295c01b3a5620fdcc2876c
|
[
"MIT"
] | null | null | null |
src/terrafort/main.py
|
silvercar/terrafort
|
bdf9cb5d7f58d10a0c295c01b3a5620fdcc2876c
|
[
"MIT"
] | 1 |
2021-08-25T02:15:28.000Z
|
2021-08-25T02:15:28.000Z
|
"""
Terrafort
Generate terraform templates for specific resources
"""
import click
from .providers.aws import Aws
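

# Reconstruction (assumption): the click group itself was not captured in
# this snippet; a minimal definition consistent with the add_command calls
# and the cli(obj={}) invocation below.
@click.group()
@click.pass_context
def cli(ctx):
    """Generate terraform templates for specific resources."""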
cli.add_command(Aws.aws_db_instance)
cli.add_command(Aws.aws_iam_instance_profile)
cli.add_command(Aws.aws_instance)
cli.add_command(Aws.aws_security_group)
if __name__ == "__main__":
# pylint: disable=unexpected-keyword-arg,no-value-for-parameter
cli(obj={})
| 22.37931 | 76 | 0.731895 |
e6efe17c4e6e08ec55040433cf5ea1ff20fecb68
| 528 |
py
|
Python
|
src/ping.py
|
jnsougata/rich-embed
|
95901e590f00c4e4eabeb99c8f06bb5f90718d80
|
[
"MIT"
] | null | null | null |
src/ping.py
|
jnsougata/rich-embed
|
95901e590f00c4e4eabeb99c8f06bb5f90718d80
|
[
"MIT"
] | null | null | null |
src/ping.py
|
jnsougata/rich-embed
|
95901e590f00c4e4eabeb99c8f06bb5f90718d80
|
[
"MIT"
] | null | null | null |
import discord
import app_util
| 24 | 94 | 0.662879 |
e6f05425230fc70414cb78c1b2738e7f0e282ac0
| 2,017 |
py
|
Python
|
2020/24/visualization.py
|
AlbertVeli/AdventOfCode
|
3d3473695318a0686fac720a1a21dd3629f09e33
|
[
"Unlicense"
] | null | null | null |
2020/24/visualization.py
|
AlbertVeli/AdventOfCode
|
3d3473695318a0686fac720a1a21dd3629f09e33
|
[
"Unlicense"
] | null | null | null |
2020/24/visualization.py
|
AlbertVeli/AdventOfCode
|
3d3473695318a0686fac720a1a21dd3629f09e33
|
[
"Unlicense"
] | 1 |
2021-12-04T10:37:09.000Z
|
2021-12-04T10:37:09.000Z
|
#!/usr/bin/env python3
import sys
import re
import numpy as np
from PIL import Image
moves = { 'e': (2, 0), 'se': (1, 2), 'sw': (-1, 2), 'w': (-2, 0), 'nw': (-1, -2), 'ne': (1, -2) }
# Save (x, y): True/False in tiles. True = black, False = white.
tiles = {}
for line in open(sys.argv[1]).read().splitlines():
pos = np.array((0, 0))
for d in re.findall(r'e|se|sw|w|nw|ne', line):
pos += moves[d]
t = tuple(pos)
if t in tiles:
tiles[t] = not tiles[t]
else:
tiles[t] = True
# Part 1
print('black:', sum(val == True for val in tiles.values()))
# -- Part 2 --
# take a chance on how wide it needs to be
width = 300
height = 300
board = np.zeros(width * height, dtype=np.int8)
board = board.reshape(height, width)
# Fill in tiles, move to center
for key, value in tiles.items():
x, y = key
x += width // 2
y += height // 2
board[y][x] = value
save_image(0)
for day in range(1, 101):
game()
save_image(day)
print('Day %d: %d' % (day, len(np.where(board == True)[0])))
ys, xs = np.where(board)
print(min(ys), max(ys), min(xs), max(xs))
| 24.901235 | 97 | 0.511155 |
e6f0fc4f8d5c7522b3b6e45957a0edd9bcec2662
| 16,451 |
py
|
Python
|
experimental/tracing/bin/diff_heap_profiler.py
|
BearerPipelineTest/catapult
|
3800a67cd916200046a50748893bbd0dcf3d7f4a
|
[
"BSD-3-Clause"
] | 1,894 |
2015-04-17T18:29:53.000Z
|
2022-03-28T22:41:06.000Z
|
experimental/tracing/bin/diff_heap_profiler.py
|
BearerPipelineTest/catapult
|
3800a67cd916200046a50748893bbd0dcf3d7f4a
|
[
"BSD-3-Clause"
] | 4,640 |
2015-07-08T16:19:08.000Z
|
2019-12-02T15:01:27.000Z
|
experimental/tracing/bin/diff_heap_profiler.py
|
atuchin-m/catapult
|
108ea3e2ec108e68216b1250a3d79cc642600294
|
[
"BSD-3-Clause"
] | 698 |
2015-06-02T19:18:35.000Z
|
2022-03-29T16:57:15.000Z
|
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from __future__ import print_function
import argparse
import gzip
import json
import os
import shutil
import six
from six.moves import zip
_OUTPUT_DIR = 'output'
_OUTPUT_GRAPH_DIR = os.path.join(_OUTPUT_DIR, 'graph')
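# Hedged reconstruction: the original class definitions were stripped from
# this record; the attribute sets below are inferred from how the rest of
# the script uses Process, Entry and GraphDump.
class Process(object):
  """Container for per-process data found in the trace."""

  def __init__(self):
    self.pid = None
    self.name = None
    self.labels = None
    self.types = {}
    self.strings = {}
    self.stackframes = {}
    self.allocators = None
    self.version = None


class Entry(object):
  """One heap entry: an allocation count/size at a stackframe and type."""

  def __init__(self):
    self.count = 0
    self.size = 0
    self.type = None
    self.stackframe = None


class GraphDump(object):
  """Diff results for one (process, heap) pair."""

  def __init__(self):
    self.pid = None
    self.name = None
    self.labels = None
    self.heap = None
    self.root = None
    self.leaks = None
    self.leak_stackframes = 0
    self.leak_objects = 0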
def OpenTraceFile(file_path, mode):
if file_path.endswith('.gz'):
return gzip.open(file_path, mode + 'b')
return open(file_path, mode + 't')
def FindMemoryDumps(filename):
processes = {}
with OpenTraceFile(filename, 'r') as f:
data = json.loads(f.read().decode('utf-8'))
for event in data['traceEvents']:
pid = event['pid']
if pid not in processes:
processes[pid] = Process()
processes[pid].pid = pid
process = processes[pid]
# Retrieve process information.
if event['ph'] == 'M':
if event['name'] == 'process_name' and 'name' in event['args']:
process.name = event['args']['name']
if event['name'] == 'process_labels' and 'labels' in event['args']:
process.labels = event['args']['labels']
if event['name'] == 'typeNames':
process.types = {}
for type_id, t in six.iteritems(event['args']['typeNames']):
process.types[int(type_id)] = t
if event['name'] == 'stackFrames':
process.stackframes = {}
for stack_id, s in six.iteritems(event['args']['stackFrames']):
new_stackframe = {}
new_stackframe['name'] = s['name']
if 'parent' in s:
new_stackframe['parent'] = int(s['parent'])
process.stackframes[int(stack_id)] = new_stackframe
# Look for a detailed memory dump event.
if not ((event['name'] == 'periodic_interval' or
event['name'] == 'explicitly_triggered') and
event['args']['dumps']['level_of_detail'] == 'detailed'):
continue
# Check for a memory dump V1.
if u'heaps' in event['args']['dumps']:
# Get the first memory dump.
if not process.allocators:
process.version = 1
process.allocators = event['args']['dumps']['heaps']
# Check for a memory dump V2.
# See format: [chromium] src/base/trace_event/heap_profiler_event_writer.h
if u'heaps_v2' in event['args']['dumps']:
# Memory dump format V2 is dumping information incrementally. Update
# the cumulated indexes.
maps = event['args']['dumps']['heaps_v2']['maps']
for string in maps['strings']:
process.strings[string['id']] = string['string']
for node in maps['nodes']:
node_v1 = {}
node_v1['name'] = process.strings[node['name_sid']]
if 'parent' in node:
node_v1['parent'] = node['parent']
process.stackframes[node['id']] = node_v1
for t in maps['types']:
process.types[t['id']] = process.strings[t['name_sid']]
# Get the first memory dump.
if not process.allocators:
dump = event['args']['dumps']
process.version = 2
process.allocators = dump['heaps_v2']['allocators']
# Remove processes with an incomplete memory dump. Iterate over a copy so
# that deleting entries is safe on Python 3.
for pid, process in list(processes.items()):
if not (process.allocators and process.stackframes and process.types):
del processes[pid]
return processes
def ResolveMemoryDumpFields(entries, stackframes, types):
for entry in entries:
# Stackframe may be -1 (18446744073709551615L) when no stackframes are
# available.
if entry.stackframe not in stackframes:
entry.stackframe = []
else:
entry.stackframe = ResolveStackTrace(entry.stackframe, stackframes)
entry.type = ResolveType(entry.type, types)
def GetEntries(heap, process):
"""
Returns all entries in a heap, after filtering out unknown entries, and doing
some post processing to extract the relevant fields.
"""
if not process:
return []
entries = []
if process.version == 1:
for raw_entry in process.allocators[heap]['entries']:
# Cumulative sizes and types are skipped. see:
# https://chromium.googlesource.com/chromium/src/+/a990af190304be5bf38b120799c594df5a293518/base/trace_event/heap_profiler_heap_dump_writer.cc#294
if 'type' not in raw_entry or not raw_entry['bt']:
continue
entry = Entry()
entry.count = int(raw_entry['count'], 16)
entry.size = int(raw_entry['size'], 16)
entry.type = int(raw_entry['type'])
entry.stackframe = int(raw_entry['bt'])
entries.append(entry)
elif process.version == 2:
raw_entries = list(zip(process.allocators[heap]['counts'],
process.allocators[heap]['sizes'],
process.allocators[heap]['types'],
process.allocators[heap]['nodes']))
for (raw_count, raw_size, raw_type, raw_stackframe) in raw_entries:
entry = Entry()
entry.count = raw_count
entry.size = raw_size
entry.type = raw_type
entry.stackframe = raw_stackframe
entries.append(entry)
# Resolve fields by looking into indexes
ResolveMemoryDumpFields(entries, process.stackframes, process.types)
return entries
def BuildGraphDumps(processes, threshold, size_threshold):
"""
Build graph for a sequence of pair of processes.
If start_process is None, counts objects in end_trace.
Otherwise, counts objects present in end_trace, but not in start_process.
"""
graph_dumps = []
for (start_process, end_process) in processes:
pid = end_process.pid
name = end_process.name if end_process.name else ''
labels = end_process.labels if end_process.labels else ''
print('Process[%d] %s: %s' % (pid, name, labels))
for heap in end_process.allocators:
start_entries = GetEntries(heap, start_process)
end_entries = GetEntries(heap, end_process)
graph = GraphDump()
graph.pid = pid
graph.name = name
graph.labels = labels
graph.heap = heap
graph_dumps.append(graph)
# Do the math: diffing start and end memory dumps.
root = {}
root['count'] = 0
root['size'] = 0
root['children'] = {}
root['count_by_type'] = {}
for entry in start_entries:
if entry.type:
IncrementHeapEntry(entry.stackframe, - entry.count, - entry.size,
entry.type, root)
for entry in end_entries:
if entry.type:
IncrementHeapEntry(entry.stackframe, entry.count, entry.size,
entry.type, root)
CanonicalHeapEntries(root)
graph.root = root
# Find leaks
leaks = []
FindLeaks(root, [], leaks, threshold, size_threshold)
leaks.sort(reverse=True, key=lambda k: k['size'])
if leaks:
print(' %s: %d potential leaks found.' % (heap, len(leaks)))
graph.leaks = leaks
graph.leak_stackframes = len(leaks)
for leak in leaks:
graph.leak_objects += leak['count']
return graph_dumps
if __name__ == '__main__':
Main()
| 32.005837 | 152 | 0.636253 |
e6f21f20dc1c7283a540aac397169a7429e851b1
| 3,743 |
py
|
Python
|
mne_bids/commands/mne_bids_raw_to_bids.py
|
kingjr/mne-bids
|
3a4543076912cebbc89a5f0b9433cda1b9e288b8
|
[
"BSD-3-Clause"
] | null | null | null |
mne_bids/commands/mne_bids_raw_to_bids.py
|
kingjr/mne-bids
|
3a4543076912cebbc89a5f0b9433cda1b9e288b8
|
[
"BSD-3-Clause"
] | null | null | null |
mne_bids/commands/mne_bids_raw_to_bids.py
|
kingjr/mne-bids
|
3a4543076912cebbc89a5f0b9433cda1b9e288b8
|
[
"BSD-3-Clause"
] | null | null | null |
"""Write raw files to BIDS format.
example usage: $ mne_bids raw_to_bids --subject_id sub01 --task rest
--raw data.edf --bids_root new_path
"""
# Authors: Teon Brooks <[email protected]>
# Stefan Appelhoff <[email protected]>
#
# License: BSD (3-clause)
import mne_bids
from mne_bids import write_raw_bids, BIDSPath
from mne_bids.read import _read_raw
def run():
"""Run the raw_to_bids command."""
from mne.commands.utils import get_optparser
parser = get_optparser(__file__, usage="usage: %prog options args",
prog_prefix='mne_bids',
version=mne_bids.__version__)
parser.add_option('--subject_id', dest='subject_id',
help=('subject name in BIDS compatible format '
'(01, 02, etc.)'))
parser.add_option('--task', dest='task',
help='name of the task the data is based on')
parser.add_option('--raw', dest='raw_fname',
help='path to the raw MEG file')
parser.add_option('--bids_root', dest='bids_root',
help='The path of the BIDS compatible folder.')
parser.add_option('--session_id', dest='session_id',
help='session name in BIDS compatible format')
parser.add_option('--run', dest='run',
help='run number for this dataset')
parser.add_option('--acq', dest='acq',
help='acquisition parameter for this dataset')
parser.add_option('--events_data', dest='events_data',
help='events file (events.tsv)')
parser.add_option('--event_id', dest='event_id',
help='event id dict', metavar='eid')
parser.add_option('--hpi', dest='hpi',
help='path to the MEG marker points')
parser.add_option('--electrode', dest='electrode',
help='path to head-native digitizer points')
parser.add_option('--hsp', dest='hsp',
help='path to headshape points')
parser.add_option('--config', dest='config',
help='path to the configuration file')
parser.add_option('--overwrite', dest='overwrite',
help="whether to overwrite existing data (BOOLEAN)")
parser.add_option('--line_freq', dest='line_freq',
help="The frequency of the line noise in Hz "
"(e.g. 50 or 60). If unknown, pass None")
opt, args = parser.parse_args()
if len(args) > 0:
parser.print_help()
parser.error('Do not specify arguments without flags. Found: "{}".\n'
.format(args))
if not all([opt.subject_id, opt.task, opt.raw_fname, opt.bids_root]):
parser.print_help()
parser.error('Arguments missing. You need to specify at least the '
'following: --subject_id, --task, --raw, --bids_root.')
bids_path = BIDSPath(
subject=opt.subject_id, session=opt.session_id, run=opt.run,
acquisition=opt.acq, task=opt.task, root=opt.bids_root)
allow_maxshield = False
if opt.raw_fname.endswith('.fif'):
allow_maxshield = True
raw = _read_raw(opt.raw_fname, hpi=opt.hpi, electrode=opt.electrode,
hsp=opt.hsp, config=opt.config,
allow_maxshield=allow_maxshield)
if opt.line_freq is not None:
line_freq = None if opt.line_freq == "None" else opt.line_freq
raw.info['line_freq'] = line_freq
write_raw_bids(raw, bids_path, event_id=opt.event_id,
events_data=opt.events_data, overwrite=opt.overwrite,
verbose=True)
if __name__ == '__main__':
run()
| 41.588889 | 77 | 0.594176 |
e6f290178fbe89e1c3a852359d5e4b95ce0dd4ec
| 1,460 |
py
|
Python
|
lab1oop.py
|
NastiaK/NewRepository
|
d1907fc2e159dc1831071d7c79e20bbfb47fb822
|
[
"MIT"
] | null | null | null |
lab1oop.py
|
NastiaK/NewRepository
|
d1907fc2e159dc1831071d7c79e20bbfb47fb822
|
[
"MIT"
] | null | null | null |
lab1oop.py
|
NastiaK/NewRepository
|
d1907fc2e159dc1831071d7c79e20bbfb47fb822
|
[
"MIT"
] | null | null | null |
if __name__ == "__main__":
main()
| 29.795918 | 119 | 0.489041 |
e6f2fef589655b9bf1c7a2c668ca919bfd152a24
| 460 |
py
|
Python
|
Arrays/cyclic_rotation.py
|
Jeans212/codility-dev-training
|
9c5118c6433ea210d1485a6127712a92496e2bc2
|
[
"MIT"
] | null | null | null |
Arrays/cyclic_rotation.py
|
Jeans212/codility-dev-training
|
9c5118c6433ea210d1485a6127712a92496e2bc2
|
[
"MIT"
] | null | null | null |
Arrays/cyclic_rotation.py
|
Jeans212/codility-dev-training
|
9c5118c6433ea210d1485a6127712a92496e2bc2
|
[
"MIT"
] | null | null | null |
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
'''
Rotate an array A to the right by a given number of steps K.
Convert the array to a deque
Apply the rotate() method to rotate the deque by K steps to the right
Convert the deque back to an array
'''
from collections import deque
| 23 | 70 | 0.669565 |
e6f36e3d6234b36ef09fd70fd1be755548b506ba
| 37,741 |
py
|
Python
|
tests/test_apis.py
|
hatzel/markdown-spoilers
|
1964f298f0e8b99f1202d36ccc7d8cf7d613ad26
|
[
"BSD-3-Clause"
] | 2 |
2020-06-21T12:02:58.000Z
|
2020-09-02T15:21:19.000Z
|
tests/test_apis.py
|
hatzel/markdown-spoilers
|
1964f298f0e8b99f1202d36ccc7d8cf7d613ad26
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_apis.py
|
hatzel/markdown-spoilers
|
1964f298f0e8b99f1202d36ccc7d8cf7d613ad26
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Python Markdown
A Python implementation of John Gruber's Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE.md for details).
Python-Markdown Regression Tests
================================
Tests of the various APIs with the python markdown lib.
"""
from __future__ import unicode_literals
import unittest
import sys
import os
import markdown
import warnings
from markdown.__main__ import parse_options
from logging import DEBUG, WARNING, CRITICAL
import yaml
import tempfile
from io import BytesIO
from xml.etree.ElementTree import ProcessingInstruction
PY3 = sys.version_info[0] == 3
if not PY3:
    pass  # the original Python 2-only compatibility shim was stripped from this record
| 36.081262 | 116 | 0.601123 |
e6f5ecde56dec14d70d3fec0b36dc822d497cba7
| 2,230 |
py
|
Python
|
nervous/utility/config.py
|
csxeba/nervous
|
f7aeb9b2ff875835c346c607722fab517ef6df61
|
[
"MIT"
] | 1 |
2018-09-24T11:29:19.000Z
|
2018-09-24T11:29:19.000Z
|
nervous/utility/config.py
|
csxeba/nervous
|
f7aeb9b2ff875835c346c607722fab517ef6df61
|
[
"MIT"
] | null | null | null |
nervous/utility/config.py
|
csxeba/nervous
|
f7aeb9b2ff875835c346c607722fab517ef6df61
|
[
"MIT"
] | null | null | null |
import os
| 33.283582 | 98 | 0.673991 |
e6f658acae15a3e9ea6e4c377ee45743db7b0897
| 6,365 |
py
|
Python
|
mindspore/nn/optim/ftrl.py
|
XinYao1994/mindspore
|
2c1a2bf752a1fde311caddba22633d2f4f63cb4e
|
[
"Apache-2.0"
] | 2 |
2020-04-28T03:49:10.000Z
|
2020-04-28T03:49:13.000Z
|
mindspore/nn/optim/ftrl.py
|
XinYao1994/mindspore
|
2c1a2bf752a1fde311caddba22633d2f4f63cb4e
|
[
"Apache-2.0"
] | null | null | null |
mindspore/nn/optim/ftrl.py
|
XinYao1994/mindspore
|
2c1a2bf752a1fde311caddba22633d2f4f63cb4e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FTRL"""
from mindspore.ops import functional as F, composite as C, operations as P
from mindspore.common.parameter import Parameter
from mindspore.common import Tensor
import mindspore.common.dtype as mstype
from mindspore._checkparam import Validator as validator
from mindspore._checkparam import Rel
from .optimizer import Optimizer, apply_decay, grad_scale
ftrl_opt = C.MultitypeFuncGraph("ftrl_opt")
def _check_param(initial_accum, learning_rate, lr_power, l1, l2, use_locking, loss_scale=1.0, weight_decay=0.0,
prim_name=None):
"""Check param."""
validator.check_value_type("initial_accum", initial_accum, [float], prim_name)
validator.check_number("initial_accum", initial_accum, 0.0, Rel.GE, prim_name)
validator.check_value_type("learning_rate", learning_rate, [float], prim_name)
validator.check_number("learning_rate", learning_rate, 0.0, Rel.GT, prim_name)
validator.check_value_type("lr_power", lr_power, [float], prim_name)
validator.check_number("lr_power", lr_power, 0.0, Rel.LE, prim_name)
validator.check_value_type("l1", l1, [float], prim_name)
validator.check_number("l1", l1, 0.0, Rel.GE, prim_name)
validator.check_value_type("l2", l2, [float], prim_name)
validator.check_number("l2", l2, 0.0, Rel.GE, prim_name)
validator.check_value_type("use_locking", use_locking, [bool], prim_name)
validator.check_value_type("loss_scale", loss_scale, [float], prim_name)
validator.check_number("loss_scale", loss_scale, 1.0, Rel.GE, prim_name)
validator.check_value_type("weight_decay", weight_decay, [float], prim_name)
validator.check_number("weight_decay", weight_decay, 0.0, Rel.GE, prim_name)
| 50.11811 | 116 | 0.692066 |
e6f688088bfa1088bfe7257d2cece961dd478353
| 5,106 |
py
|
Python
|
aws_utils/region_selector.py
|
skimhub/aws-utils
|
5496a7594ab90b1e658e8f9f8137e8943a39be1e
|
[
"Apache-2.0"
] | null | null | null |
aws_utils/region_selector.py
|
skimhub/aws-utils
|
5496a7594ab90b1e658e8f9f8137e8943a39be1e
|
[
"Apache-2.0"
] | 13 |
2016-01-05T14:48:38.000Z
|
2017-08-14T10:17:41.000Z
|
aws_utils/region_selector.py
|
skimhub/aws-utils
|
5496a7594ab90b1e658e8f9f8137e8943a39be1e
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import boto3
US_EAST_REGION = {'us-east-1'}
US_EAST_AVAILABILITY_ZONES = {'us-east-1a', 'us-east-1b', 'us-east-1c', 'us-east-1e'} # note d is missing
INSTANCE_VERSION = 'Linux/UNIX (Amazon VPC)'
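def fetch_spot_prices(region, start_time, end_time, instance_type,
                      instance_version=INSTANCE_VERSION):
    """Hedged sketch: the original helper was stripped from this record.

    Yields (availability_zone, price) pairs, assumed to come from the
    boto3 describe_spot_price_history paginator.
    """
    client = boto3.client('ec2', region_name=region)
    paginator = client.get_paginator('describe_spot_price_history')
    pages = paginator.paginate(StartTime=start_time,
                               EndTime=end_time,
                               InstanceTypes=[instance_type],
                               ProductDescriptions=[instance_version])
    for page in pages:
        for item in page['SpotPriceHistory']:
            yield item['AvailabilityZone'], float(item['SpotPrice'])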
def fetch_price_stats_per_availability_zone(region, start_time, end_time, instance_type, instance_version=INSTANCE_VERSION,
filter_availability_zones=None):
"""Groups raw prices by region, returns min, max and avg price.
Args:
region (str): region to look for instances in
start_time (datetime.datetime):
end_time (datetime.datetime):
instance_type (str):
instance_version (str): product description used to filter the instances we return prices for.
filter_availability_zones ({str}): if set then we only return a price if the availability zone is in this list
Returns: dict,
{'us-east-1b': {'min': 2.01, 'max': 3.53,'avg':2.8, 'latest':3.0}}
"""
by_zone = {}
for zone, price in fetch_spot_prices(region, start_time, end_time, instance_type, instance_version):
by_zone.setdefault(zone, []).append(price)
prices_per_region = {}
for zone, prices in by_zone.items():
if filter_availability_zones is None or zone in filter_availability_zones:
region_prices = {'min': min(prices),
'max': max(prices),
'avg': sum(prices) / float(len(prices)),
'latest': prices[0]}
prices_per_region[zone] = region_prices
return prices_per_region
def get_cheapest_availability_zone(instance_type, search_regions=US_EAST_REGION,
filter_availability_zones=US_EAST_AVAILABILITY_ZONES, expected_job_length=datetime.timedelta(days=1)):
"""Get the cheapest availability zone from a set of regions. Cheapest is determined by 'latest price + average price'
over the duration that the job is expected to run for
Args:
filter_availability_zones ({str}): We only return results for zones in this set
instance_type (str): Type of aws instance e.g. "m2.4xlarge"
search_regions ({str}): Set of regions we want to look for availability zones in.
expected_job_length (datetime.timedelta): The period we expect the job to run this is used as the amount of time to look back over
for the average
Returns:
(str, {}) : e.g. ('us-east-1b': {'min': 2.01, 'max': 3.53,'avg':2.8, 'latest':3.0})
"""
if isinstance(search_regions, str):
search_regions = {search_regions}
aggregated_prices = {}
for region in search_regions:
result_stats = fetch_price_stats_per_availability_zone(region,
datetime.datetime.utcnow() - expected_job_length,
datetime.datetime.utcnow(),
instance_type,
filter_availability_zones=filter_availability_zones)
if not len(result_stats):
raise Exception("No valid availability zones found for region %s" % (region,))
aggregated_prices.update(result_stats)
cheapest_availability_zone, stats = min(aggregated_prices.items(), key=lambda x: x[1]['avg'] + x[1]['latest'])
return cheapest_availability_zone, stats
| 44.4 | 138 | 0.604387 |
e6f6e592f45ce51ed72972736b1981a35d6ad662
| 81 |
py
|
Python
|
pynn/__init__.py
|
jkae/knn-exercise
|
ae569e3f6a0e23669369d99e032270e72f8fbb66
|
[
"MIT"
] | null | null | null |
pynn/__init__.py
|
jkae/knn-exercise
|
ae569e3f6a0e23669369d99e032270e72f8fbb66
|
[
"MIT"
] | null | null | null |
pynn/__init__.py
|
jkae/knn-exercise
|
ae569e3f6a0e23669369d99e032270e72f8fbb66
|
[
"MIT"
] | null | null | null |
from .nearest_neighbor_index import NearestNeighborIndex
from .kd_tree import *
| 20.25 | 56 | 0.851852 |
e6f7da2b0c80534457eb53c6aaa04a6eb69ac541
| 2,562 |
py
|
Python
|
tests/test_try.py
|
threecifanggen/python-functional-programming
|
bd17281e5f24db826266f509bc54b25362c0d2a1
|
[
"MIT"
] | 3 |
2021-10-05T09:12:36.000Z
|
2021-11-30T07:11:58.000Z
|
tests/test_try.py
|
threecifanggen/python-functional-programming
|
bd17281e5f24db826266f509bc54b25362c0d2a1
|
[
"MIT"
] | 14 |
2021-10-11T05:31:15.000Z
|
2021-12-16T12:52:47.000Z
|
tests/test_try.py
|
threecifanggen/python-functional-programming
|
bd17281e5f24db826266f509bc54b25362c0d2a1
|
[
"MIT"
] | null | null | null |
'''
Author: huangbaochen<[email protected]>
Date: 2021-12-11 20:04:19
LastEditTime: 2021-12-11 21:46:16
LastEditors: huangbaochen<[email protected]>
Description: Try
No MERCY
'''
import pytest
from fppy.try_monad import Try, Success, Fail
from fppy.option import Just, Nothing
def test_try_monad_map():
assert Success(1).map(lambda x: x + 1) == Success(2)
assert Success(1).map(lambda x: x / 0) ==\
Fail(ZeroDivisionError('division by zero'), 1)
assert Fail(ZeroDivisionError('division by zero'), 1)\
.map(lambda x: x + 1) ==\
Fail(ZeroDivisionError('division by zero'), 1)
| 29.790698 | 66 | 0.673692 |
e6f805f6f11f261c37210a559213d4def9f1debd
| 952 |
py
|
Python
|
app/internal/daily_quotes.py
|
yammesicka/calendar
|
7c15a24883dbdffb563b6d3286c2d458e4a1c9c0
|
[
"Apache-2.0"
] | null | null | null |
app/internal/daily_quotes.py
|
yammesicka/calendar
|
7c15a24883dbdffb563b6d3286c2d458e4a1c9c0
|
[
"Apache-2.0"
] | null | null | null |
app/internal/daily_quotes.py
|
yammesicka/calendar
|
7c15a24883dbdffb563b6d3286c2d458e4a1c9c0
|
[
"Apache-2.0"
] | null | null | null |
from datetime import date
from typing import Dict, Optional
from sqlalchemy.orm import Session
from sqlalchemy.sql.expression import func
from app.database.models import Quote
TOTAL_DAYS = 366
def create_quote_object(quotes_fields: Dict[str, Optional[str]]) -> Quote:
"""This function create a quote object from given fields dictionary.
It is used for adding the data from the json into the db"""
return Quote(
text=quotes_fields['text'],
author=quotes_fields['author']
)
def quote_per_day(
session: Session, date: date = date.today()
) -> Optional[Quote]:
"""This function provides a daily quote, relevant to the current
day of the year. The quote is randomly selected from a set
of quotes matching the given day"""
day_num = date.timetuple().tm_yday
quote = session.query(Quote).filter(
Quote.id % TOTAL_DAYS == day_num).order_by(func.random()).first()
return quote
| 30.709677 | 74 | 0.711134 |
e6fa166658f7b4a5f652c93e09a1ac34583195f0
| 123 |
py
|
Python
|
src/789A.py
|
viing937/codeforces
|
d694eb6967cd56af02963c3a662066048cb78d07
|
[
"MIT"
] | 2 |
2016-08-19T09:47:03.000Z
|
2016-10-01T10:15:03.000Z
|
src/789A.py
|
viing937/codeforces
|
d694eb6967cd56af02963c3a662066048cb78d07
|
[
"MIT"
] | null | null | null |
src/789A.py
|
viing937/codeforces
|
d694eb6967cd56af02963c3a662066048cb78d07
|
[
"MIT"
] | 1 |
2015-07-01T23:57:32.000Z
|
2015-07-01T23:57:32.000Z
|
n, k = map(int, input().split())
w = list(map(int, input().split()))
r = sum(map(lambda x: (x+k-1)//k, w))
print((r+1)//2)
| 24.6 | 37 | 0.536585 |
e6fab2043b0b6fa907bee5da86873ddbf2cfe3cf
| 1,432 |
py
|
Python
|
platform/server/detect.py
|
leyyin/godot
|
68325d7254db711beaedddad218e2cddb405c42c
|
[
"CC-BY-3.0",
"MIT"
] | 24 |
2016-10-14T16:54:01.000Z
|
2022-01-15T06:39:17.000Z
|
platform/server/detect.py
|
leyyin/godot
|
68325d7254db711beaedddad218e2cddb405c42c
|
[
"CC-BY-3.0",
"MIT"
] | 17 |
2016-12-30T14:35:53.000Z
|
2017-03-07T21:07:50.000Z
|
platform/server/detect.py
|
leyyin/godot
|
68325d7254db711beaedddad218e2cddb405c42c
|
[
"CC-BY-3.0",
"MIT"
] | 9 |
2017-08-04T12:00:16.000Z
|
2021-12-10T06:48:28.000Z
|
import os
import sys
| 17.679012 | 81 | 0.609637 |
e6fc051294ab288b08cbb146da00f8c04ac171dd
| 413 |
py
|
Python
|
telemetry/Truck.py
|
SnipsMine/ETS2-Speedrun-Tool
|
5ac87e4bc88be67ff4954b2b98772ff14a65eb48
|
[
"MIT"
] | null | null | null |
telemetry/Truck.py
|
SnipsMine/ETS2-Speedrun-Tool
|
5ac87e4bc88be67ff4954b2b98772ff14a65eb48
|
[
"MIT"
] | null | null | null |
telemetry/Truck.py
|
SnipsMine/ETS2-Speedrun-Tool
|
5ac87e4bc88be67ff4954b2b98772ff14a65eb48
|
[
"MIT"
] | null | null | null |
from telemetry.TruckConstants import ConstantValues
from telemetry.TruckCurrent import CurrentValues
from telemetry.TruckPositioning import Positioning
| 21.736842 | 51 | 0.750605 |
e6fc32c8a31669a37234337e3418a714af3c26bd
| 1,483 |
py
|
Python
|
IntroToSpark/Assign4_Q1-6_action.py
|
petersontylerd/spark-courses
|
e8dcb4968ea31a50206739e6af3006889f8c3c6c
|
[
"MIT"
] | null | null | null |
IntroToSpark/Assign4_Q1-6_action.py
|
petersontylerd/spark-courses
|
e8dcb4968ea31a50206739e6af3006889f8c3c6c
|
[
"MIT"
] | null | null | null |
IntroToSpark/Assign4_Q1-6_action.py
|
petersontylerd/spark-courses
|
e8dcb4968ea31a50206739e6af3006889f8c3c6c
|
[
"MIT"
] | 1 |
2021-03-26T11:47:37.000Z
|
2021-03-26T11:47:37.000Z
|
import csv
from pyspark.sql import SparkSession
from pyspark.sql.types import IntegerType
spark = SparkSession.builder.appName("Assignment4").getOrCreate()
sc = spark.sparkContext
# load data to dataframe
path = 'fake_data.csv'
df = spark.read.format('csv').option('header','true').load(path)
# cast income as an integer
df = df.withColumn('Income', df['Income'].cast(IntegerType()))
# Question 1
print('*' * 30)
print('\nQuestion 1\n')
print(df.rdd.map(lambda x: (x[1], x[0])).groupByKey().mapValues(lambda vals: len(set(vals))).sortBy(lambda a: a[1], ascending = False).take(1))
print('\n\n')
# Question 2
print('*' * 30)
print('\nQuestion 2\n')
print(df.rdd.filter(lambda v: v[1] == 'United States of America').map(lambda x: (x[1], x[4])).groupByKey().mapValues(lambda x: sum(x) / len(x)).collect())
print('\n\n')
# Question 3
print('*' * 30)
print('\nQuestion 3\n')
print(df.rdd.filter(lambda v: v[4] > 100000).filter(lambda v: v[7] == 'FALSE').count())
print('\n\n')
# Question 4
print('*' * 30)
print('\nQuestion 4\n')
print(df.rdd.filter(lambda v: v[1] == 'United States of America').sortBy(lambda x: x[4], ascending = False).map(lambda x: (x[3], x[6], x[4], x[5])).take(10))
print('\n\n')
# Question 5
print('*' * 30)
print('\nQuestion 5\n')
print(df.rdd.groupBy(lambda x: x[5]).count())
print('\n\n')
# Question 6
print('*' * 30)
print('\nQuestion 6\n')
print(df.rdd.filter(lambda v: v[5] == 'Writer').filter(lambda x: x[4] < 100000).count())
print('\n\n')
| 26.017544 | 157 | 0.652057 |
e6fc5742d6236482be2f3020d03479a9c33e3222
| 274 |
py
|
Python
|
src/firebot/tests/factories.py
|
zipmex/fire
|
a41bbdbc86085c055ae4706fadea4f142e881a85
|
[
"Apache-2.0"
] | 52 |
2017-03-15T16:25:14.000Z
|
2022-03-01T16:50:14.000Z
|
src/firebot/tests/factories.py
|
zipmex/fire
|
a41bbdbc86085c055ae4706fadea4f142e881a85
|
[
"Apache-2.0"
] | 239 |
2017-03-16T17:10:22.000Z
|
2022-03-06T07:24:24.000Z
|
src/firebot/tests/factories.py
|
zipmex/fire
|
a41bbdbc86085c055ae4706fadea4f142e881a85
|
[
"Apache-2.0"
] | 8 |
2017-03-15T17:45:18.000Z
|
2022-01-26T14:51:03.000Z
|
import factory
from django.contrib.auth import get_user_model
| 21.076923 | 46 | 0.715328 |
e6fc7870ccb1bbdefca5d31e7c6358dd9b6c9578
| 482 |
py
|
Python
|
reamber/o2jam/O2JHold.py
|
Bestfast/reamberPy
|
91b76ca6adf11fbe8b7cee7c186481776a4d7aaa
|
[
"MIT"
] | null | null | null |
reamber/o2jam/O2JHold.py
|
Bestfast/reamberPy
|
91b76ca6adf11fbe8b7cee7c186481776a4d7aaa
|
[
"MIT"
] | null | null | null |
reamber/o2jam/O2JHold.py
|
Bestfast/reamberPy
|
91b76ca6adf11fbe8b7cee7c186481776a4d7aaa
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from reamber.base.Hold import Hold, HoldTail
from reamber.o2jam.O2JNoteMeta import O2JNoteMeta
| 21.909091 | 54 | 0.73029 |
e6fc89e2fb95df50b778c64242f30654175e9df4
| 566 |
py
|
Python
|
peacebot/core/plugins/Miscellaneous/__init__.py
|
Peacebot-Development/Peacebot-v2
|
79ab87b12cd60b708631d96021ac3d3eaeee01c9
|
[
"MIT"
] | 3 |
2021-11-13T06:26:06.000Z
|
2022-01-23T13:03:30.000Z
|
peacebot/core/plugins/Miscellaneous/__init__.py
|
Peacebot-Development/Peacebot-v2
|
79ab87b12cd60b708631d96021ac3d3eaeee01c9
|
[
"MIT"
] | 32 |
2021-11-12T15:29:04.000Z
|
2022-01-23T14:44:19.000Z
|
peacebot/core/plugins/Miscellaneous/__init__.py
|
Peacebot-Development/Peacebot-v2
|
79ab87b12cd60b708631d96021ac3d3eaeee01c9
|
[
"MIT"
] | 1 |
2021-11-13T06:34:03.000Z
|
2021-11-13T06:34:03.000Z
|
import lightbulb
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from peacebot.core.utils.time import TimeConverter
| 25.727273 | 68 | 0.738516 |
e6fe319ea41fa303d91576c379f5911e22bf4307
| 855 |
py
|
Python
|
example/android/python/msite_simple_default_browser.py
|
laichimirum/docker-appium-emulator
|
3549c5f1fc09bbc650dd30351ad4f509a72a90fa
|
[
"Apache-2.0"
] | 8 |
2019-04-26T04:09:40.000Z
|
2022-01-04T05:24:12.000Z
|
example/android/python/msite_simple_default_browser.py
|
laichimirum/docker-appium-emulator
|
3549c5f1fc09bbc650dd30351ad4f509a72a90fa
|
[
"Apache-2.0"
] | null | null | null |
example/android/python/msite_simple_default_browser.py
|
laichimirum/docker-appium-emulator
|
3549c5f1fc09bbc650dd30351ad4f509a72a90fa
|
[
"Apache-2.0"
] | 2 |
2019-12-16T15:34:57.000Z
|
2020-10-22T07:03:15.000Z
|
import unittest
from appium import webdriver
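class MSiteDefaultBrowserAndroidUITests(unittest.TestCase):
    """Hedged reconstruction: the original test body was stripped from
    this record; the capabilities and the visited URL are assumptions."""

    def setUp(self):
        caps = {
            'platformName': 'Android',
            'deviceName': 'Android Emulator',
            'browserName': 'Browser',
        }
        self.driver = webdriver.Remote('http://localhost:4723/wd/hub', caps)

    def tearDown(self):
        self.driver.quit()

    def test_open_page_in_default_browser(self):
        self.driver.get('https://www.wikipedia.org')
        self.assertTrue(len(self.driver.title) > 0)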
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(MSiteDefaultBrowserAndroidUITests)
unittest.TextTestRunner(verbosity=2).run(suite)
| 29.482759 | 90 | 0.65614 |
e6fe636ebee73df95de2568536aed7f6f3927fad
| 458 |
py
|
Python
|
src/nn/dataset_utils/types_processing.py
|
sola-st/Nalin
|
3a6f95cec95d9152a65af970cfbb145179b0bd72
|
[
"MIT"
] | null | null | null |
src/nn/dataset_utils/types_processing.py
|
sola-st/Nalin
|
3a6f95cec95d9152a65af970cfbb145179b0bd72
|
[
"MIT"
] | null | null | null |
src/nn/dataset_utils/types_processing.py
|
sola-st/Nalin
|
3a6f95cec95d9152a65af970cfbb145179b0bd72
|
[
"MIT"
] | null | null | null |
"""
Created on 17-June-2020
@author Jibesh Patra
The types extracted during runtime usually look something like --> <class 'numpy.ndarray'> or
<class 'seaborn.palettes._ColorPalette'>. Change them to --> ndarray, ColorPalette
"""
import re
remove_chars = re.compile(r'>|\'|<|(class )|_|(type)')
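def process_types(extracted_type: str) -> str:
    # Hedged sketch: the original function was stripped from this record
    # and the name `process_types` is an assumption. Strip the
    # "<class '...'>" wrapper and keep the last dotted component,
    # e.g. "<class 'numpy.ndarray'>" -> "ndarray".
    cleaned = remove_chars.sub('', extracted_type).strip()
    return cleaned.split('.')[-1]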
| 24.105263 | 93 | 0.696507 |
e6fee516b4253e139cd1d42c7d2077b96248a564
| 4,254 |
py
|
Python
|
src/canvas.py
|
soootaleb/spare
|
b454b9a8861df55c29fe55b4b584248a2ffe79cb
|
[
"Apache-2.0"
] | 1 |
2019-05-21T16:04:08.000Z
|
2019-05-21T16:04:08.000Z
|
src/canvas.py
|
soootaleb/school-spacial-relations
|
b454b9a8861df55c29fe55b4b584248a2ffe79cb
|
[
"Apache-2.0"
] | null | null | null |
src/canvas.py
|
soootaleb/school-spacial-relations
|
b454b9a8861df55c29fe55b4b584248a2ffe79cb
|
[
"Apache-2.0"
] | null | null | null |
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib.ticker as ticker
import numpy as np
import random  # pyplot is already imported above as plt
| 35.157025 | 107 | 0.609074 |
fc0054805adf6c4edaa7b274d8d98323387b2aa1
| 7,561 |
py
|
Python
|
src/cpg_scpi/test/__init__.py
|
GeorgBraun/cpg_scpi_python
|
ec74c15beaac0b002fb996a42f4e66ea369e1fc6
|
[
"MIT"
] | null | null | null |
src/cpg_scpi/test/__init__.py
|
GeorgBraun/cpg_scpi_python
|
ec74c15beaac0b002fb996a42f4e66ea369e1fc6
|
[
"MIT"
] | null | null | null |
src/cpg_scpi/test/__init__.py
|
GeorgBraun/cpg_scpi_python
|
ec74c15beaac0b002fb996a42f4e66ea369e1fc6
|
[
"MIT"
] | null | null | null |
'''Functional tests for CPG'''
from .. import CircuitPlayground
from .. import __version__ as CircuitPlaygroundVersion
import time
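def _printFuncTestHeadingWithDeliLine(heading: str) -> None:
    # Hedged helper: the original definition was stripped from this
    # record; it evidently prints a heading framed by a delimiter line.
    print('=' * 60)
    print(heading)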
def test_led(cpg) -> None:
'''Flash LEDs and run a short chasing light.'''
_printFuncTestHeadingWithDeliLine('LED-Test: Flash LEDs and run a short chasing light...')
print('flashing LEDs...')
test_ledDemo(cpg)
value=1
# print('| val | LEDs |')
for i in range(10):
# print(f'| {value:4} | {value:010b} |')
cpg.led(value)
cpg.wait(0.2)
value <<= 1 # shift 1 bit to the left
for i in range(10):
value >>= 1 # shift 1 bit to the right
# print(f'| {value:4} | {value:010b} |')
cpg.led(value)
cpg.wait(0.2)
print('flashing LEDs...')
test_ledDemo(cpg)
def test_ledDemo(cpg) -> None:
'''Flash LEDs three times.'''
for i in range(3):
cpg.ledDemo()
cpg.wait(0.2)
def testAccSpeed(cpg, iterations: int = 100) -> None:
'''Measure how long it takes to do an accelerometer measurement.'''
print(f'Testing acc measurement speed with {iterations} iterations. Please wait ...')
import timeit
result = timeit.Timer(stmt=lambda: cpg.acc(), setup='pass').timeit(number=iterations)
print(f'Total time: {result:.1f} seconds.')
print(f'On average {(result*1000/iterations):.1f} ms per measurement.')
def testLightSpeed(cpg, iterations: int = 100) -> None:
'''Measure how long it takes to do a light sensor measurement.'''
print(f'Testing light measurement speed with {iterations} iterations. Please wait ...')
import timeit
result = timeit.Timer(stmt=lambda: cpg.light(), setup='pass').timeit(number=iterations)
print(f'Total time: {result:.1f} seconds.')
print(f'On average {(result*1000/iterations):.1f} ms per measurement.')
def _testResponseWaitTime(cpg, iterations: int = 10000) -> None:
'''Test whether the wait time for additional, unexpected responses is long enough.'''
print(f'Testing Response-Wait-Time with {iterations} iterations ...')
for i in range(iterations):
if i%100==0: print('try-count', i)
try:
# Request acc measurement values, but do not expect any response, even if the CPG will send one.
cpg._query('MEAS:ACC?', 0)
# If we are still here, we did not get a response. This is bad.
print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
print('ERROR in testResponseWaitTime(): CPG-Response was too late.')
print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
except Exception:
# The normal behavior is a response, resulting in an exception.
# This is what we expected. Therefore, just continue.
pass
| 37.616915 | 151 | 0.61923 |
fc01125ff8253bbaef2b133518f03e3663e85d73
| 216 |
py
|
Python
|
main/models.py
|
yejun1060/SbjctSclctn
|
eca6a9d09cf81fce262ea58ca90e69ee5735ab16
|
[
"MIT"
] | null | null | null |
main/models.py
|
yejun1060/SbjctSclctn
|
eca6a9d09cf81fce262ea58ca90e69ee5735ab16
|
[
"MIT"
] | null | null | null |
main/models.py
|
yejun1060/SbjctSclctn
|
eca6a9d09cf81fce262ea58ca90e69ee5735ab16
|
[
"MIT"
] | 1 |
2021-06-08T17:41:42.000Z
|
2021-06-08T17:41:42.000Z
|
from django.db import models
| 21.6 | 42 | 0.689815 |
fc01bbc538287134d61e574ed4af064a81cfdf43
| 1,307 |
py
|
Python
|
test/utils/test_geodesic.py
|
shrey-bansal/pytorch_geometric
|
17108a08066b0a73530544d01719b186f2625ef2
|
[
"MIT"
] | 2 |
2020-12-06T13:10:52.000Z
|
2021-07-06T06:50:10.000Z
|
test/utils/test_geodesic.py
|
shrey-bansal/pytorch_geometric
|
17108a08066b0a73530544d01719b186f2625ef2
|
[
"MIT"
] | null | null | null |
test/utils/test_geodesic.py
|
shrey-bansal/pytorch_geometric
|
17108a08066b0a73530544d01719b186f2625ef2
|
[
"MIT"
] | 1 |
2019-05-31T02:45:38.000Z
|
2019-05-31T02:45:38.000Z
|
from math import sqrt
import torch
from torch_geometric.utils import geodesic_distance
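def test_geodesic_distance():
    # Hedged sketch (the original test body was stripped from this
    # record): two triangles spanning a 2x2 square; corners 0 and 3 are
    # joined by the shared diagonal edge of length 2*sqrt(2).
    pos = torch.tensor([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0],
                        [0.0, 2.0, 0.0], [2.0, 2.0, 0.0]])
    face = torch.tensor([[0, 1, 3], [0, 2, 3]]).t()
    out = geodesic_distance(pos, face, norm=False)
    assert out.size() == (4, 4)
    assert torch.allclose(out, out.t())
    assert abs(out[0, 3].item() - 2 * sqrt(2)) < 1e-4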
| 30.395349 | 76 | 0.574598 |
fc01d88d24681ec66a1cf06a3a055252d072afd3
| 31,292 |
py
|
Python
|
gridfs/grid_file.py
|
naomielst/mongo-python-driver
|
e3d1d6f5b48101654a05493fd6eec7fe3fa014bd
|
[
"Apache-2.0"
] | 2 |
2022-01-19T21:00:48.000Z
|
2022-01-27T05:54:13.000Z
|
gridfs/grid_file.py
|
naomielst/mongo-python-driver
|
e3d1d6f5b48101654a05493fd6eec7fe3fa014bd
|
[
"Apache-2.0"
] | 1 |
2021-12-24T11:32:17.000Z
|
2021-12-24T11:32:17.000Z
|
gridfs/grid_file.py
|
naomielst/mongo-python-driver
|
e3d1d6f5b48101654a05493fd6eec7fe3fa014bd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for representing files stored in GridFS."""
import datetime
import io
import math
import os
from bson.int64 import Int64
from bson.son import SON
from bson.binary import Binary
from bson.objectid import ObjectId
from pymongo import ASCENDING
from pymongo.collection import Collection
from pymongo.cursor import Cursor
from pymongo.errors import (ConfigurationError,
CursorNotFound,
DuplicateKeyError,
InvalidOperation,
OperationFailure)
from pymongo.read_preferences import ReadPreference
from gridfs.errors import CorruptGridFile, FileExists, NoFile
try:
_SEEK_SET = os.SEEK_SET
_SEEK_CUR = os.SEEK_CUR
_SEEK_END = os.SEEK_END
# before 2.5
except AttributeError:
_SEEK_SET = 0
_SEEK_CUR = 1
_SEEK_END = 2
EMPTY = b""
NEWLN = b"\n"
"""Default chunk size, in bytes."""
# Slightly under a power of 2, to work well with server's record allocations.
DEFAULT_CHUNK_SIZE = 255 * 1024
_C_INDEX = SON([("files_id", ASCENDING), ("n", ASCENDING)])
_F_INDEX = SON([("filename", ASCENDING), ("uploadDate", ASCENDING)])
def _grid_in_property(field_name, docstring, read_only=False,
closed_only=False):
"""Create a GridIn property."""
if read_only:
docstring += "\n\nThis attribute is read-only."
elif closed_only:
docstring = "%s\n\n%s" % (docstring, "This attribute is read-only and "
"can only be read after :meth:`close` "
"has been called.")
if not read_only and not closed_only:
return property(getter, setter, doc=docstring)
return property(getter, doc=docstring)
def _grid_out_property(field_name, docstring):
"""Create a GridOut property."""
docstring += "\n\nThis attribute is read-only."
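    # Hedged reconstruction: the original nested getter was stripped from
    # this record; lazy loading via _ensure_file() is an assumption.
    def getter(self):
        self._ensure_file()
        # Protect against PHP-237
        if field_name == 'length':
            return self._file.get(field_name, 0)
        return self._file.get(field_name, None)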
return property(getter, doc=docstring)
def _clear_entity_type_registry(entity, **kwargs):
"""Clear the given database/collection object's type registry."""
codecopts = entity.codec_options.with_options(type_registry=None)
return entity.with_options(codec_options=codecopts, **kwargs)
| 35.721461 | 103 | 0.599035 |
fc021cb14dd8b84a0a6873924f2194048e2791f0
| 1,415 |
py
|
Python
|
forte/processors/tests/stanfordnlp_processor_test.py
|
tcl326/forte
|
d0d7b8b97da5e1d507dfa7cd4ec51d96067770b8
|
[
"Apache-2.0"
] | null | null | null |
forte/processors/tests/stanfordnlp_processor_test.py
|
tcl326/forte
|
d0d7b8b97da5e1d507dfa7cd4ec51d96067770b8
|
[
"Apache-2.0"
] | null | null | null |
forte/processors/tests/stanfordnlp_processor_test.py
|
tcl326/forte
|
d0d7b8b97da5e1d507dfa7cd4ec51d96067770b8
|
[
"Apache-2.0"
] | null | null | null |
"""This module tests Stanford NLP processors."""
import os
import unittest
from texar.torch import HParams
from forte.pipeline import Pipeline
from forte.data.readers import StringReader
from forte.processors.stanfordnlp_processor import StandfordNLPProcessor
from ft.onto.base_ontology import Token, Sentence
| 36.282051 | 77 | 0.638869 |
fc02e2f67f44eb696a821c6397117531267c2ddc
| 496 |
py
|
Python
|
src/serve_files.py
|
eventh/m3u8looper
|
9c4ae166e9af4679cf64b19e3c3efc7bbdaed5a5
|
[
"MIT"
] | null | null | null |
src/serve_files.py
|
eventh/m3u8looper
|
9c4ae166e9af4679cf64b19e3c3efc7bbdaed5a5
|
[
"MIT"
] | null | null | null |
src/serve_files.py
|
eventh/m3u8looper
|
9c4ae166e9af4679cf64b19e3c3efc7bbdaed5a5
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Serve current folder files in a HTTP webserver.
"""
import socketserver
from threading import Thread
from http.server import SimpleHTTPRequestHandler
PORT = 8000
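def start_http_server():
    # Hedged reconstruction: the original helper was stripped from this
    # record. Serve the current directory on PORT in a daemon thread and
    # hand the thread back so the caller can join() it.
    httpd = socketserver.TCPServer(("", PORT), SimpleHTTPRequestHandler)
    thread = Thread(target=httpd.serve_forever, daemon=True)
    thread.start()
    return thread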
if __name__ == '__main__':
thread = start_http_server()
thread.join()
| 20.666667 | 76 | 0.709677 |
fc03078d9d14b23c740018bcdf9069c213af00f0
| 7,393 |
py
|
Python
|
pypy/module/__builtin__/test/test_compile.py
|
yxzoro/pypy
|
6e47b3d3e5513d9639a21554963a6ace172ccfee
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
pypy/module/__builtin__/test/test_compile.py
|
yxzoro/pypy
|
6e47b3d3e5513d9639a21554963a6ace172ccfee
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
pypy/module/__builtin__/test/test_compile.py
|
yxzoro/pypy
|
6e47b3d3e5513d9639a21554963a6ace172ccfee
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
# coding: utf-8
# TODO: Check the value of __debug__ inside of the compiled block!
# According to the documentation, it should follow the optimize flag.
# However, cpython3.5.0a0 behaves the same way as PyPy (__debug__ follows
# -O, -OO flags of the interpreter).
| 34.226852 | 79 | 0.538212 |
fc035a3b69dad59dad81dc8e5b68a8db4a2f4aff
| 12,207 |
py
|
Python
|
tickers_graphing_module.py
|
huangbrandon432/Investing-Trading-Tool
|
370015b906b7ee90c0fb48ca69865ac7428b3917
|
[
"BSD-3-Clause"
] | null | null | null |
tickers_graphing_module.py
|
huangbrandon432/Investing-Trading-Tool
|
370015b906b7ee90c0fb48ca69865ac7428b3917
|
[
"BSD-3-Clause"
] | null | null | null |
tickers_graphing_module.py
|
huangbrandon432/Investing-Trading-Tool
|
370015b906b7ee90c0fb48ca69865ac7428b3917
|
[
"BSD-3-Clause"
] | null | null | null |
import yfinance as yf
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import pandas as pd
from IPython.display import Markdown
import numpy as np
from datetime import date, timedelta
| 35.178674 | 169 | 0.530843 |
fc045ba1073202cd1ab4f6738b3709fb28279ff8
| 5,008 |
py
|
Python
|
flexbe_navigation_states/src/flexbe_navigation_states/navigation_sm.py
|
amsks/generic_flexbe_states
|
f7be84105d3370c943ed17fc19af672b330726de
|
[
"BSD-3-Clause"
] | null | null | null |
flexbe_navigation_states/src/flexbe_navigation_states/navigation_sm.py
|
amsks/generic_flexbe_states
|
f7be84105d3370c943ed17fc19af672b330726de
|
[
"BSD-3-Clause"
] | null | null | null |
flexbe_navigation_states/src/flexbe_navigation_states/navigation_sm.py
|
amsks/generic_flexbe_states
|
f7be84105d3370c943ed17fc19af672b330726de
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from flexbe_states.wait_state import WaitState
from flexbe_navigation_states.turn_right_sm import turn_rightSM
from flexbe_states.subscriber_state import SubscriberState
from flexbe_utility_states.MARCO import Carbonara
from flexbe_navigation_states.turn_left_sm import turn_leftSM
from flexbe_navigation_states.go_straight_sm import go_straightSM
from flexbe_navigation_states.obstacle_avoidance_sm import Obstacle_AvoidanceSM
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Sat Jul 18 2020
@author: TG4
'''
| 34.777778 | 131 | 0.659145 |
fc070f80801a319fdf697b23e027ce45aa2d558c
| 26,632 |
py
|
Python
|
text2cc/xml_assessment.py
|
dlehman83/text2cc
|
303798993590bceaeb5238a6cce82893c37cdfc7
|
[
"BSD-3-Clause"
] | 1 |
2021-02-12T09:34:07.000Z
|
2021-02-12T09:34:07.000Z
|
text2cc/xml_assessment.py
|
dlehman83/text2cc
|
303798993590bceaeb5238a6cce82893c37cdfc7
|
[
"BSD-3-Clause"
] | null | null | null |
text2cc/xml_assessment.py
|
dlehman83/text2cc
|
303798993590bceaeb5238a6cce82893c37cdfc7
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021, Dana Lehman
# Copyright (c) 2020, Geoffrey M. Poore
# All rights reserved.
#
# Licensed under the BSD 3-Clause License:
# http://opensource.org/licenses/BSD-3-Clause
#
from .quiz import Quiz, Question, GroupStart, GroupEnd, TextRegion
BEFORE_ITEMS = '''\
<?xml version="1.0" encoding="UTF-8"?>
<questestinterop xmlns="http://www.imsglobal.org/xsd/ims_qtiasiv1p2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.imsglobal.org/xsd/ims_qtiasiv1p2 http://www.imsglobal.org/profile/cc/ccv1p2/ccv1p2_qtiasiv1p2p1_v1p0.xsd">
<assessment ident="{assessment_identifier}" title="{title}">
<qtimetadata>
<qtimetadatafield>
<fieldlabel>cc_maxattempts</fieldlabel>
<fieldentry>1</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>
cc_profile
</fieldlabel>
<fieldentry>
cc.exam.v0p1
</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>
qmd_assessmenttype
</fieldlabel>
<fieldentry>
Examination
</fieldentry>
</qtimetadatafield>
</qtimetadata>
<section ident="root_section">
'''
AFTER_ITEMS = '''\
</section>
</assessment>
</questestinterop>
'''
GROUP_START = '''\
<section ident="{ident}" title="{group_title}">
<selection_ordering>
<selection>
<selection_number>{pick}</selection_number>
<selection_extension>
<points_per_item>{points_per_item}</points_per_item>
</selection_extension>
</selection>
</selection_ordering>
'''
GROUP_END = '''\
</section>
'''
TEXT = '''\
<item ident="{ident}" title="{text_title_xml}">
<itemmetadata>
<qtimetadata>
<qtimetadatafield>
<fieldlabel>cc_profile</fieldlabel>
<fieldentry>text_only_question</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>points_possible</fieldlabel>
<fieldentry>0</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>original_answer_ids</fieldlabel>
<fieldentry></fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>assessment_question_identifierref</fieldlabel>
<fieldentry>{assessment_question_identifierref}</fieldentry>
</qtimetadatafield>
</qtimetadata>
</itemmetadata>
<presentation>
<material>
<mattext texttype="text/html">{text_html_xml}</mattext>
</material>
</presentation>
</item>
'''
START_ITEM = '''\
<item ident="{question_identifier}" title="{question_title}">
'''
END_ITEM = '''\
</item>
'''
ITEM_METADATA_MCTF_SHORTANS_MULTANS_NUM = '''\
<itemmetadata>
<qtimetadata>
<qtimetadatafield>
<fieldlabel>cc_profile</fieldlabel>
<fieldentry>{question_type}</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>points_possible</fieldlabel>
<fieldentry>{points_possible}</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>original_answer_ids</fieldlabel>
<fieldentry>{original_answer_ids}</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>assessment_question_identifierref</fieldlabel>
<fieldentry>{assessment_question_identifierref}</fieldentry>
</qtimetadatafield>
</qtimetadata>
</itemmetadata>
'''
ITEM_METADATA_ESSAY = ITEM_METADATA_MCTF_SHORTANS_MULTANS_NUM.replace('{original_answer_ids}', '')
ITEM_METADATA_UPLOAD = ITEM_METADATA_ESSAY
ITEM_PRESENTATION_MCTF = '''\
<presentation>
<material>
<mattext texttype="text/html">{question_html_xml}</mattext>
</material>
<response_lid ident="response1" rcardinality="Single">
<render_choice>
{choices}
</render_choice>
</response_lid>
</presentation>
'''
ITEM_PRESENTATION_MCTF_CHOICE = '''\
<response_label ident="{ident}">
<material>
<mattext texttype="text/html">{choice_html_xml}</mattext>
</material>
</response_label>'''
ITEM_PRESENTATION_MULTANS = ITEM_PRESENTATION_MCTF.replace('Single', 'Multiple')
ITEM_PRESENTATION_MULTANS_CHOICE = ITEM_PRESENTATION_MCTF_CHOICE
ITEM_PRESENTATION_SHORTANS = '''\
<presentation>
<material>
<mattext texttype="text/html">{question_html_xml}</mattext>
</material>
<response_str ident="response1" rcardinality="Single">
<render_fib>
<response_label ident="answer1" rshuffle="No"/>
</render_fib>
</response_str>
</presentation>
'''
ITEM_PRESENTATION_ESSAY = '''\
<presentation>
<material>
<mattext texttype="text/html">{question_html_xml}</mattext>
</material>
<response_str ident="response1" rcardinality="Single">
<render_fib>
<response_label ident="answer1" rshuffle="No"/>
</render_fib>
</response_str>
</presentation>
'''
ITEM_PRESENTATION_UPLOAD = '''\
<presentation>
<material>
<mattext texttype="text/html">{question_html_xml}</mattext>
</material>
</presentation>
'''
ITEM_PRESENTATION_NUM = '''\
<presentation>
<material>
<mattext texttype="text/html">{question_html_xml}</mattext>
</material>
<response_str ident="response1" rcardinality="Single">
<render_fib fibtype="Decimal">
<response_label ident="answer1"/>
</render_fib>
</response_str>
</presentation>
'''
ITEM_RESPROCESSING_START = '''\
<resprocessing>
<outcomes>
<decvar maxvalue="100" minvalue="0" varname="SCORE" vartype="Decimal"/>
</outcomes>
'''
ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK = '''\
<respcondition continue="Yes">
<conditionvar>
<other/>
</conditionvar>
<displayfeedback feedbacktype="Response" linkrefid="general_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_MCTF_CHOICE_FEEDBACK = '''\
<respcondition continue="Yes">
<conditionvar>
<varequal respident="response1">{ident}</varequal>
</conditionvar>
<displayfeedback feedbacktype="Response" linkrefid="{ident}_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_MCTF_SET_CORRECT_WITH_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<varequal respident="response1">{ident}</varequal>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
<displayfeedback feedbacktype="Response" linkrefid="correct_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_MCTF_SET_CORRECT_NO_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<varequal respident="response1">{ident}</varequal>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
</respcondition>
'''
ITEM_RESPROCESSING_MCTF_INCORRECT_FEEDBACK = '''\
<respcondition continue="Yes">
<conditionvar>
<other/>
</conditionvar>
<displayfeedback feedbacktype="Response" linkrefid="general_incorrect_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_SHORTANS_GENERAL_FEEDBACK = ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK
ITEM_RESPROCESSING_SHORTANS_CHOICE_FEEDBACK = '''\
<respcondition continue="Yes">
<conditionvar>
<varequal respident="response1">{answer_xml}</varequal>
</conditionvar>
<displayfeedback feedbacktype="Response" linkrefid="{ident}_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_WITH_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
{varequal}
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
<displayfeedback feedbacktype="Response" linkrefid="correct_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_NO_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
{varequal}
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
</respcondition>
'''
ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_VAREQUAL = '''\
<varequal respident="response1">{answer_xml}</varequal>'''
ITEM_RESPROCESSING_SHORTANS_INCORRECT_FEEDBACK = ITEM_RESPROCESSING_MCTF_INCORRECT_FEEDBACK
ITEM_RESPROCESSING_MULTANS_GENERAL_FEEDBACK = ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK
ITEM_RESPROCESSING_MULTANS_CHOICE_FEEDBACK = ITEM_RESPROCESSING_MCTF_CHOICE_FEEDBACK
ITEM_RESPROCESSING_MULTANS_SET_CORRECT_WITH_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<and>
{varequal}
</and>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
<displayfeedback feedbacktype="Response" linkrefid="correct_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_MULTANS_SET_CORRECT_NO_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<and>
{varequal}
</and>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
</respcondition>
'''
ITEM_RESPROCESSING_MULTANS_SET_CORRECT_VAREQUAL_CORRECT = '''\
<varequal respident="response1">{ident}</varequal>'''
ITEM_RESPROCESSING_MULTANS_SET_CORRECT_VAREQUAL_INCORRECT = '''\
<not>
<varequal respident="response1">{ident}</varequal>
</not>'''
ITEM_RESPROCESSING_MULTANS_INCORRECT_FEEDBACK = ITEM_RESPROCESSING_MCTF_INCORRECT_FEEDBACK
ITEM_RESPROCESSING_ESSAY_GENERAL_FEEDBACK = ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK
ITEM_RESPROCESSING_UPLOAD_GENERAL_FEEDBACK = ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK
ITEM_RESPROCESSING_NUM_GENERAL_FEEDBACK = ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK
ITEM_RESPROCESSING_NUM_RANGE_SET_CORRECT_WITH_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<vargte respident="response1">{num_min}</vargte>
<varlte respident="response1">{num_max}</varlte>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
<displayfeedback feedbacktype="Response" linkrefid="correct_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_NUM_RANGE_SET_CORRECT_NO_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<vargte respident="response1">{num_min}</vargte>
<varlte respident="response1">{num_max}</varlte>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
</respcondition>
'''
ITEM_RESPROCESSING_NUM_EXACT_SET_CORRECT_WITH_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<or>
<varequal respident="response1">{num_exact}</varequal>
<and>
<vargte respident="response1">{num_min}</vargte>
<varlte respident="response1">{num_max}</varlte>
</and>
</or>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
<displayfeedback feedbacktype="Response" linkrefid="correct_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_NUM_EXACT_SET_CORRECT_NO_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<or>
<varequal respident="response1">{num_exact}</varequal>
<and>
<vargte respident="response1">{num_min}</vargte>
<varlte respident="response1">{num_max}</varlte>
</and>
</or>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
</respcondition>
'''
ITEM_RESPROCESSING_NUM_INCORRECT_FEEDBACK = ITEM_RESPROCESSING_MCTF_INCORRECT_FEEDBACK
ITEM_RESPROCESSING_ESSAY = '''\
<respcondition continue="No">
<conditionvar>
<other/>
</conditionvar>
</respcondition>
'''
ITEM_RESPROCESSING_END = '''\
</resprocessing>
'''
ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_GENERAL = '''\
<itemfeedback ident="general_fb">
<flow_mat>
<material>
<mattext texttype="text/html">{feedback}</mattext>
</material>
</flow_mat>
</itemfeedback>
'''
ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_CORRECT = '''\
<itemfeedback ident="correct_fb">
<flow_mat>
<material>
<mattext texttype="text/html">{feedback}</mattext>
</material>
</flow_mat>
</itemfeedback>
'''
ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_INCORRECT = '''\
<itemfeedback ident="general_incorrect_fb">
<flow_mat>
<material>
<mattext texttype="text/html">{feedback}</mattext>
</material>
</flow_mat>
</itemfeedback>
'''
ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_INDIVIDUAL = '''\
<itemfeedback ident="{ident}_fb">
<flow_mat>
<material>
<mattext texttype="text/html">{feedback}</mattext>
</material>
</flow_mat>
</itemfeedback>
'''
def assessment(*, quiz: Quiz, assessment_identifier: str, title_xml: str) -> str:
'''
Generate assessment XML from Quiz.
'''
xml = []
xml.append(BEFORE_ITEMS.format(assessment_identifier=assessment_identifier,
title=title_xml))
for question_or_delim in quiz.questions_and_delims:
if isinstance(question_or_delim, TextRegion):
xml.append(TEXT.format(ident=f'text2qti_text_{question_or_delim.id}',
text_title_xml=question_or_delim.title_xml,
assessment_question_identifierref=f'text2qti_question_ref_{question_or_delim.id}',
text_html_xml=question_or_delim.text_html_xml))
continue
if isinstance(question_or_delim, GroupStart):
xml.append(GROUP_START.format(ident=f'text2qti_group_{question_or_delim.group.id}',
group_title=question_or_delim.group.title_xml,
pick=question_or_delim.group.pick,
points_per_item=question_or_delim.group.points_per_question))
continue
if isinstance(question_or_delim, GroupEnd):
xml.append(GROUP_END)
continue
if not isinstance(question_or_delim, Question):
raise TypeError
question = question_or_delim
xml.append(START_ITEM.format(question_identifier=f'text2qti_question_{question.id}',
question_title=question.title_xml))
if question.type in ('true_false_question', 'multiple_choice_question',
'short_answer_question', 'multiple_answers_question'):
item_metadata = ITEM_METADATA_MCTF_SHORTANS_MULTANS_NUM
original_answer_ids = ','.join(f'text2qti_choice_{c.id}' for c in question.choices)
elif question.type == 'numerical_question':
item_metadata = ITEM_METADATA_MCTF_SHORTANS_MULTANS_NUM
original_answer_ids = f'text2qti_numerical_{question.id}'
elif question.type == 'essay_question':
item_metadata = ITEM_METADATA_ESSAY
original_answer_ids = f'text2qti_essay_{question.id}'
elif question.type == 'file_upload_question':
item_metadata = ITEM_METADATA_UPLOAD
original_answer_ids = f'text2qti_upload_{question.id}'
        else:
            raise ValueError(f'Unsupported question type {question.type!r}')
        # Type change for Schoology Common Cartridge (CC) import
if question.type == 'multiple_choice_question':
typechange = 'cc.multiple_choice.v0p1'
elif question.type == 'true_false_question':
typechange = 'cc.true_false.v0p1'
elif question.type == 'short_answer_question':
typechange = 'cc.fib.v0p1'
elif question.type == 'multiple_answers_question':
typechange = 'cc.multiple_response.v0p1'
elif question.type == 'essay_question':
typechange = 'cc.essay.v0p1'
else:
typechange = question.type
xml.append(item_metadata.format(question_type=typechange,
points_possible=question.points_possible,
original_answer_ids=original_answer_ids,
assessment_question_identifierref=f'text2qti_question_ref_{question.id}'))
if question.type in ('true_false_question', 'multiple_choice_question', 'multiple_answers_question'):
if question.type in ('true_false_question', 'multiple_choice_question'):
item_presentation_choice = ITEM_PRESENTATION_MCTF_CHOICE
item_presentation = ITEM_PRESENTATION_MCTF
elif question.type == 'multiple_answers_question':
item_presentation_choice = ITEM_PRESENTATION_MULTANS_CHOICE
item_presentation = ITEM_PRESENTATION_MULTANS
            else:
                raise ValueError(f'No presentation template for question type {question.type!r}')
choices = '\n'.join(item_presentation_choice.format(ident=f'text2qti_choice_{c.id}', choice_html_xml=c.choice_html_xml)
for c in question.choices)
xml.append(item_presentation.format(question_html_xml=question.question_html_xml, choices=choices))
elif question.type == 'short_answer_question':
xml.append(ITEM_PRESENTATION_SHORTANS.format(question_html_xml=question.question_html_xml))
elif question.type == 'numerical_question':
xml.append(ITEM_PRESENTATION_NUM.format(question_html_xml=question.question_html_xml))
elif question.type == 'essay_question':
xml.append(ITEM_PRESENTATION_ESSAY.format(question_html_xml=question.question_html_xml))
elif question.type == 'file_upload_question':
xml.append(ITEM_PRESENTATION_UPLOAD.format(question_html_xml=question.question_html_xml))
        else:
            raise ValueError(f'No presentation handling for question type {question.type!r}')
if question.type in ('true_false_question', 'multiple_choice_question'):
correct_choice = None
for choice in question.choices:
if choice.correct:
correct_choice = choice
break
            if correct_choice is None:
                raise ValueError('Multiple-choice/true-false question has no correct choice')
resprocessing = []
resprocessing.append(ITEM_RESPROCESSING_START)
if question.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK)
for choice in question.choices:
if choice.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MCTF_CHOICE_FEEDBACK.format(ident=f'text2qti_choice_{choice.id}'))
if question.correct_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MCTF_SET_CORRECT_WITH_FEEDBACK.format(ident=f'text2qti_choice_{correct_choice.id}'))
else:
resprocessing.append(ITEM_RESPROCESSING_MCTF_SET_CORRECT_NO_FEEDBACK.format(ident=f'text2qti_choice_{correct_choice.id}'))
if question.incorrect_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MCTF_INCORRECT_FEEDBACK)
resprocessing.append(ITEM_RESPROCESSING_END)
xml.extend(resprocessing)
elif question.type == 'short_answer_question':
resprocessing = []
resprocessing.append(ITEM_RESPROCESSING_START)
if question.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_SHORTANS_GENERAL_FEEDBACK)
for choice in question.choices:
if choice.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_SHORTANS_CHOICE_FEEDBACK.format(ident=f'text2qti_choice_{choice.id}', answer_xml=choice.choice_xml))
varequal = []
for choice in question.choices:
varequal.append(ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_VAREQUAL.format(answer_xml=choice.choice_xml))
if question.correct_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_WITH_FEEDBACK.format(varequal='\n'.join(varequal)))
else:
resprocessing.append(ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_NO_FEEDBACK.format(varequal='\n'.join(varequal)))
if question.incorrect_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_SHORTANS_INCORRECT_FEEDBACK)
resprocessing.append(ITEM_RESPROCESSING_END)
xml.extend(resprocessing)
elif question.type == 'multiple_answers_question':
resprocessing = []
resprocessing.append(ITEM_RESPROCESSING_START)
if question.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MULTANS_GENERAL_FEEDBACK)
for choice in question.choices:
if choice.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MULTANS_CHOICE_FEEDBACK.format(ident=f'text2qti_choice_{choice.id}'))
varequal = []
for choice in question.choices:
if choice.correct:
varequal.append(ITEM_RESPROCESSING_MULTANS_SET_CORRECT_VAREQUAL_CORRECT.format(ident=f'text2qti_choice_{choice.id}'))
else:
varequal.append(ITEM_RESPROCESSING_MULTANS_SET_CORRECT_VAREQUAL_INCORRECT.format(ident=f'text2qti_choice_{choice.id}'))
if question.correct_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MULTANS_SET_CORRECT_WITH_FEEDBACK.format(varequal='\n'.join(varequal)))
else:
resprocessing.append(ITEM_RESPROCESSING_MULTANS_SET_CORRECT_NO_FEEDBACK.format(varequal='\n'.join(varequal)))
if question.incorrect_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MULTANS_INCORRECT_FEEDBACK)
resprocessing.append(ITEM_RESPROCESSING_END)
xml.extend(resprocessing)
elif question.type == 'numerical_question':
xml.append(ITEM_RESPROCESSING_START)
if question.feedback_raw is not None:
xml.append(ITEM_RESPROCESSING_NUM_GENERAL_FEEDBACK)
if question.correct_feedback_raw is None:
if question.numerical_exact is None:
item_resprocessing_num_set_correct = ITEM_RESPROCESSING_NUM_RANGE_SET_CORRECT_NO_FEEDBACK
else:
item_resprocessing_num_set_correct = ITEM_RESPROCESSING_NUM_EXACT_SET_CORRECT_NO_FEEDBACK
else:
if question.numerical_exact is None:
item_resprocessing_num_set_correct = ITEM_RESPROCESSING_NUM_RANGE_SET_CORRECT_WITH_FEEDBACK
else:
item_resprocessing_num_set_correct = ITEM_RESPROCESSING_NUM_EXACT_SET_CORRECT_WITH_FEEDBACK
xml.append(item_resprocessing_num_set_correct.format(num_min=question.numerical_min_html_xml,
num_exact=question.numerical_exact_html_xml,
num_max=question.numerical_max_html_xml))
if question.incorrect_feedback_raw is not None:
xml.append(ITEM_RESPROCESSING_NUM_INCORRECT_FEEDBACK)
xml.append(ITEM_RESPROCESSING_END)
elif question.type == 'essay_question':
xml.append(ITEM_RESPROCESSING_START)
xml.append(ITEM_RESPROCESSING_ESSAY)
if question.feedback_raw is not None:
xml.append(ITEM_RESPROCESSING_ESSAY_GENERAL_FEEDBACK)
xml.append(ITEM_RESPROCESSING_END)
elif question.type == 'file_upload_question':
xml.append(ITEM_RESPROCESSING_START)
if question.feedback_raw is not None:
xml.append(ITEM_RESPROCESSING_UPLOAD_GENERAL_FEEDBACK)
xml.append(ITEM_RESPROCESSING_END)
        else:
            raise ValueError(f'No response processing for question type {question.type!r}')
if question.type in ('true_false_question', 'multiple_choice_question',
'short_answer_question', 'multiple_answers_question',
'numerical_question', 'essay_question', 'file_upload_question'):
if question.feedback_raw is not None:
xml.append(ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_GENERAL.format(feedback=question.feedback_html_xml))
if question.correct_feedback_raw is not None:
xml.append(ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_CORRECT.format(feedback=question.correct_feedback_html_xml))
if question.incorrect_feedback_raw is not None:
xml.append(ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_INCORRECT.format(feedback=question.incorrect_feedback_html_xml))
if question.type in ('true_false_question', 'multiple_choice_question',
'short_answer_question', 'multiple_answers_question'):
for choice in question.choices:
if choice.feedback_raw is not None:
xml.append(ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_INDIVIDUAL.format(ident=f'text2qti_choice_{choice.id}',
feedback=choice.feedback_html_xml))
xml.append(END_ITEM)
xml.append(AFTER_ITEMS)
return ''.join(xml)
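# Hedged usage sketch (not part of the original module): the Quiz construction
# below is an assumption about the surrounding package's API; assessment() itself
# only needs a Quiz plus identifier/title strings and returns the QTI XML string.
#
#     quiz = Quiz(markdown_text)
#     xml = assessment(quiz=quiz,
#                      assessment_identifier='text2qti_assessment_1',
#                      title_xml='Sample quiz')
#     pathlib.Path('assessment.xml').write_text(xml)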
| 40.474164 | 260 | 0.629769 |
fc072ef6a205b171dfc4d3510829d73d11a5f833
| 2,360 |
py
|
Python
|
tests/test_aggregate_stats_design.py
|
bids-standard/bids-statsmodels-design-synthesizer
|
d8a1dac3891760990082c2d3aa75a1edda44ffa0
|
[
"MIT"
] | null | null | null |
tests/test_aggregate_stats_design.py
|
bids-standard/bids-statsmodels-design-synthesizer
|
d8a1dac3891760990082c2d3aa75a1edda44ffa0
|
[
"MIT"
] | 1 |
2021-05-12T21:53:53.000Z
|
2021-05-12T22:26:09.000Z
|
tests/test_aggregate_stats_design.py
|
bids-standard/bids-statsmodels-design-synthesizer
|
d8a1dac3891760990082c2d3aa75a1edda44ffa0
|
[
"MIT"
] | 3 |
2021-05-06T12:44:04.000Z
|
2021-05-12T21:42:59.000Z
|
#!/usr/bin/env python
"""Tests for `bids_statsmodels_design_synthesizer` package."""
import pytest
import subprocess as sp
from pathlib import Path
SYNTHESIZER = "aggregate_stats_design.py"
from bids_statsmodels_design_synthesizer import aggregate_stats_design as synth_mod
# from bids_statsmodels_design_synthesizer import Path(SYNTHESIZER).stem as synth_mod
EXAMPLE_USER_ARGS = {
"OUTPUT_TSV": "aggregated_design.tsv",
"MODEL": "data/ds000003/models/model-001_smdl.json",
"EVENTS_TSV": "data/ds000003/sub-01/func/sub-01_task-rhymejudgment_events.tsv",
"DURATION": 320,
}
def test_minimal_cli_functionality():
"""
We roughly want to implement the equivalent of the following:
from bids.analysis import Analysis
from bids.layout import BIDSLayout
layout = BIDSLayout("data/ds000003")
analysis = Analysis(model="data/ds000003/models/model-001_smdl.json",layout=layout)
analysis.setup()
more specifically we want to reimplement this line
https://github.com/bids-standard/pybids/blob/b6cd0f6787230ce976a374fbd5fce650865752a3/bids/analysis/analysis.py#L282
"""
bids_dir = Path(__file__).parent / "data/ds000003"
model = "model-001_smdl.json"
    arg_list = " ".join([f"""--{k.lower().replace("_","-")}={v}""" for k, v in EXAMPLE_USER_ARGS.items()])
cmd = f"{SYNTHESIZER} {arg_list}"
output = sp.check_output(cmd.split())
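    # subprocess.check_output raises CalledProcessError on a nonzero exit status,
    # so reaching this point means the CLI ran end to end; a minimal sanity
    # assertion follows (hedged: the original test body may have checked more).
    assert output is not None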
| 36.875 | 199 | 0.715678 |
fc084ddbb4a5b92a2c3c4c62cd1d09d582bd5892
| 689 |
py
|
Python
|
skynet-agent/plugins/plugin_api.py
|
skynetera/skynet
|
24a50f2a2eb95b777802934a2b66f162bf4b2d53
|
[
"Apache-2.0"
] | 3 |
2016-09-12T08:54:46.000Z
|
2016-09-18T07:54:10.000Z
|
skynet-agent/plugins/plugin_api.py
|
skynetera/skynet
|
24a50f2a2eb95b777802934a2b66f162bf4b2d53
|
[
"Apache-2.0"
] | null | null | null |
skynet-agent/plugins/plugin_api.py
|
skynetera/skynet
|
24a50f2a2eb95b777802934a2b66f162bf4b2d53
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
__author__ = 'whoami'
"""
@version: 1.0
@author: whoami
@license: Apache Licence 2.0
@contact: [email protected]
@site: http://www.itweet.cn
@software: PyCharm Community Edition
@file: plugin_api.py
@time: 2015-11-28 1:52
"""
from linux import cpu,disk,iostats,loadavg,memory,netstats,swap
| 17.225 | 63 | 0.71553 |
fc09cc4c599dae963fa070fbe9dc0b9a5e9e17c9
| 1,425 |
py
|
Python
|
code/figure_warp.py
|
jwcarr/drift
|
a514c5970ba53025cc142257e953c1bda3cd049c
|
[
"CC-BY-4.0"
] | 2 |
2021-11-19T10:12:58.000Z
|
2021-11-30T03:33:59.000Z
|
code/figure_warp.py
|
jwcarr/vertical_drift
|
5b4b6c475b5118950514dc01960391ef0d95bd19
|
[
"CC-BY-4.0"
] | null | null | null |
code/figure_warp.py
|
jwcarr/vertical_drift
|
5b4b6c475b5118950514dc01960391ef0d95bd19
|
[
"CC-BY-4.0"
] | null | null | null |
import numpy as np
import eyekit
import algorithms
import core
data = eyekit.io.load(core.FIXATIONS / 'sample.json')
passages = eyekit.io.load(core.DATA / 'passages.json')
original_sequence = data['trial_5']['fixations']
fixation_XY = np.array([fixation.xy for fixation in original_sequence], dtype=int)
word_XY = np.array([word.center for word in passages['1B'].words(alphabetical_only=False)], dtype=int)
start_times = np.array([i*100 for i in range(len(word_XY))], dtype=int)
expected_sequence = eyekit.FixationSequence(np.column_stack([word_XY, start_times, start_times+100]))
diagram = eyekit.vis.Image(1920, 1080)
diagram.draw_text_block(passages['1B'], mask_text=True)
diagram.draw_fixation_sequence(expected_sequence, color='#E32823', fixation_radius=6)
diagram.draw_fixation_sequence(original_sequence, color='#205E84', fixation_radius=6)
_, warping_path = algorithms.dynamic_time_warping(fixation_XY, word_XY)
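# Inferred from its use below (algorithms is project-local): warping_path[i] is the
# list of word indices that dynamic time warping aligned to fixation i.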
for fixation, mapped_words in zip(original_sequence, warping_path):
for word_i in mapped_words:
word_x, word_y = word_XY[word_i]
diagram.draw_line(fixation.xy, (word_x, word_y), color='black', stroke_width=0.5, dashed=True)
fig = eyekit.vis.Figure()
fig.add_image(diagram)
fig.set_crop_margin(2)
fig.set_padding(vertical=2, horizontal=3, edge=1)
fig.set_enumeration(False)
fig.save(core.VISUALS / 'illustration_warp.pdf', width=83)
# fig.save(core.FIGS / 'fig02_single_column.eps', width=83)
| 39.583333 | 102 | 0.781754 |
fc0a7d892ee7ccba2ec10d7aa3adc47150da3dac
| 98,817 |
py
|
Python
|
storm/Nimbus.py
|
krux/python-storm
|
1a9c06d3580a2b1bc2c27174d892a6dbcaa9e0bd
|
[
"BSD-3-Clause"
] | null | null | null |
storm/Nimbus.py
|
krux/python-storm
|
1a9c06d3580a2b1bc2c27174d892a6dbcaa9e0bd
|
[
"BSD-3-Clause"
] | null | null | null |
storm/Nimbus.py
|
krux/python-storm
|
1a9c06d3580a2b1bc2c27174d892a6dbcaa9e0bd
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Autogenerated by Thrift Compiler (0.9.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
# HELPER FUNCTIONS AND STRUCTURES
| 30.182346 | 188 | 0.668508 |
fc0c40028b9c4945addfec469dd5871c8f82e05b
| 52 |
py
|
Python
|
gemucator/__init__.py
|
philipwfowler/genucator
|
d43a79afe1aa81ca24d7ab4370ed230e08aa89bf
|
[
"MIT"
] | null | null | null |
gemucator/__init__.py
|
philipwfowler/genucator
|
d43a79afe1aa81ca24d7ab4370ed230e08aa89bf
|
[
"MIT"
] | null | null | null |
gemucator/__init__.py
|
philipwfowler/genucator
|
d43a79afe1aa81ca24d7ab4370ed230e08aa89bf
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
from .core import gemucator
| 13 | 27 | 0.730769 |
fc0db1d4c1d538c8a8da3398414e346edd37ebe8
| 166 |
py
|
Python
|
client/checkout/schema/types.py
|
daniel-waruo/e-commerse-api
|
6b080039398fb4099a34335317d649dd67783f63
|
[
"Apache-2.0"
] | 6 |
2019-11-21T10:09:49.000Z
|
2021-06-19T09:52:59.000Z
|
client/checkout/schema/types.py
|
daniel-waruo/e-commerse-api
|
6b080039398fb4099a34335317d649dd67783f63
|
[
"Apache-2.0"
] | null | null | null |
client/checkout/schema/types.py
|
daniel-waruo/e-commerse-api
|
6b080039398fb4099a34335317d649dd67783f63
|
[
"Apache-2.0"
] | null | null | null |
import graphene
from graphene_django import DjangoObjectType
from graphene_django.converter import convert_django_field
from pyuploadcare.dj.models import ImageField
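# Hedged sketch (assumed intent, not in the original excerpt): graphene-django does
# not know Uploadcare's ImageField, so a converter along these lines is typically
# registered before defining DjangoObjectType subclasses that expose such a field.
@convert_django_field.register(ImageField)
def convert_uploadcare_image_to_string(field, registry=None):
    # Represent the image by its CDN URL / UUID string in the GraphQL schema.
    return graphene.String(description=field.help_text, required=not field.null)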
| 33.2 | 58 | 0.89759 |
fc0e5695633a29e1789efba016b66fc96fcedf4a
| 15,518 |
py
|
Python
|
pangenome_fluidity.py
|
PlantDr430/CSU_scripts
|
8ed9e1dc014b099ce68d77ce5c8747217c230e61
|
[
"MIT"
] | 1 |
2020-03-02T04:26:21.000Z
|
2020-03-02T04:26:21.000Z
|
pangenome_fluidity.py
|
PlantDr430/CSU_scripts
|
8ed9e1dc014b099ce68d77ce5c8747217c230e61
|
[
"MIT"
] | null | null | null |
pangenome_fluidity.py
|
PlantDr430/CSU_scripts
|
8ed9e1dc014b099ce68d77ce5c8747217c230e61
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
'''
This script follows formulas put forth in Kislyuk et al. (2011) to calculate genome
fluidity of a pangenome dataset. Variance and standard error are estimated as total
variance containing both the variance due to subsampling all possible combinations
(without replacement) of N genomes from the total pool of genomes and the variance
due to the limited number of sampled genomes (variance of the pangenome)(Kislyuk et al. 2011).
However, the script has a default max number of subsamples set to 250,000 for each N genomes.
This can be altered with the -max_sub / --max_subsamples flag or turned off with the --max_off flag.
Turning the max_off will force calculations to be done on all possible subsample combinations
of N genomes. For samples of N genomes that were stopped at the max number of subsamples the subsamples
are sampled WITH replacement and variance is calculated with a degree of freedom = 1 (i.e. n - 1).
Results are a text file of fluidity, variance, and standard error for all N genome samples
and a figure of pangenome fluidity with shaded regions showing total standard error with a
exponential regression fit.
Notes
1. This will only work if you have at least 5 isolates to make up your pangenome.
2. If you have 5 isolates your graph will probably not look pretty as it's difficult
to fit with such a low number of samples.
'''
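# The quantity computed below (Kislyuk et al. 2011): for a pair of genomes (k, l),
# the pair ratio is (U_k + U_l) / (M_k + M_l), where U_k counts gene clusters unique
# to genome k within the pair and M_k counts all clusters containing genome k. For N
# genomes, fluidity is the average over all N*(N-1)/2 pairs:
#     phi = 2 / (N * (N - 1)) * sum_over_pairs(pair_ratio)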
import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from multiprocessing import Pool
from itertools import combinations
from collections import OrderedDict
from collections.abc import Iterable
from scipy.optimize import curve_fit, differential_evolution
rundir = os.getcwd()
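# Assumed minimal reconstruction: the original MyFormatter definition was not
# included in this excerpt; argparse only requires a HelpFormatter subclass here.
class MyFormatter(argparse.HelpFormatter):
    def __init__(self, prog):
        super(MyFormatter, self).__init__(prog, max_help_position=48)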
parser = argparse.ArgumentParser(
usage='./%(prog)s [options] -i orthogroups -o output_folder',
description = ''' Performs multiple bootstraps and calculates genome fluidity
from a pangenome dataset (orthogroups).''',
epilog = """Written by Stephen A. Wyka (2019)""",
formatter_class = MyFormatter)
parser.add_argument(
'-i',
'--input',
required = True,
help = 'Orthogroups file, see format in READ.me',
metavar=''
)
parser.add_argument(
'-o',
'--out',
required = True,
help = 'Output folder',
metavar=''
)
parser.add_argument(
'-c',
'--cpus',
type=int,
default=1,
help = 'Number of cores to use for multiprocessing [default: 1]',
metavar=''
)
parser.add_argument(
'-max_sub',
'--max_subsamples',
type=int,
default=250000,
help = 'Max number of subsamples to run on N genomes sampled. [default: 250000]',
metavar=''
)
parser.add_argument(
'--max_off',
action='store_true',
    help = 'Turn off the max subsamples. This will cause the script to sample ALL '\
    'possible combinations for N genomes',
)
parser.add_argument(
'-p',
'--prefix',
help = 'Prefix to append to the result files (such as Genus, species, etc.)',
metavar=''
)
args=parser.parse_args()
if not os.path.isdir(args.out):
os.makedirs(os.path.join(args.out))
result_dir = os.path.abspath(os.path.join(rundir, args.out))
if args.input:
input_file = os.path.abspath(args.input)
else:
    print('ERROR: No orthogroups file was provided; please provide one with -i or --input')
sys.exit()
if args.prefix:
fluid_results = os.path.abspath(os.path.join(result_dir, args.prefix+'_fluidity.txt'))
fluid_fig = os.path.abspath(os.path.join(result_dir, args.prefix+'_fluidity.png'))
else:
fluid_results = os.path.abspath(os.path.join(result_dir, 'Pangenome_fluidity.txt'))
fluid_fig = os.path.abspath(os.path.join(result_dir, 'Pangenome_fluidity.png'))
def create_ortho_dictionary(ortho_file): # create dictionary of gene clusters and isolates per cluster
    '''Generate dictionary of Orthogroups.'''
print('Creating ortholog dictionary')
ortho_isolates_dict = OrderedDict() # {Protein Cluster : list of isolates represented in cluster}
with open(ortho_file, 'r') as infile:
ortho_list = [item.strip() for item in sorted(infile)]
for line in ortho_list:
iso_list = []
if ':' in line:
cluster, genes = line.split(':')
elif '\t' in line:
cluster, genes = line.split('\t', 1)
else:
cluster, genes = line.split(' ', 1)
for match in re.finditer(r'([^\s]+)', genes):
isolate = match.group(0).split('_')[0]
iso_list.append(isolate)
ortho_isolates_dict[cluster] = list(set(iso_list))
return ortho_isolates_dict
def create_pair_dictionary(ortho_dictionary):
'''Create all possible unique pairs of isolates and get their unique
sum gene clusters.'''
print('Creating dictionary of paired ratio values')
pair_dict = {} # {(Isolate1, Isolate2) : [ratio of sum(unique clusters)/sum(all clusters)]}
for i in range(0, len(iso_list)):
for x in range(0, len(iso_list)):
if not iso_list[i] == iso_list[x]:
pair = tuple(sorted([iso_list[i], iso_list[x]]))
if not pair in pair_dict.keys():
cogs = {'Shared' : 0, 'Uk' : 0, 'Ul' : 0}
for k,v in ortho_dictionary.items():
if pair[0] in v and pair[1] in v:
cogs['Shared'] += 1
elif pair[0] in v and pair[1] not in v:
cogs['Uk'] += 1
elif pair[0] not in v and pair[1] in v:
cogs['Ul'] += 1
else:
pass # don't need to count a cluster if both isolates are not present
unique_pair = cogs['Uk'] + cogs['Ul']
all_pair = (cogs['Uk'] + cogs['Shared']) + (cogs['Ul'] + cogs['Shared'])
pair_dict[pair] = unique_pair/all_pair
return pair_dict
def compute_fluidity_all_genomes():
'''
Computes the fluidity and variance for the pangenome in question from the max number
of genomes in the pangenome.
'''
N = iso_num
fluidity_list = [ratio for ratio in pair_dict.values()] # list of ratios
pangenome_fluidity = (2/(N*(N-1)))*sum(fluidity_list) # get fluidity from average of all ratios
jack_samples = list(combinations(iso_list, N - 1)) # get list of all combos of N-1 from max num of genomes
fluidity_i_list = []
for sample in jack_samples:
jack_pairs = tuple(combinations(sample,2)) # get all pairs from current jackknife sample
jack_sample_fluidity = [pair_dict[tuple(sorted(p))] for p in jack_pairs] # get ratios from pair_dict
fluidity_i = (2/((N-1)*(N-2)))*sum(jack_sample_fluidity) # calculate fluidity_i
fluidity_i_list.append(fluidity_i)
fluidity_i_mean = np.mean(fluidity_i_list) # calculate fluidity_i_mean from all fluidity_i's
fluidity_variance = ((N-1)/N)*sum([(i-fluidity_i_mean)**2 for i in fluidity_i_list]) # calculate variance
return pangenome_fluidity, fluidity_variance
def subsample_multiprocess(combo_list):
'''
Takes portions of the full combo_list and runs them on separate threads for faster processing.
    Calculates fluidity for each sample and returns a list of fluidities.
'''
N = len(combo_list[0]) # get N from number of genomes present
sample_process_list = []
for sample in combo_list:
pairs = tuple(combinations(sample,2))
pair_fluidity_list = [pair_dict[tuple(sorted(p))] for p in pairs]
sample_fluidity = (2/(N*(N-1)))*sum(pair_fluidity_list)
sample_process_list.append(sample_fluidity)
return sample_process_list
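# Assumed reconstruction (the original helper was elided from this excerpt): flatten
# arbitrarily nested iterables of fluidity values; collections.abc.Iterable is
# already imported above for exactly this purpose.
def flatten(items):
    for item in items:
        if isinstance(item, Iterable) and not isinstance(item, (str, bytes)):
            yield from flatten(item)
        else:
            yield item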
def genome_subsamples_fluidities(perm_list):
'''
    Compute fluidities from all possible combinations of genomes from 3 to N randomly sampled genomes
    (N is the max number of genomes in the sample, so it is only sampled once). Has a cut-off of max
    subsamples, at which point variances are calculated as sample variances (n-1) instead of full
    population variances.
'''
sub_fluid_dict = {} # {N genomes sampled : [list of fluidities from subsamples]}
for N in range(3, iso_num + 1):
sub_fluid_dict[N] = []
N_combos = list(combinations(iso_list, N))
if args.max_off:
combos = N_combos
else:
if len(N_combos) > args.max_subsamples:
combos = random.choices(N_combos, k=args.max_subsamples)
perm_list.append(N)
else:
combos = N_combos
print('Performing fluidity calculations on {} subsample combinations of {} genomes'.format(len(combos),N))
if not len(N_combos) == 1:
chunk = round(len(combos)/args.cpus)
split_combos = [combos[i:i + chunk] for i in range(0, len(combos), chunk)]
pool = Pool(processes=args.cpus)
results = pool.imap(subsample_multiprocess, split_combos)
pool.close()
pool.join()
sub_fluid_dict[N].append(results)
else:
last_run = subsample_multiprocess(N_combos)
sub_fluid_dict[N].append(last_run)
        sub_fluid_dict[N] = list(flatten(sub_fluid_dict[N]))
        print('Collected {} fluidity values for N = {}'.format(len(sub_fluid_dict[N]), N))
return sub_fluid_dict
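# Hedged sketch of the elided reporting step, following the module docstring: write
# fluidity, total variance, and total standard error per sample size N, then plot
# fluidity with a shaded standard-error band and an exponential regression fit. The
# exponential() model and curve_fit settings are assumptions, not the original code.
def create_fluidity_results(figure_path, results_path):
    rows = []
    for N in range(3, iso_num + 1):
        samples = sub_fluid_dict[N]
        ddof = 1 if N in permutation_list else 0  # capped samples -> sample variance
        total_variance = np.var(samples, ddof=ddof) + pan_variance
        total_stderr = total_variance ** 0.5 / N ** 0.5
        rows.append((N, float(np.mean(samples)), total_variance, total_stderr))
    with open(results_path, 'w') as out:
        out.write('Genomes_Sampled\tFluidity\tTotal_Variance\tTotal_Stderr\n')
        for row in rows:
            out.write('\t'.join(str(v) for v in row) + '\n')
    x = np.array([r[0] for r in rows], dtype=float)
    y = np.array([r[1] for r in rows])
    err = np.array([r[3] for r in rows])
    def exponential(x, a, b, c):  # assumed regression model
        return a * np.exp(-b * x) + c
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        popt, _ = curve_fit(exponential, x, y, p0=(1.0, 0.1, y[-1]), maxfev=10000)
    fig, ax = plt.subplots()
    ax.fill_between(x, y - err, y + err, alpha=0.3, color='#86C67C')
    ax.plot(x, exponential(x, *popt), color='black', linewidth=1)
    ax.scatter(x, y, s=10, color='#0A0A0A')
    ax.set_xlabel('Number of genomes sampled')
    ax.set_ylabel('Genome fluidity')
    fig.savefig(figure_path, dpi=300, bbox_inches='tight')
    plt.close(fig)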
if __name__ == "__main__":
ortho_dict = create_ortho_dictionary(input_file)
iso_num = max([len(v) for v in ortho_dict.values()])
iso_list = list(set(itertools.chain.from_iterable([v for v in ortho_dict.values() if len(v) == iso_num])))
pair_dict = create_pair_dictionary(ortho_dict)
pan_results = compute_fluidity_all_genomes()
pan_fluidity = pan_results[0]
pan_variance = pan_results[1]
permutation_list = []
sub_fluid_dict = genome_subsamples_fluidities(permutation_list)
create_fluidity_results(fluid_fig, fluid_results)
| 46.322388 | 135 | 0.669738 |
fc100b64b37cc26f7af79a394d9e388ede43f204
| 7,610 |
py
|
Python
|
osvolbackup/backup.py
|
CCSGroupInternational/osvolbackup
|
d0d93812a729acdb6c961c6bdd1cc2cb5c9c87f5
|
[
"Apache-2.0"
] | 1 |
2019-02-27T12:59:49.000Z
|
2019-02-27T12:59:49.000Z
|
osvolbackup/backup.py
|
CCSGroupInternational/osvolbackup
|
d0d93812a729acdb6c961c6bdd1cc2cb5c9c87f5
|
[
"Apache-2.0"
] | 4 |
2019-03-07T09:31:51.000Z
|
2019-03-12T15:19:40.000Z
|
osvolbackup/backup.py
|
CCSGroupInternational/osvolbackup
|
d0d93812a729acdb6c961c6bdd1cc2cb5c9c87f5
|
[
"Apache-2.0"
] | null | null | null |
#
# This module provides the Instance class, which encapsulates some complex server-instance-related operations
#
from __future__ import print_function
from json import loads
from neutronclient.v2_0 import client as neutron_client
from novaclient import client as nova_client
from cinderclient import client as cinder_client
from osvolbackup.server import ServerInstance, ServerNotFound
from osvolbackup.osauth import get_session, VERSION
from osvolbackup.verbose import vprint
from time import time, sleep
| 42.044199 | 107 | 0.642181 |
fc107e595b21342f82e5161a579e155e45e95a50
| 13,314 |
py
|
Python
|
gammapy/estimators/profile.py
|
JohannesBuchner/gammapy
|
48769519f04b7df7b3e4580ebb61396445790bc3
|
[
"BSD-3-Clause"
] | 1 |
2021-02-02T21:35:27.000Z
|
2021-02-02T21:35:27.000Z
|
gammapy/estimators/profile.py
|
kabartay/gammapy
|
015206d2418b1d254f1c9d3ea819ab0c5ece99e9
|
[
"BSD-3-Clause"
] | 2 |
2018-08-09T20:49:13.000Z
|
2019-01-23T17:30:49.000Z
|
gammapy/estimators/profile.py
|
kabartay/gammapy
|
015206d2418b1d254f1c9d3ea819ab0c5ece99e9
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tools to create profiles (i.e. 1D "slices" from 2D images)."""
import numpy as np
import scipy.ndimage
from astropy import units as u
from astropy.convolution import Box1DKernel, Gaussian1DKernel
from astropy.coordinates import Angle
from astropy.table import Table
from .core import Estimator
__all__ = ["ImageProfile", "ImageProfileEstimator"]
# TODO: implement measuring profile along arbitrary directions
# TODO: think better about error handling. e.g. MC based methods
class ImageProfile:
    """Image profile.

    Assumed minimal shell: the original class body above this method was elided
    from this excerpt; it wraps an `~astropy.table.Table` with a 'profile' column
    and an optional 'profile_err' column.
    """

    def __init__(self, table):
        self.table = table

    def normalize(self, mode="peak"):
        """Normalize profile to peak value or integral.

        Parameters
        ----------
        mode : ['integral', 'peak']
            Normalize image profile so that it integrates to unity ('integral')
            or the maximum value corresponds to one ('peak').

        Returns
        -------
        profile : `ImageProfile`
            Normalized image profile.
        """
        table = self.table.copy()
        profile = self.table["profile"]
        if mode == "peak":
            norm = np.nanmax(profile)
        elif mode == "integral":
            norm = np.nansum(profile)
        else:
            raise ValueError(f"Invalid normalization mode: {mode!r}")
        table["profile"] /= norm
        if "profile_err" in table.colnames:
            table["profile_err"] /= norm
        return self.__class__(table)
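# Hedged usage sketch (assumed Table layout):
#     profile = ImageProfile(Table({"x": x, "profile": values}))
#     normalized = profile.normalize(mode="peak")  # maximum value becomes one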
| 31.928058 | 84 | 0.570828 |
fc109f21dbb2efc4b477a59e275c911d6c56316e
| 221 |
py
|
Python
|
ABC/abc001-abc050/abc007/b.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | 2 |
2020-06-12T09:54:23.000Z
|
2021-05-04T01:34:07.000Z
|
ABC/abc001-abc050/abc007/b.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | 961 |
2020-06-23T07:26:22.000Z
|
2022-03-31T21:34:52.000Z
|
ABC/abc001-abc050/abc007/b.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
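# Assumed reconstruction (the original body was elided from this dump). ABC 007 B
# asks for any name lexicographically smaller than the given string A, or -1 if
# none exists; 'a' works unless A itself is 'a'.
def main():
    a = input()
    print(-1 if a == 'a' else 'a')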
if __name__ == '__main__':
main()
| 13 | 48 | 0.466063 |
fc1121d14735ee8c8c982d686f96751beb66af86
| 7,270 |
py
|
Python
|
env/lib/python3.8/site-packages/versatileimagefield/mixins.py
|
crimergio/linux_test
|
5e688a06884ab10b4eaaad10a5d0df417a1c9b31
|
[
"CC-BY-4.0"
] | 1 |
2021-04-07T16:25:20.000Z
|
2021-04-07T16:25:20.000Z
|
env/lib/python3.8/site-packages/versatileimagefield/mixins.py
|
crimergio/linux_test
|
5e688a06884ab10b4eaaad10a5d0df417a1c9b31
|
[
"CC-BY-4.0"
] | 9 |
2021-03-19T03:06:53.000Z
|
2022-03-12T00:37:04.000Z
|
myvenv/lib/python3.6/site-packages/versatileimagefield/mixins.py
|
yog240597/saleor
|
b75a23827a4ec2ce91637f0afe6808c9d09da00a
|
[
"CC-BY-4.0"
] | 1 |
2021-04-23T15:01:05.000Z
|
2021-04-23T15:01:05.000Z
|
"""versatileimagefield Field mixins."""
import os
import re
from .datastructures import FilterLibrary
from .registry import autodiscover, versatileimagefield_registry
from .settings import (
cache,
VERSATILEIMAGEFIELD_CREATE_ON_DEMAND,
VERSATILEIMAGEFIELD_SIZED_DIRNAME,
VERSATILEIMAGEFIELD_FILTERED_DIRNAME
)
from .validators import validate_ppoi
autodiscover()
filter_regex_snippet = r'__({registered_filters})__'.format(
registered_filters='|'.join([
key
for key, filter_cls in versatileimagefield_registry._filter_registry.items()
])
)
sizer_regex_snippet = r'-({registered_sizers})-(\d+)x(\d+)(?:-\d+)?'.format(
registered_sizers='|'.join([
sizer_cls.get_filename_key_regex()
for key, sizer_cls in versatileimagefield_registry._sizedimage_registry.items()
])
)
filter_regex = re.compile(filter_regex_snippet + '$')
sizer_regex = re.compile(sizer_regex_snippet + '$')
filter_and_sizer_regex = re.compile(
filter_regex_snippet + sizer_regex_snippet + '$'
)
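# Hedged examples of the tags (the filename segment between basename and extension)
# these patterns match, assuming a registered 'invert' filter and a sizer whose
# filename key regex is 'thumbnail':
#   '__invert__'                   -> filter_regex
#   '-thumbnail-100x100'           -> sizer_regex (an optional '-<n>' suffix is allowed)
#   '__invert__-thumbnail-100x100' -> filter_and_sizer_regex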
def get_filtered_root_folder(self):
"""Return the location where filtered images are stored."""
folder, filename = os.path.split(self.name)
return os.path.join(folder, VERSATILEIMAGEFIELD_FILTERED_DIRNAME, '')
def get_sized_root_folder(self):
"""Return the location where sized images are stored."""
folder, filename = os.path.split(self.name)
return os.path.join(VERSATILEIMAGEFIELD_SIZED_DIRNAME, folder, '')
def get_filtered_sized_root_folder(self):
"""Return the location where filtered + sized images are stored."""
sized_root_folder = self.get_sized_root_folder()
return os.path.join(
sized_root_folder,
VERSATILEIMAGEFIELD_FILTERED_DIRNAME
)
def delete_matching_files_from_storage(self, root_folder, regex):
"""
Delete files in `root_folder` which match `regex` before file ext.
Example values:
* root_folder = 'foo/'
* self.name = 'bar.jpg'
* regex = re.compile('-baz')
Result:
* foo/bar-baz.jpg <- Deleted
* foo/bar-biz.jpg <- Not deleted
"""
if not self.name: # pragma: no cover
return
try:
directory_list, file_list = self.storage.listdir(root_folder)
except OSError: # pragma: no cover
pass
else:
folder, filename = os.path.split(self.name)
basename, ext = os.path.splitext(filename)
for f in file_list:
if not f.startswith(basename) or not f.endswith(ext): # pragma: no cover
continue
tag = f[len(basename):-len(ext)]
assert f == basename + tag + ext
if regex.match(tag) is not None:
file_location = os.path.join(root_folder, f)
self.storage.delete(file_location)
cache.delete(
self.storage.url(file_location)
)
print(
"Deleted {file} (created from: {original})".format(
file=os.path.join(root_folder, f),
original=self.name
)
)
def delete_filtered_images(self):
"""Delete all filtered images created from `self.name`."""
self.delete_matching_files_from_storage(
self.get_filtered_root_folder(),
filter_regex
)
def delete_sized_images(self):
"""Delete all sized images created from `self.name`."""
self.delete_matching_files_from_storage(
self.get_sized_root_folder(),
sizer_regex
)
def delete_filtered_sized_images(self):
"""Delete all filtered sized images created from `self.name`."""
self.delete_matching_files_from_storage(
self.get_filtered_sized_root_folder(),
filter_and_sizer_regex
)
def delete_all_created_images(self):
"""Delete all images created from `self.name`."""
self.delete_filtered_images()
self.delete_sized_images()
self.delete_filtered_sized_images()
| 34.454976 | 90 | 0.599037 |
fc11ec393a7dcebc05211e5be317a56b62dc07c0
| 9,450 |
py
|
Python
|
differential_privacy/run_federated.py
|
HanGuo97/federated
|
7e64bfe86bb606fad2ea7bc2a0f8ebdb565546f9
|
[
"BSD-3-Clause"
] | 330 |
2020-09-14T23:10:16.000Z
|
2022-03-30T19:49:19.000Z
|
differential_privacy/run_federated.py
|
HanGuo97/federated
|
7e64bfe86bb606fad2ea7bc2a0f8ebdb565546f9
|
[
"BSD-3-Clause"
] | 52 |
2020-09-30T06:10:51.000Z
|
2022-03-31T19:25:16.000Z
|
differential_privacy/run_federated.py
|
HanGuo97/federated
|
7e64bfe86bb606fad2ea7bc2a0f8ebdb565546f9
|
[
"BSD-3-Clause"
] | 119 |
2020-09-24T04:54:46.000Z
|
2022-03-31T21:46:57.000Z
|
# Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs federated training with differential privacy on various tasks."""
import functools
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
import tensorflow_federated as tff
from utils import task_utils
from utils import training_utils
from utils import utils_impl
from utils.optimizers import optimizer_utils
with utils_impl.record_hparam_flags() as optimizer_flags:
# Defining optimizer flags
optimizer_utils.define_optimizer_flags('client')
optimizer_utils.define_optimizer_flags('server')
with utils_impl.record_hparam_flags() as shared_flags:
# Federated training hyperparameters
flags.DEFINE_integer('client_epochs_per_round', 1,
'Number of epochs in the client to take per round.')
flags.DEFINE_integer('client_batch_size', 20, 'Batch size on the clients.')
flags.DEFINE_integer('clients_per_round', 10,
'How many clients to sample per round.')
flags.DEFINE_integer('client_datasets_random_seed', 1,
'Random seed for client sampling.')
flags.DEFINE_integer(
'max_elements_per_client', None, 'Maximum number of '
'elements for each training client. If set to None, all '
'available examples are used.')
# Training loop configuration
flags.DEFINE_integer('total_rounds', 200, 'Number of total training rounds.')
flags.DEFINE_string(
      'experiment_name', None, 'The name of this experiment. Will be appended to '
'--root_output_dir to separate experiment results.')
flags.DEFINE_string('root_output_dir', '/tmp/fed_opt/',
'Root directory for writing experiment output.')
flags.DEFINE_integer(
'rounds_per_eval', 1,
'How often to evaluate the global model on the validation dataset.')
flags.DEFINE_integer(
'num_validation_examples', -1, 'The number of validation'
'examples to use. If set to -1, all available examples '
'are used.')
flags.DEFINE_integer('rounds_per_checkpoint', 50,
'How often to checkpoint the global model.')
with utils_impl.record_hparam_flags() as dp_flags:
# Differential privacy flags
flags.DEFINE_float(
'clip', None, 'Clip value for fixed clipping or initial clip for '
'adaptive clipping. If None, no clipping is used.')
flags.DEFINE_float('noise_multiplier', None,
'Noise multiplier. If None, non-DP aggregator is used.')
flags.DEFINE_float(
'adaptive_clip_learning_rate', None, 'Adaptive clip learning rate. If '
'None, clip adaptation is not used.')
flags.DEFINE_float('target_unclipped_quantile', 0.5,
'Target unclipped quantile.')
flags.DEFINE_boolean('uniform_weighting', False,
'Whether to weigh clients uniformly.')
# Task specification
with utils_impl.record_hparam_flags() as task_flags:
task_utils.define_task_flags()
FLAGS = flags.FLAGS
def _write_hparam_flags():
"""Returns an ordered dictionary of pertinent hyperparameter flags."""
hparam_dict = utils_impl.lookup_flag_values(shared_flags)
# Update with optimizer flags corresponding to the chosen optimizers.
opt_flag_dict = utils_impl.lookup_flag_values(optimizer_flags)
opt_flag_dict = optimizer_utils.remove_unused_flags('client', opt_flag_dict)
opt_flag_dict = optimizer_utils.remove_unused_flags('server', opt_flag_dict)
hparam_dict.update(opt_flag_dict)
# Update with task flags
task_flag_dict = utils_impl.lookup_flag_values(task_flags)
hparam_dict.update(task_flag_dict)
training_utils.write_hparams_to_csv(hparam_dict, FLAGS.root_output_dir,
FLAGS.experiment_name)
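# Hedged sketch (not in this excerpt): how the DP flags above typically map onto a
# TFF aggregation factory; the flag names are real, the wiring here is an assumption.
#
#   if FLAGS.noise_multiplier is None:
#       factory = tff.aggregators.MeanFactory()
#   elif FLAGS.adaptive_clip_learning_rate is None:
#       factory = tff.aggregators.DifferentiallyPrivateFactory.gaussian_fixed(
#           noise_multiplier=FLAGS.noise_multiplier,
#           clients_per_round=FLAGS.clients_per_round,
#           clip=FLAGS.clip)
#   else:
#       factory = tff.aggregators.DifferentiallyPrivateFactory.gaussian_adaptive(
#           noise_multiplier=FLAGS.noise_multiplier,
#           clients_per_round=FLAGS.clients_per_round,
#           initial_l2_norm_clip=FLAGS.clip,
#           target_unclipped_quantile=FLAGS.target_unclipped_quantile,
#           learning_rate=FLAGS.adaptive_clip_learning_rate)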
if __name__ == '__main__':
app.run(main)
| 42.760181 | 91 | 0.742963 |
fc11f9759b82ea3a650e3c9261504b9c80e953f0
| 417 |
py
|
Python
|
waymo_kitti_converter/tools/visual_point_cloud.py
|
anhvth/Pseudo_Lidar_V2
|
d7a29ffc811e315df25bba2a43acf288d4ceb30e
|
[
"MIT"
] | null | null | null |
waymo_kitti_converter/tools/visual_point_cloud.py
|
anhvth/Pseudo_Lidar_V2
|
d7a29ffc811e315df25bba2a43acf288d4ceb30e
|
[
"MIT"
] | null | null | null |
waymo_kitti_converter/tools/visual_point_cloud.py
|
anhvth/Pseudo_Lidar_V2
|
d7a29ffc811e315df25bba2a43acf288d4ceb30e
|
[
"MIT"
] | null | null | null |
import open3d as o3d
import numpy as np
pc_load_pathname = '/home/caizhongang/github/waymo_kitti_converter/007283-000.bin'
pc = np.fromfile(pc_load_pathname, dtype=np.float32).reshape(-1, 3)
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(pc)
axis = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1, origin=[0,0,0])
visual = [pcd, axis]
o3d.visualization.draw_geometries(visual)
| 34.75 | 82 | 0.781775 |
fc11f9bf036f8314167de520f758c42b9fa4aa63
| 2,306 |
py
|
Python
|
designate-8.0.0/designate/tests/test_api/test_v2/test_limits.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 145 |
2015-01-02T09:35:53.000Z
|
2021-12-14T17:03:53.000Z
|
designate/tests/test_api/test_v2/test_limits.py
|
sapcc/designate
|
c3f084751006a2fe7562f137930542c4759d6fd9
|
[
"Apache-2.0"
] | 6 |
2015-03-15T00:22:27.000Z
|
2019-12-16T09:37:38.000Z
|
designate/tests/test_api/test_v2/test_limits.py
|
sapcc/designate
|
c3f084751006a2fe7562f137930542c4759d6fd9
|
[
"Apache-2.0"
] | 109 |
2015-01-13T16:47:34.000Z
|
2021-03-15T13:18:48.000Z
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from designate.tests.test_api.test_v2 import ApiV2TestCase
| 41.927273 | 76 | 0.667823 |
fc1210baa0e8a8267a154dad6a47b17fe2942673
| 1,696 |
py
|
Python
|
pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGLContext/scenegraph/nodepath.py
|
alexus37/AugmentedRealityChess
|
7f600ad153270feff12aa7aa86d7ed0a49ebc71c
|
[
"MIT"
] | 1 |
2015-07-12T07:24:17.000Z
|
2015-07-12T07:24:17.000Z
|
pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGLContext/scenegraph/nodepath.py
|
alexus37/AugmentedRealityChess
|
7f600ad153270feff12aa7aa86d7ed0a49ebc71c
|
[
"MIT"
] | null | null | null |
pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGLContext/scenegraph/nodepath.py
|
alexus37/AugmentedRealityChess
|
7f600ad153270feff12aa7aa86d7ed0a49ebc71c
|
[
"MIT"
] | 1 |
2016-02-19T21:55:53.000Z
|
2016-02-19T21:55:53.000Z
|
"""node-path implementation for OpenGLContext
"""
from vrml.vrml97 import nodepath, nodetypes
from vrml.cache import CACHE
from OpenGLContext import quaternion
from OpenGL.GL import glMultMatrixf
| 32 | 69 | 0.630896 |