Dataset schema (one record per source file; ⌀ marks columns that contain nulls):

| column | dtype | lengths / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, ⌀ |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, ⌀ |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, ⌀ |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 |
| author_id | string | length 1–132 |
1eb7d4b356ecdfbafd7359821f946512d7724998
|
bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d
|
/lib/googlecloudsdk/generated_clients/apis/artifactregistry/v1beta2/resources.py
|
1c5440583e39b379a1c8a68cde0b2d6841f35146
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
05fbb473d629195f25887fc5bfaa712f2cbc0a24
|
392abf004b16203030e6efd2f0af24db7c8d669e
|
refs/heads/master
| 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 |
NOASSERTION
| 2022-10-29T20:49:13 | 2021-02-02T05:47:30 |
Python
|
UTF-8
|
Python
| false | false | 3,295 |
py
|
# -*- coding: utf-8 -*- #
# Copyright 2023 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for Cloud Platform Apis generated from apitools."""
import enum
BASE_URL = 'https://artifactregistry.googleapis.com/v1beta2/'
DOCS_URL = 'https://cloud.google.com/artifacts/docs/'
class Collections(enum.Enum):
"""Collections for all supported apis."""
PROJECTS = (
'projects',
'projects/{projectsId}',
{},
['projectsId'],
True
)
PROJECTS_LOCATIONS = (
'projects.locations',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_OPERATIONS = (
'projects.locations.operations',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/operations/'
'{operationsId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_REPOSITORIES = (
'projects.locations.repositories',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/repositories/'
'{repositoriesId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_REPOSITORIES_FILES = (
'projects.locations.repositories.files',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/repositories/'
'{repositoriesId}/files/{filesId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_REPOSITORIES_PACKAGES = (
'projects.locations.repositories.packages',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/repositories/'
'{repositoriesId}/packages/{packagesId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_REPOSITORIES_PACKAGES_TAGS = (
'projects.locations.repositories.packages.tags',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/repositories/'
'{repositoriesId}/packages/{packagesId}/tags/{tagsId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_REPOSITORIES_PACKAGES_VERSIONS = (
'projects.locations.repositories.packages.versions',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/repositories/'
'{repositoriesId}/packages/{packagesId}/versions/{versionsId}',
},
['name'],
True
)
def __init__(self, collection_name, path, flat_paths, params,
enable_uri_parsing):
self.collection_name = collection_name
self.path = path
self.flat_paths = flat_paths
self.params = params
self.enable_uri_parsing = enable_uri_parsing
|
[
"[email protected]"
] | |
9805ffe4daef50c8bdfe737999913fe9357c8479
|
e4da82e4beb9b1af7694fd5b49824a1c53ee59ff
|
/AutoWorkup/SEMTools/registration/averagebraingenerator.py
|
b206faa7d7b842adead8675771f35338e6d91db4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
ipekoguz/BRAINSTools
|
c8732a9206525adb5779eb0c2ed97f448e2df47f
|
dc32fa0820a0d0b3bd882fa744e79194c9c137bc
|
refs/heads/master
| 2021-01-18T08:37:03.883250 | 2013-05-14T21:08:33 | 2013-05-14T21:08:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,709 |
py
|
# -*- coding: utf8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
import os
class AverageBrainGeneratorInputSpec(CommandLineInputSpec):
inputDirectory = File(desc="Image To Warp", exists=True, argstr="--inputDirectory %s")
templateVolume = File(desc="Reference image defining the output space", exists=True, argstr="--templateVolume %s")
    resolusion = traits.Str(desc="The resolution.", argstr="--resolusion %s")
iteration = traits.Str(desc="The iteration.", argstr="--iteration %s")
pixelType = traits.Enum("uchar", "short", "ushort", "int", "uint", "float", desc="Specifies the pixel type for the input/output images", argstr="--pixelType %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Resulting deformed image", argstr="--outputVolume %s")
class AverageBrainGeneratorOutputSpec(TraitedSpec):
outputVolume = File(desc="Resulting deformed image", exists=True)
class AverageBrainGenerator(SEMLikeCommandLine):
"""title: Average Brain Generator
category: Registration
description:
This program creates a synthesized average brain.
version: 0.1
documentation-url: http://mri.radiology.uiowa.edu/mriwiki
license: NEED TO ADD
contributor: This tool was developed by Yongqiang Zhao.
"""
input_spec = AverageBrainGeneratorInputSpec
output_spec = AverageBrainGeneratorOutputSpec
_cmd = " AverageBrainGenerator "
_outputs_filenames = {'outputVolume':'outputVolume'}
|
[
"[email protected]"
] | |
4ff8a625e52e7a2fc0f40fd40fdb70a36086c6e2
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/instances/sunData/SType/ST_facets/ST_facets00201m/ST_facets00201m9_p.py
|
6b09bb1b8dd9512268b76bbd79e2c658e0d3fc7d
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 |
MIT
| 2023-07-25T14:19:04 | 2020-02-10T21:59:47 |
Python
|
UTF-8
|
Python
| false | false | 139 |
py
|
from output.models.sun_data.stype.st_facets.st_facets00201m.st_facets00201m9_xsd.st_facets00201m9 import Test
obj = Test(
value=10
)
|
[
"[email protected]"
] | |
e6dfd9cb391b1dc09795b1911c78d7980a0ff1ee
|
b7f45072d056b80ed49e6bcde91877d8576e970d
|
/ImageJ/py/Wayne-blob-example.py
|
610a35e6e5ddb80455ce608015ed6b1efdfc7ff2
|
[] |
no_license
|
jrminter/tips
|
128a18ee55655a13085c174d532c77bcea412754
|
f48f8b202f8bf9e36cb6d487a23208371c79718e
|
refs/heads/master
| 2022-06-14T08:46:28.972743 | 2022-05-30T19:29:28 | 2022-05-30T19:29:28 | 11,463,325 | 5 | 8 | null | 2019-12-18T16:24:02 | 2013-07-17T00:16:43 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 321 |
py
|
from org.python.core import codecs
codecs.setDefaultEncoding('utf-8')
import os
from ij import IJ, WindowManager
IJ.run("Close All")
img = IJ.openImage("http://wsr.imagej.net/images/blobs.gif")
IJ.setAutoThreshold(img, "Default")
IJ.run(img, "Analyze Particles...", " show=[Bare Outlines] include in_situ")
img.show()
|
[
"[email protected]"
] | |
59accba5a656d5b413c7c3ad528bee9b9a83ad95
|
9025c27655e2f150d01e64ce0826df8166ac6813
|
/core/urls.py
|
a1c84250501f6e331d1daaab5d0a66f5b2db6bbf
|
[] |
no_license
|
kairat3/bella-plain
|
02dd219f6bf087c99772490a32d61cd242a18f28
|
1950fd46dc53b800461f6077af3044bdfcf8300c
|
refs/heads/master
| 2023-07-13T05:06:17.575811 | 2021-08-19T14:05:29 | 2021-08-19T14:05:29 | 393,064,884 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,452 |
py
|
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from product.views import ProductApiView
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
schema_view = get_schema_view(
openapi.Info(
title="Bella API",
default_version='v1',
description="Test description",
terms_of_service="https://www.google.com/policies/terms/",
contact=openapi.Contact(email="[email protected]"),
license=openapi.License(name="BSD License"),
),
public=True,
permission_classes=(permissions.AllowAny,),
)
router = DefaultRouter()
router.register('products', ProductApiView)
urlpatterns = [
path('', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
path('docs/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
path('', include('account.urls')),
path('', include('product.urls')),
path('admin/', admin.site.urls),
path('', include(router.urls)),
path('', include('info.urls')),
path('', include('news.urls')),
path('', include('cart.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"[email protected]"
] | |
50363bbf710a2b67812e488531ed086fe0b32138
|
d40fbefbd5db39f1c3fb97f17ed54cb7b6f230e0
|
/datadog_checks_dev/datadog_checks/dev/tooling/config.py
|
7d63ecb7890e8d4df068f1419c36389ea8bb11bc
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
slightilusion/integrations-core
|
47a170d791e809f3a69c34e2426436a6c944c322
|
8f89e7ba35e6d27c9c1b36b9784b7454d845ba01
|
refs/heads/master
| 2020-05-20T18:34:41.716618 | 2019-05-08T21:51:17 | 2019-05-08T21:51:17 | 185,708,851 | 2 | 0 |
BSD-3-Clause
| 2019-05-09T02:05:19 | 2019-05-09T02:05:18 | null |
UTF-8
|
Python
| false | false | 3,143 |
py
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
from collections import OrderedDict, deque
from copy import deepcopy
import toml
from appdirs import user_data_dir
from atomicwrites import atomic_write
from six import string_types
from ..compat import FileNotFoundError
from ..utils import ensure_parent_dir_exists, file_exists, read_file
APP_DIR = user_data_dir('dd-checks-dev', '')
CONFIG_FILE = os.path.join(APP_DIR, 'config.toml')
SECRET_KEYS = {'dd_api_key', 'github.token', 'pypi.pass', 'trello.key', 'trello.token'}
DEFAULT_CONFIG = OrderedDict(
[
('core', os.path.join('~', 'dd', 'integrations-core')),
('extras', os.path.join('~', 'dd', 'integrations-extras')),
('agent', os.path.join('~', 'dd', 'datadog-agent')),
('repo', 'core'),
('agent6', OrderedDict((('docker', 'datadog/agent-dev:master'), ('local', 'latest')))),
('agent5', OrderedDict((('docker', 'datadog/dev-dd-agent:master'), ('local', 'latest')))),
('dd_api_key', os.getenv('DD_API_KEY')),
('github', OrderedDict((('user', ''), ('token', '')))),
('pypi', OrderedDict((('user', ''), ('pass', '')))),
('trello', OrderedDict((('key', ''), ('token', '')))),
]
)
def config_file_exists():
return file_exists(CONFIG_FILE)
def copy_default_config():
return deepcopy(DEFAULT_CONFIG)
def save_config(config):
ensure_parent_dir_exists(CONFIG_FILE)
with atomic_write(CONFIG_FILE, mode='wb', overwrite=True) as f:
f.write(toml.dumps(config).encode('utf-8'))
def load_config():
config = copy_default_config()
try:
config.update(toml.loads(read_config_file(), OrderedDict))
except FileNotFoundError:
pass
return config
def read_config_file():
return read_file(CONFIG_FILE)
def read_config_file_scrubbed():
return toml.dumps(scrub_secrets(load_config()))
def restore_config():
config = copy_default_config()
save_config(config)
return config
def update_config():
config = copy_default_config()
config.update(load_config())
# Support legacy config where agent5 and agent6 were strings
if isinstance(config['agent6'], string_types):
config['agent6'] = OrderedDict((('docker', config['agent6']), ('local', 'latest')))
if isinstance(config['agent5'], string_types):
config['agent5'] = OrderedDict((('docker', config['agent5']), ('local', 'latest')))
save_config(config)
return config
def scrub_secrets(config):
for secret_key in SECRET_KEYS:
branch = config
paths = deque(secret_key.split('.'))
while paths:
path = paths.popleft()
if not hasattr(branch, 'get'):
break
if path in branch:
if not paths:
old_value = branch[path]
if isinstance(old_value, string_types):
branch[path] = '*' * len(old_value)
else:
branch = branch[path]
else:
break
return config
|
[
"[email protected]"
] | |
beb223699fadcff443ec1b36fb64cecf67b2359c
|
b5d0a6254b54c0a778181a67bcda14cc6663e871
|
/0-notes/job-search/Cracking the Coding Interview/C10SortingSearching/questions/10.5-question.py
|
5ec618baaa19cdb2c7b27b33ac1bfb9f081b82c6
|
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
Web-Dev-Collaborative/Lambda-Final-Backup
|
113e719a76a144b86d06f3a412afe4b02689cad7
|
e9ab84928faa8364bacd863009ae9aec01ff9d1e
|
refs/heads/master
| 2023-06-07T15:34:00.682815 | 2021-04-17T01:53:14 | 2021-04-17T01:53:14 | 358,899,122 | 0 | 0 |
MIT
| 2023-05-30T04:03:16 | 2021-04-17T14:24:53 |
JavaScript
|
UTF-8
|
Python
| false | false | 318 |
py
|
# Sparse Search
# Given a sorted array of strings that is interspersed with empty strings,
# write a method to find the location of a given string.
# EXAMPLE: INPUT: ball, {"at", "", "", "", "ball", "", "", "car", "", "", "dad", "", ""}
# OUTPUT: 4
# time complexity: O()
# space complexity: O()
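# One possible approach (a hedged sketch, not necessarily the book's
# solution): binary search, but when the midpoint is an empty string,
# probe outward to the nearest non-empty entry before comparing.
# Assumes the target itself is non-empty. Roughly O(log n) time when
# empty strings are rare, O(n) worst case; O(1) extra space.
def sparse_search(target, strings):
    lo, hi = 0, len(strings) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if strings[mid] == '':
            # scan left and right for the closest non-empty string
            left, right = mid - 1, mid + 1
            while True:
                if left < lo and right > hi:
                    return -1  # nothing but empty strings in range
                if right <= hi and strings[right] != '':
                    mid = right
                    break
                if left >= lo and strings[left] != '':
                    mid = left
                    break
                left -= 1
                right += 1
        if strings[mid] == target:
            return mid
        elif strings[mid] < target:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1

# print(sparse_search("ball", ["at", "", "", "", "ball", "", "", "car", "", "", "dad", "", ""]))  # -> 4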
|
[
"[email protected]"
] | |
a8a1af44b4ff29b22520121f30295c8ebe1d693f
|
554ec84f23825452f7692f91f742bdc81fa50e84
|
/chatbot_27549/urls.py
|
7d1264887b9b6eb6dad7fc662d8571cc66eddd66
|
[] |
no_license
|
crowdbotics-apps/chatbot-27549
|
a7806af210b6e7ccdfb3db3dbaaac9e9dcb5a5af
|
0e615cbb191a8d91e2874e7329b059193a8ad625
|
refs/heads/master
| 2023-05-26T13:30:53.116812 | 2021-05-29T07:24:50 | 2021-05-29T07:24:50 | 371,908,087 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,022 |
py
|
"""chatbot_27549 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Chatbot"
admin.site.site_title = "Chatbot Admin Portal"
admin.site.index_title = "Chatbot Admin"
# swagger
api_info = openapi.Info(
title="Chatbot API",
default_version="v1",
description="API documentation for Chatbot App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
|
[
"[email protected]"
] | |
f8b918dbc080c727941fe32353727591500f3f2d
|
5c61851a03dd1ac98d03c2e98f27487f188ff00f
|
/{{cookiecutter.repo_name}}/manage.py
|
13bffdcfd10dc0e98343059f47512923a6698335
|
[
"BSD-3-Clause"
] |
permissive
|
tony/cookiecutter-flask-pythonic
|
e7208a8fc9ccbde10e541f8e657dbf4da7b388b3
|
d1274ec5d5b72cab128e593ed78de88c29bd54b5
|
refs/heads/master
| 2023-05-29T20:49:21.927268 | 2021-10-05T12:39:04 | 2021-10-05T12:39:04 | 35,064,692 | 39 | 4 | null | 2023-05-01T21:06:54 | 2015-05-04T22:52:20 |
Python
|
UTF-8
|
Python
| false | false | 1,203 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from flask_script import Manager
from {{ cookiecutter.repo_name }} import {{ cookiecutter.repo_name | capitalize }}
"""If not using Flask-Script::
app = {{ cookiecutter.repo_name | capitalize }}.from_cli(sys.argv[1:])
Does the trick for retrieving an application object using
pure argparse. But let's hook into Flask-Script's CLI argparse
instance.
"""
def app_wrapper(*args, **kwargs):
"""App factory returns the :class:`flask.Flask` via ``__call__``,
but because of the way :class:`flask_script.Manager` handles
accepting app objects, this wrapper returns the flask object directly.
:returns: Flask object build from CLI
:rtype: :class:`flask.Flask`
"""
return {{ cookiecutter.repo_name | capitalize }}.from_file(*args, **kwargs).app
manager = Manager(app_wrapper)
manager.add_option('-c', '--config', dest='config', required=False)
@manager.command
def run_server(*args, **kwargs):
{{ cookiecutter.repo_name | capitalize }}.from_file().run()
@manager.command
def testing(*args, **kwargs):
print('Run "./run-tests.py" or "python setup.py test".')
if __name__ == "__main__":
run_server()
|
[
"[email protected]"
] | |
af5d3531a0c3b27b202c1ef66223d898bd77ec13
|
008aada8c0e718e0220eabc5b54732a1e1b07f97
|
/sergeant/connector/_connector.py
|
ee1985d5cf05a1683d5b4b588c6a582648b9599b
|
[
"MIT"
] |
permissive
|
gabriel-yahav/sergeant
|
59259a92c4c072e317d82022f19b440b21d2c294
|
0de9bfb4fdca62f061d6588c6839c4491c5d4f9b
|
refs/heads/master
| 2022-09-30T04:38:48.414842 | 2020-05-26T10:28:50 | 2020-05-26T10:28:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,931 |
py
|
import typing
class Lock:
def acquire(
self,
timeout: typing.Optional[float] = None,
check_interval: float = 1.0,
ttl: int = 60,
) -> bool:
raise NotImplementedError()
def release(
self,
) -> bool:
raise NotImplementedError()
def is_locked(
self,
) -> bool:
raise NotImplementedError()
def set_ttl(
self,
ttl: int,
) -> bool:
raise NotImplementedError()
def get_ttl(
self,
) -> typing.Optional[int]:
raise NotImplementedError()
class Connector:
name: str
def key_set(
self,
key: str,
value: bytes,
) -> bool:
raise NotImplementedError()
def key_get(
self,
key: str,
) -> typing.Optional[bytes]:
raise NotImplementedError()
def key_delete(
self,
key: str,
) -> bool:
raise NotImplementedError()
def queue_pop(
self,
queue_name: str,
) -> typing.Optional[bytes]:
raise NotImplementedError()
def queue_pop_bulk(
self,
queue_name: str,
number_of_items: int,
) -> typing.List[bytes]:
raise NotImplementedError()
def queue_push(
self,
queue_name: str,
item: bytes,
priority: str = 'NORMAL',
) -> bool:
raise NotImplementedError()
def queue_push_bulk(
self,
queue_name: str,
items: typing.Iterable[bytes],
priority: str = 'NORMAL',
) -> bool:
raise NotImplementedError()
def queue_length(
self,
queue_name: str,
) -> int:
raise NotImplementedError()
def queue_delete(
self,
queue_name: str,
) -> bool:
raise NotImplementedError()
def lock(
self,
name: str,
) -> Lock:
raise NotImplementedError()
|
[
"[email protected]"
] | |
b12892a96f4b48796a35f6700c11b1ce1875c2cf
|
94c8dd4126da6e9fe9acb2d1769e1c24abe195d3
|
/test/python/circuit/library/test_phase_estimation.py
|
8bf3d15d9ea0a395cd1d2ede7c122fdb666605b4
|
[
"Apache-2.0"
] |
permissive
|
levbishop/qiskit-terra
|
a75c2f96586768c12b51a117f9ccb7398b52843d
|
98130dd6158d1f1474e44dd5aeacbc619174ad63
|
refs/heads/master
| 2023-07-19T19:00:53.483204 | 2021-04-20T16:30:16 | 2021-04-20T16:30:16 | 181,052,828 | 1 | 0 |
Apache-2.0
| 2019-06-05T15:32:13 | 2019-04-12T17:20:54 |
Python
|
UTF-8
|
Python
| false | false | 5,238 |
py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test library of phase estimation circuits."""
import unittest
import numpy as np
from qiskit.test.base import QiskitTestCase
from qiskit import BasicAer, execute
from qiskit.circuit import QuantumCircuit
from qiskit.circuit.library import PhaseEstimation, QFT
from qiskit.quantum_info import Statevector
class TestPhaseEstimation(QiskitTestCase):
"""Test the phase estimation circuit."""
def assertPhaseEstimationIsCorrect(self, pec: QuantumCircuit, eigenstate: QuantumCircuit,
phase_as_binary: str):
r"""Assert that the phase estimation circuit implements the correct transformation.
Applying the phase estimation circuit on a target register which holds the eigenstate
:math:`|u\rangle` (say the last register), the final state should be
.. math::
|\phi_1\rangle \cdots |\phi_t\rangle |u\rangle
where the eigenvalue is written as :math:`e^{2\pi i \phi}` and the angle is represented
in binary fraction, i.e. :math:`\phi = 0.\phi_1 \ldots \phi_t`.
Args:
pec: The circuit implementing the phase estimation circuit.
eigenstate: The eigenstate as circuit.
phase_as_binary: The phase of the eigenvalue in a binary fraction. E.g. if the
phase is 0.25, the binary fraction is '01' as 0.01 = 0 * 0.5 + 1 * 0.25 = 0.25.
"""
# the target state
eigenstate_as_vector = Statevector.from_instruction(eigenstate).data
reference = eigenstate_as_vector
zero, one = [1, 0], [0, 1]
for qubit in phase_as_binary[::-1]:
reference = np.kron(reference, zero if qubit == '0' else one)
# the simulated state
circuit = QuantumCircuit(pec.num_qubits)
circuit.compose(eigenstate,
list(range(pec.num_qubits - eigenstate.num_qubits, pec.num_qubits)),
inplace=True)
circuit.compose(pec, inplace=True)
# TODO use Statevector for simulation once Qiskit/qiskit-terra#4681 is resolved
# actual = Statevector.from_instruction(circuit).data
backend = BasicAer.get_backend('statevector_simulator')
actual = execute(circuit, backend).result().get_statevector()
np.testing.assert_almost_equal(reference, actual)
def test_phase_estimation(self):
"""Test the standard phase estimation circuit."""
with self.subTest('U=S, psi=|1>'):
unitary = QuantumCircuit(1)
unitary.s(0)
eigenstate = QuantumCircuit(1)
eigenstate.x(0)
            # eigenvalue is 1j = exp(2j pi 0.25) thus phi = 0.25 = 0.0100 = '0100'
            # using four digits as 4 evaluation qubits are used
            phase_as_binary = '0100'
pec = PhaseEstimation(4, unitary)
self.assertPhaseEstimationIsCorrect(pec, eigenstate, phase_as_binary)
with self.subTest('U=SZ, psi=|11>'):
unitary = QuantumCircuit(2)
unitary.z(0)
unitary.s(1)
eigenstate = QuantumCircuit(2)
eigenstate.x([0, 1])
# eigenvalue is -1j = exp(2j pi 0.75) thus phi = 0.75 = 0.110 = '110'
# using three digits as 3 evaluation qubits are used
phase_as_binary = '110'
pec = PhaseEstimation(3, unitary)
self.assertPhaseEstimationIsCorrect(pec, eigenstate, phase_as_binary)
with self.subTest('a 3-q unitary'):
unitary = QuantumCircuit(3)
unitary.x([0, 1, 2])
unitary.cz(0, 1)
unitary.h(2)
unitary.ccx(0, 1, 2)
unitary.h(2)
eigenstate = QuantumCircuit(3)
eigenstate.h(0)
eigenstate.cx(0, 1)
eigenstate.cx(0, 2)
# the unitary acts as identity on the eigenstate, thus the phase is 0
phase_as_binary = '00'
pec = PhaseEstimation(2, unitary)
self.assertPhaseEstimationIsCorrect(pec, eigenstate, phase_as_binary)
def test_phase_estimation_iqft_setting(self):
"""Test default and custom setting of the QFT circuit."""
unitary = QuantumCircuit(1)
unitary.s(0)
with self.subTest('default QFT'):
pec = PhaseEstimation(3, unitary)
expected_qft = QFT(3, inverse=True, do_swaps=False).reverse_bits()
self.assertEqual(pec.data[-1][0].definition, expected_qft)
with self.subTest('custom QFT'):
iqft = QFT(3, approximation_degree=2).inverse()
pec = PhaseEstimation(3, unitary, iqft=iqft)
self.assertEqual(pec.data[-1][0].definition, iqft)
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
aa3069e85491124d364115e57d1a97e1ff6dbda7
|
e2589896ad0e629d933f1e9e03f9963eb922664a
|
/backend/cool_dust_27675/wsgi.py
|
297564f38beadc76f1ea37eeabd22b393dcbc0c4
|
[] |
no_license
|
crowdbotics-apps/cool-dust-27675
|
89b947ddd6c87d70febeb2af15ffab3706b6cc13
|
f2fa1d6f4206955173a2ebf1b0f824ee5d184d1a
|
refs/heads/master
| 2023-05-08T10:46:07.382608 | 2021-06-02T06:44:22 | 2021-06-02T06:44:22 | 373,066,266 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 407 |
py
|
"""
WSGI config for cool_dust_27675 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cool_dust_27675.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
b94eb3cd9714f1550d11a2faa1808f08db720be0
|
bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d
|
/lib/surface/storage/delete.py
|
b0dd92d45fc1d77f4de21763de0131975f546827
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
05fbb473d629195f25887fc5bfaa712f2cbc0a24
|
392abf004b16203030e6efd2f0af24db7c8d669e
|
refs/heads/master
| 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 |
NOASSERTION
| 2022-10-29T20:49:13 | 2021-02-02T05:47:30 |
Python
|
UTF-8
|
Python
| false | false | 5,897 |
py
|
# -*- coding: utf-8 -*- #
# Copyright 2013 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to list Cloud Storage objects."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.storage import storage_api
from googlecloudsdk.api_lib.storage import storage_util
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.storage import expansion
from googlecloudsdk.command_lib.storage import flags
from googlecloudsdk.command_lib.storage import storage_parallel
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
@base.Hidden
@base.Deprecate(is_removed=False, warning='This command is deprecated. '
'Use `gcloud alpha storage rm` instead.')
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Delete(base.Command):
"""Delete Cloud Storage objects and buckets."""
detailed_help = {
'DESCRIPTION': """\
*{command}* lets you delete Cloud Storage objects and buckets. You can
specify one or more paths (including wildcards) and all matching objects
and buckets will be deleted.
""",
'EXAMPLES': """\
To delete an object, run:
$ *{command}* gs://mybucket/a.txt
To delete all objects in a directory, run:
$ *{command}* gs://mybucket/remote-dir/*
The above command will delete all objects under remote-dir/ but not its sub-directories.
To delete a directory and all its objects and subdirectories, run:
$ *{command}* --recursive gs://mybucket/remote-dir
$ *{command}* gs://mybucket/remote-dir/**
To delete all objects and subdirectories of a directory, without deleting the directory
itself, run:
$ *{command}* --recursive gs://mybucket/remote-dir/*
or
$ *{command}* gs://mybucket/remote-dir/**
To delete all objects and directories in a bucket without deleting the bucket itself, run:
$ *{command}* gs://mybucket/**
To delete all text files in a bucket or a directory, run:
$ *{command}* gs://mybucket/*.txt
$ *{command}* gs://mybucket/remote-dir/*.txt
To go beyond directory boundary and delete all text files in a bucket or a directory, run:
$ *{command}* gs://mybucket/**/*.txt
$ *{command}* gs://mybucket/remote-dir/**/*.txt
To delete a bucket, run:
$ *{command}* gs://mybucket
You can use wildcards in bucket names. To delete all buckets with prefix of `my`, run:
$ *{command}* --recursive gs://my*
""",
}
@staticmethod
def Args(parser):
parser.add_argument(
'path',
nargs='+',
help='The path of objects and directories to delete. The path must '
'begin with gs:// and may or may not contain wildcard characters.')
parser.add_argument(
'--recursive',
action='store_true',
help='Recursively delete the contents of any directories that match '
'the path expression.')
parser.add_argument(
'--num-threads',
type=int,
hidden=True,
default=16,
help='The number of threads to use for the delete.')
flags.add_additional_headers_flag(parser)
def Run(self, args):
paths = args.path or ['gs://']
expander = expansion.GCSPathExpander()
objects, dirs = expander.ExpandPaths(paths)
if dirs and not args.recursive:
raise exceptions.RequiredArgumentException(
'--recursive',
'Source path matches directories but --recursive was not specified.')
buckets = []
dir_paths = []
for d in dirs:
obj_ref = storage_util.ObjectReference.FromUrl(d, allow_empty_object=True)
if not obj_ref.name:
buckets.append(obj_ref.bucket_ref)
dir_paths.append(d + '**')
sub_objects, _ = expander.ExpandPaths(dir_paths)
objects.update(sub_objects)
tasks = []
for o in sorted(objects):
tasks.append(storage_parallel.ObjectDeleteTask(
storage_util.ObjectReference.FromUrl(o)))
if buckets:
# Extra warnings and confirmation if any buckets will be deleted.
log.warning('Deleting a bucket is irreversible and makes that bucket '
'name available for others to claim.')
message = 'This command will delete the following buckets:\n '
message += '\n '.join([b.bucket for b in buckets])
console_io.PromptContinue(
message=message, throw_if_unattended=True, cancel_on_no=True)
# TODO(b/120033753): Handle long lists of items.
message = 'You are about to delete the following:'
message += ''.join(['\n ' + b.ToUrl() for b in buckets])
message += ''.join(['\n ' + t.obj_ref.ToUrl() for t in tasks])
console_io.PromptContinue(
message=message, throw_if_unattended=True, cancel_on_no=True)
storage_parallel.ExecuteTasks(tasks, num_threads=args.num_threads,
progress_bar_label='Deleting Files')
log.status.write(
'Deleted [{}] file{}.\n'.format(
len(tasks), 's' if len(tasks) > 1 else ''))
storage_client = storage_api.StorageClient()
for b in buckets:
storage_client.DeleteBucket(b)
log.DeletedResource(b.ToUrl(), kind='bucket')
|
[
"[email protected]"
] | |
74d689c8c85d5d2561a6abc2a06ba077a7496e0e
|
0fa82ccc0b93944c4cbb8255834b019cf16d128d
|
/Az/temp.py
|
caf3bc211fbf8fccda75e10e1fee9d32caddc4ec
|
[] |
no_license
|
Akashdeepsingh1/project
|
6ad477088a3cae2d7eea818a7bd50a2495ce3ba8
|
bdebc6271b39d7260f6ab5bca37ab4036400258f
|
refs/heads/master
| 2022-12-13T23:09:35.782820 | 2020-08-27T14:22:37 | 2020-08-27T14:22:37 | 279,722,741 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 563 |
py
|
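# LeetCode 983 "Minimum Cost For Tickets": given a list of travel days and the
# prices of 1-, 7-, and 30-day passes, compute the cheapest set of passes that
# covers every travel day via bottom-up DP over the calendar year.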
def mincostTickets(days, costs):
    # dp[i] = minimum cost to cover all travel days up to and including day i
    dp = [0] * 366
    for i in range(1, max(days) + 1):
        if i in days:
            # end a 1-, 7-, or 30-day pass on day i; clamp the index at 0 so
            # early days don't wrap around to the tail of the list
            dp[i] = min(dp[max(0, i - 1)] + costs[0],
                        dp[max(0, i - 7)] + costs[1],
                        dp[max(0, i - 30)] + costs[2])
        else:
            dp[i] = dp[i - 1]  # no travel on day i, cost carries over
    return dp[max(days)]

def mincostTickets2(days, costs):
    # Compact variant that skips the travel-day check; it pays to cover every
    # calendar day, so it can overcharge when travel days are sparse.
    dp = [0] * 366
    for i in range(1, max(days) + 1):
        dp[i] = min(dp[max(0, i - 1)] + costs[0],
                    dp[max(0, i - 7)] + costs[1],
                    dp[max(0, i - 30)] + costs[2])
    return dp[max(days)]

days = [1, 4, 6, 7, 8, 20]
costs = [2, 7, 15]
print(mincostTickets(days, costs))
|
[
"[email protected]"
] | |
114910137765ee9246494ef8b775990951da0d1f
|
b321ca6310cd84bd8603fa9685365bb2a4acc945
|
/公司真题/拼多多/phone_number.py
|
144534cc23631ee5da9b7f732598e83ae9e6c492
|
[] |
no_license
|
baixiaoyanvision/python-algorithm
|
71b2fdf7d6b57be8a2960c44160f2a7459e153ae
|
6cbb61213af8264e083af1994522929fb7711616
|
refs/heads/master
| 2020-08-27T03:41:08.332322 | 2019-10-02T13:28:49 | 2019-10-02T13:28:49 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,096 |
py
|
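# Pinduoduo interview problem: read N and K on the first line and a string of
# N digits on the second; change K digits to one common value at minimum total
# cost (cost = absolute difference between digits), then print the minimum
# cost and the lexicographically smallest resulting string.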
# line1 = input()
# line2 = input()
line1 = '6 5'
line2 = '787585'
N, K = [int(i) for i in line1.split()]
line2 = [int(i) for i in line2]
result = []
line2_set = set(line2)
min_money = 99999999
for val in line2_set:
sub_vals = [abs(val - number) for number in line2]
sort_sub_vals = sorted( list(range(len(sub_vals))), key=lambda x: sub_vals[x] )
pay_money = sum([sub_vals[i] for i in sort_sub_vals[:K]])
equal_val = sub_vals[sort_sub_vals[K-1]]
copy_line2 = line2[:]
for i in sort_sub_vals[:K-1]:
copy_line2[i] = val
last_change = None
for i in range(len(copy_line2)):
if abs(copy_line2[i]-val) == equal_val:
last_change = i
copy_line2[last_change] = val
copy_line2 = [str(i) for i in copy_line2]
copy_line2 = ''.join(copy_line2)
if pay_money > min_money:
continue
elif pay_money < min_money:
result = []
result.append(copy_line2)
min_money = pay_money
else:
result.append(copy_line2)
result = sorted(result)
print(min_money)
print(result[0])
|
[
"[email protected]"
] | |
c0d29ea3e56d0a9a1129476105c243a8a2566772
|
8d2a124753905fb0455f624b7c76792c32fac070
|
/pytnon-month01/周六练习-practice on saturday/独立完成/OOP-fanb-1_student_manager_system.py
|
370a4186757ac84e2f949eca27cb01e393c5348c
|
[] |
no_license
|
Jeremy277/exercise
|
f38e4f19aae074c804d265f6a1c49709fd2cae15
|
a72dd82eb2424e4ae18e2f3e9cc66fc4762ec8fa
|
refs/heads/master
| 2020-07-27T09:14:00.286145 | 2019-09-17T11:31:44 | 2019-09-17T11:31:44 | 209,041,629 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,533 |
py
|
# Student information management system:
# Data model class: StudentModel
# Data: id, name, age, score
class StudentModel:
def __init__(self,name,age,score,id = 0):
self.name = name
self.age = age
self.score = score
self.id = id
# Logic controller class: StudentManagerController
# Data: student list __stu_list
#   (private attribute, exposed read-only)
# Behavior: get list stu_list, add student add_student, remove student remove_student,
#   update student update_student, sort by score order_by_score.
class StudentManagerController:
__stu_id = 1000
    def __init__(self):  # no formal parameters needed here
        self.__stu_list = []  # start with an empty student list
@property
def stu_list(self):
return self.__stu_list
def add_student(self,stu):
StudentManagerController.__stu_id += 1
stu.id = StudentManagerController.__stu_id
self.__stu_list.append(stu)
def remove_student(self,id):
for item in self.__stu_list:
if item.id == id:
self.__stu_list.remove(item)
return True
def update_student(self,stu):
for item in self.__stu_list:
if item.id == stu.id:
item.name = stu.name
item.age = stu.age
item.score = stu.score
return True
def order_by_score(self):
for i in range(len(self.__stu_list)-1):
for j in range(i+1,len(self.__stu_list)):
if self.__stu_list[i].score > self.__stu_list[j].score:
self.__stu_list[i],self.__stu_list[j] = self.__stu_list[j],self.__stu_list[i]
# View class: StudentManagerView
# Data: controller object __manager
# Behavior: show menu __display_menu, select menu item __select_menu_item, entry point main,
#   input student __input_students, output students __output_students,
#   delete student __delete_student, update student __modify_student
class StudentManagerView():
def __init__(self):
self.__manager = StudentManagerController()
def __display_menu(self):
        print('''
        Student Information Management System 1.0
        +------------------------------+
        | 0) Exit the system           |
        | 1) Add a student             |
        | 2) Show all students         |
        | 3) Delete a student          |
        | 4) Update a student          |
        | 5) Sort by score             |
        +------------------------------+
        ''')
    def main(self):
        choice = None
        while choice != 0:
            self.__display_menu()
            choice = input('Enter an option: ')
            if choice == '0':
                print('Thanks for using the system. Goodbye!')
                break
            elif choice == '1':
                self.__input_students()
            elif choice == '2':
                self.__output_students()
            elif choice == '3':
                self.__delete_student()
            elif choice == '4':
                self.__modify_student()
            elif choice == '5':
                self.__sort_by_score()
            else:
                print('Please enter a valid option!')

    def __input_students(self):
        name = input('Enter the student name: ')
        age = int(input('Enter the student age: '))
        score = int(input('Enter the student score: '))
        stu = StudentModel(name, age, score)
        self.__manager.add_student(stu)
        print('Student added successfully!')

    def __output_students(self):
        print('Students:')
        for item in self.__manager.stu_list:
            print(item.id, item.name, item.age, item.score)

    def __delete_student(self):
        stu_id = int(input('Enter the student id: '))
        if self.__manager.remove_student(stu_id):
            print('Student deleted successfully!')
        else:
            print('Failed to delete the student!')

    def __modify_student(self):
        id = int(input('Enter the id of the student to update: '))
        name = input('Enter the new name: ')
        age = int(input('Enter the new age: '))
        score = int(input('Enter the new score: '))
        stu = StudentModel(name, age, score, id)
        if self.__manager.update_student(stu):
            print('Student updated successfully!')
        else:
            print('Failed to update the student!')

    def __sort_by_score(self):
        self.__manager.order_by_score()
        print('Sorted successfully!')
view = StudentManagerView()
view.main()
# 1. Test the controller logic
# Test adding students
# manger = StudentManagerController()
# s01 = StudentModel('许瑶',18,98)
# s02 = StudentModel('许仙',16,99)
# s03 = StudentModel('小青',15,79)
# s04 = StudentModel('姐夫',15,79)
# manger.add_student(s01)
# manger.add_student(s02)
# manger.add_student(s03)
# manger.add_student(s04)
# for item in manger.stu_list:
#     print(item.id, item.name, item.age, item.score)
# # manger.stu_list holds the student objects
# # print(manger.stu_list[1].name)
# # Test removing a student
# manger.remove_student(1004)
# for item in manger.stu_list:
#     print('after removal:', item.id, item.name)
# # Test updating a student
# manger.update_student(StudentModel('娘子',19,80,1001))
# for item in manger.stu_list:
#     print('after update:', item.id, item.name, item.age, item.score)
# # Test sorting by score
# manger.order_by_score()
# for item in manger.stu_list:
#     print('sorted by ascending score:', item.id, item.name, item.age, item.score)
|
[
"[email protected]"
] | |
edbc5843172b296c275bf4d38092d8dabd6213fe
|
bd3b1eaedfd0aab45880c100b86bc4714149f5cd
|
/student/dyp1/11.py
|
c6e63aa6b223b8b5cdbb13353fe5872beeeea0a7
|
[] |
no_license
|
ophwsjtu18/ohw19f
|
a008cd7b171cd89fa116718e2a5a5eabc9f7a93e
|
96dedf53a056fbb4d07c2e2d37d502171a6554a6
|
refs/heads/master
| 2020-08-08T12:59:38.875197 | 2020-04-01T10:38:14 | 2020-04-01T10:38:14 | 213,835,959 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,178 |
py
|
import numpy as np
import cv2

capture = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier('C:\\Users\\DING-DING\\AppData\\Local\\Programs\\Python\\Python36\\Lib\\site-packages\\cv2\\data\\haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('C:\\Users\\DING-DING\\AppData\\Local\\Programs\\Python\\Python36\\Lib\\site-packages\\cv2\\data\\haarcascade_eye.xml')

def hhh(lists):
    # split each detected face rectangle into a 3x3 grid of sub-rectangles
    for (x, y, w, h) in lists:
        a = x
        for _ in range(3):
            for _ in range(3):
                cv2.rectangle(img, (x, y), (x + int(w / 3), y + int(h / 3)), (255, 0, 0), 2)
                x += int(w / 3)
            x = a
            y += int(h / 3)

while True:
    ret, frame = capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        img = cv2.rectangle(gray, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
    hhh(faces)
    cv2.imshow('frame', gray)
    # cv2.waitKey(0) blocked forever on the first frame; poll instead and
    # exit on 'q' so the cleanup below is actually reachable
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

capture.release()
cv2.destroyAllWindows()
|
[
"[email protected]"
] | |
34fc9717d6ba5477e1aa8e8cc9c71b46b8ee7fd2
|
2f2feae3dee5847edbf95c1eeb14e656490dae35
|
/2022/day_13_distress_signal_1.py
|
e89f9fb5f20ecbd78b7b38f8d58eca40028031af
|
[] |
no_license
|
olga3n/adventofcode
|
32597e9044e11384452410b7a7dda339faf75f32
|
490a385fb8f1c45d22deb27bf21891e193fe58a2
|
refs/heads/master
| 2023-01-07T09:19:04.090030 | 2022-12-25T13:31:22 | 2022-12-25T13:31:22 | 163,669,598 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,209 |
py
|
#!/usr/bin/env python3
import sys
import json
from typing import Iterable, List, Any, Tuple, Optional
def is_right_order(left: List[Any], right: List[Any]) -> Optional[bool]:
iter_left = iter(left)
iter_right = iter(right)
while True:
item_left = next(iter_left, None)
item_right = next(iter_right, None)
if item_left is None and item_right is None:
return None
if item_left is None:
return True
if item_right is None:
return False
if isinstance(item_left, int) and isinstance(item_right, int):
if item_left < item_right:
return True
if item_left > item_right:
return False
if item_right == item_left:
continue
if isinstance(item_left, int):
item_left = [item_left]
if isinstance(item_right, int):
item_right = [item_right]
value = is_right_order(item_left, item_right)
if value is not None:
return value
def build_pairs(data: Iterable[str]) -> Iterable[Tuple[List[Any], List[Any]]]:
buf = []
for line in data:
if not line.strip():
continue
buf.append(line)
if len(buf) == 2:
yield json.loads(buf[0]), json.loads(buf[1])
buf = []
def right_order_pairs(data: Iterable[str]) -> int:
return sum(
index + 1 for index, pair in enumerate(build_pairs(data))
if is_right_order(pair[0], pair[1])
)
def test_right_order_pairs():
data = [
'[1,1,3,1,1]',
'[1,1,5,1,1]',
'',
'[[1],[2,3,4]]',
'[[1],4]',
'',
'[9]',
'[[8,7,6]]',
'',
'[[4,4],4,4]',
'[[4,4],4,4,4]',
'',
'[7,7,7,7]',
'[7,7,7]',
'',
'[]',
'[3]',
'',
'[[[]]]',
'[[]]',
'',
'[1,[2,[3,[4,[5,6,7]]]],8,9]',
'[1,[2,[3,[4,[5,6,0]]]],8,9]'
]
assert right_order_pairs(data) == 13
def main():
data = sys.stdin
result = right_order_pairs(data)
print(result)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
9c7357576d312b577fde01d5955822e944b46c7b
|
d0f11aa36b8c594a09aa06ff15080d508e2f294c
|
/leecode/1-500/401-500/472-连接词.py
|
4edb1540db15225aeb711ca0bd0954fa23641a7b
|
[] |
no_license
|
saycmily/vtk-and-python
|
153c1fe9953fce685903f938e174d3719eada0f5
|
5045d7c44a5af5c16df5a3b72c157e9a2928a563
|
refs/heads/master
| 2023-01-28T14:02:59.970115 | 2021-04-28T09:03:32 | 2021-04-28T09:03:32 | 161,468,316 | 1 | 1 | null | 2023-01-12T05:59:39 | 2018-12-12T10:00:08 |
Python
|
UTF-8
|
Python
| false | false | 1,016 |
py
|
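# LeetCode 472 "Concatenated Words": sort words by length and keep a trie of
# nested dicts ('#' marks a word end); a word is a concatenated word if it can
# be split entirely into words inserted before it, otherwise it is inserted.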
class Solution:
def findAllConcatenatedWordsInADict(self, words):
def search(word, pre_dict):
if len(word)==0:
return True
cur_dict = pre_dict
for i,c in enumerate(word):
cur_dict = cur_dict.get(c,None)
if not cur_dict:
return False
if '#' in cur_dict:
if search(word[i+1:], pre_dict):
return True
return False
def insert(word, cur_dict):
for c in word:
if c not in cur_dict:
cur_dict[c] = {}
cur_dict = cur_dict[c]
cur_dict['#'] ={}
words.sort(key=lambda x: len(x))
ret = []
pre_dict = {}
for word in words:
if len(word)==0:
continue
if search(word, pre_dict):
ret.append(word)
else:
insert(word, pre_dict)
return ret
|
[
"[email protected]"
] | |
713b479653ed7764eabad8e061233c7fc1086f24
|
0c2ca3b3c7f307c29f45957e87ed940c23571fae
|
/fhirclient/models/bodysite_tests.py
|
a3aaa3593967b5390640ec04095fcc47317b4e4a
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
myungchoi/client-py-1.0.3
|
49c3d15b8dfb845e7cbc933084ed5fcc37e7c4ed
|
08e4e5828fb461c105907fd454b19dfc8463aad8
|
refs/heads/master
| 2021-06-25T04:36:26.952685 | 2021-02-11T16:27:26 | 2021-02-11T16:27:26 | 209,669,881 | 0 | 0 |
NOASSERTION
| 2021-03-20T01:45:42 | 2019-09-20T00:11:10 |
Python
|
UTF-8
|
Python
| false | false | 2,663 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 on 2016-03-23.
# 2016, SMART Health IT.
import os
import io
import unittest
import json
from . import bodysite
from .fhirdate import FHIRDate
class BodySiteTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("BodySite", js["resourceType"])
return bodysite.BodySite(js)
def testBodySite1(self):
inst = self.instantiate_from("bodysite-example.json")
self.assertIsNotNone(inst, "Must have instantiated a BodySite instance")
self.implBodySite1(inst)
js = inst.as_json()
self.assertEqual("BodySite", js["resourceType"])
inst2 = bodysite.BodySite(js)
self.implBodySite1(inst2)
def implBodySite1(self, inst):
self.assertEqual(inst.code.coding[0].code, "53120007")
self.assertEqual(inst.code.coding[0].display, "Arm")
self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.code.text, "Arm")
self.assertEqual(inst.description, "front of upper left arm directly below the tattoo")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.identifier[0].system, "http://www.acmehosp.com/bodysites")
self.assertEqual(inst.identifier[0].use, "official")
self.assertEqual(inst.identifier[0].value, "12345")
self.assertEqual(inst.image[0].contentType, "image/png;base64")
self.assertEqual(inst.image[0].title, "ARM")
self.assertEqual(inst.modifier[0].coding[0].code, "419161000")
self.assertEqual(inst.modifier[0].coding[0].display, "Unilateral left")
self.assertEqual(inst.modifier[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.modifier[0].text, "Left")
self.assertEqual(inst.modifier[1].coding[0].code, "261183002")
self.assertEqual(inst.modifier[1].coding[0].display, "Upper")
self.assertEqual(inst.modifier[1].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.modifier[1].text, "Upper")
self.assertEqual(inst.modifier[2].coding[0].code, "255549009")
self.assertEqual(inst.modifier[2].coding[0].display, "Anterior")
self.assertEqual(inst.modifier[2].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.modifier[2].text, "Anterior")
self.assertEqual(inst.text.status, "generated")
|
[
"[email protected]"
] | |
3351932d3d3a75e35b6b1fcbd967fa8b054bd65b
|
13a32b92b1ba8ffb07e810dcc8ccdf1b8b1671ab
|
/home--tommy--mypy/mypy/lib/python2.7/site-packages/theano/sandbox/cuda/tests/test_tensor_op.py
|
cb9162354ac7fa9120cf4dd3b05d616e784e0f36
|
[
"Unlicense"
] |
permissive
|
tommybutler/mlearnpy2
|
8ec52bcd03208c9771d8d02ede8eaa91a95bda30
|
9e5d377d0242ac5eb1e82a357e6701095a8ca1ff
|
refs/heads/master
| 2022-10-24T23:30:18.705329 | 2022-10-17T15:41:37 | 2022-10-17T15:41:37 | 118,529,175 | 0 | 2 |
Unlicense
| 2022-10-15T23:32:18 | 2018-01-22T23:27:10 |
Python
|
UTF-8
|
Python
| false | false | 5,283 |
py
|
"""
This file test tensor op that should also operate on CudaNdaray.
"""
from __future__ import absolute_import, print_function, division
from nose.plugins.skip import SkipTest
from nose_parameterized import parameterized
import numpy
import theano
from theano import tensor
import theano.tensor as T
import theano.tests.unittest_tools as utt
# Skip test if cuda_ndarray is not available.
import theano.sandbox.cuda as cuda
from theano.tensor.nnet.tests import test_conv3d2d
if cuda.cuda_available is False:
raise SkipTest('Optional package cuda disabled')
if theano.config.mode == 'FAST_COMPILE':
mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
mode_without_gpu = theano.compile.mode.get_mode('FAST_RUN').excluding('gpu')
else:
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
mode_without_gpu = theano.compile.mode.get_default_mode().excluding('gpu')
def test_shape_i():
x = cuda.ftensor3()
v = cuda.CudaNdarray(numpy.zeros((3, 4, 5), dtype='float32'))
f = theano.function([x], x.shape[1])
topo = f.maker.fgraph.toposort()
assert f(v) == 4
if theano.config.mode != 'FAST_COMPILE':
assert len(topo) == 1
assert isinstance(topo[0].op, T.opt.Shape_i)
def test_shape():
x = cuda.ftensor3()
v = cuda.CudaNdarray(numpy.zeros((3, 4, 5), dtype='float32'))
f = theano.function([x], x.shape)
topo = f.maker.fgraph.toposort()
assert numpy.all(f(v) == (3, 4, 5))
if theano.config.mode != 'FAST_COMPILE':
assert len(topo) == 4
assert isinstance(topo[0].op, T.opt.Shape_i)
assert isinstance(topo[1].op, T.opt.Shape_i)
assert isinstance(topo[2].op, T.opt.Shape_i)
assert isinstance(topo[3].op, T.opt.MakeVector)
def test_softmax_optimizations():
from theano.tensor.nnet.nnet import softmax, crossentropy_categorical_1hot
x = tensor.fmatrix('x')
one_of_n = tensor.lvector('one_of_n')
op = crossentropy_categorical_1hot
op(x, one_of_n)
fgraph = theano.gof.FunctionGraph(
[x, one_of_n],
[op(softmax(x), one_of_n)])
assert fgraph.outputs[0].owner.op == op
mode_with_gpu.optimizer.optimize(fgraph)
assert str(fgraph.outputs[0].owner.op) == 'OutputGuard'
assert fgraph.outputs[0].owner.inputs[0].owner.op == cuda.host_from_gpu
assert fgraph.outputs[0].owner.inputs[0].owner.inputs[0].owner.op == cuda.nnet.gpu_crossentropy_softmax_argmax_1hot_with_bias
def test_may_share_memory_cuda():
from theano.misc.may_share_memory import may_share_memory
a = cuda.CudaNdarray(numpy.zeros((3, 4), dtype='float32'))
b = cuda.CudaNdarray(numpy.zeros((3, 4), dtype='float32'))
na = numpy.zeros((3, 4))
nb = numpy.zeros((3, 4))
va = a.view()
vb = b.view()
ra = a.reshape((4, 3))
rb = b.reshape((4, 3))
# can't test the transpose as ta._strides = is not implemented
# manual transpose of a
# ta = a.reshape((4,3))
# ta._strides = (ta._strides[1],ta._strides[0])#not implemented
# elem_size=elem_size = numpy.zeros(0,dtype=a.dtype).dtype.itemsize
# ta.gpudata += ta.size*elem_size
for a_, b_, rep in [(a, a, True), (b, b, True), (a, b, False),
(a, na, False), (b, nb, False),
(na, b, False), (nb, a, False),
(a, va, True), (b, vb, True),
(va, b, False), (a, vb, False),
(a, ra, True), (b, rb, True),
(ra, b, False), (a, rb, False), ]:
assert may_share_memory(a_, b_) == rep
assert may_share_memory(b_, a_) == rep
# test that it raise error when needed.
for a_, b_, rep in [(a, (0,), False), (a, 1, False), (a, None, False)]:
assert may_share_memory(a_, b_, False) == rep
assert may_share_memory(b_, a_, False) == rep
try:
may_share_memory(a_, b_)
raise Exception("An error was expected")
except TypeError:
pass
try:
may_share_memory(b_, a_)
raise Exception("An error was expected")
except TypeError:
pass
def test_deepcopy():
a = cuda.fmatrix()
a_v = cuda.CudaNdarray(numpy.zeros((3, 4), dtype='float32'))
# We force the c code to check that we generate c code
mode = theano.Mode("c", mode_with_gpu.optimizer)
f = theano.function([a], a, mode=mode)
theano.printing.debugprint(f)
out = f(a_v)
assert out is not a_v
assert numpy.allclose(numpy.asarray(a_v), numpy.asarray(out))
# We force the python linker as the default code should work for this op
mode = theano.Mode("py", mode_with_gpu.optimizer)
f = theano.function([a], a, mode=mode)
theano.printing.debugprint(f)
out = f(a_v)
assert out is not a_v
assert numpy.allclose(numpy.asarray(a_v), numpy.asarray(out))
def test_get_diagonal_subtensor_view():
test_conv3d2d.test_get_diagonal_subtensor_view(wrap=cuda.CudaNdarray)
@parameterized.expand(('valid', 'full'), utt.custom_name_func)
def test_conv3d(border_mode):
test_conv3d2d.check_conv3d(border_mode=border_mode,
mode=mode_with_gpu,
shared=cuda.shared_constructor)
|
[
"[email protected]"
] | |
1f54af48b0de5de3deb1326d6dfc2e3b9b08012e
|
7246faf9a222269ce2612613f58dc5ff19091f10
|
/baekjoon/3000~5999/4949_균형잡힌세상.py
|
69e300ec26003ff839d8917a542427b2e7f68cc4
|
[] |
no_license
|
gusdn3477/Algorithm_Study
|
87a2eb72a8488d9263a86db70dadc7944434d41d
|
3fefe1dcb40122157845ffc542f41cb097711cc8
|
refs/heads/main
| 2023-08-30T12:18:21.412945 | 2021-09-28T13:00:11 | 2021-09-28T13:00:11 | 308,364,230 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 598 |
py
|
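# Baekjoon 4949 "The Balanced World": read lines until a line containing only
# '.', and use a stack to check that round and square brackets are balanced.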
a = input()
while a != '.':
poc = []
for i in range(len(a)):
if a[i] == '(' or a[i] == '[':
poc.append(a[i])
if a[i] == ')':
if not poc or poc[-1] != '(':
poc.append(a[i])
break
if poc[-1] == '(':
poc.pop()
if a[i] == ']':
if not poc or poc[-1] != '[':
poc.append(a[i])
break
if poc[-1] == '[':
poc.pop()
if not poc:
print("yes")
else:
print("no")
poc.clear()
a = input()
|
[
"[email protected]"
] | |
0d8f76b499ac816e3bd0061d7450637456aaa4d7
|
1681332a25e5130517c403bb7a860ca30506d5ea
|
/res/dlworkshop/conv_test.py
|
abb17bf25cec4bb3835f22de94b69b03e0211a02
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
dgyrt/dgyrt.github.io
|
240550826aa031323db1f64b00b36db1ac3d65df
|
fac6c1a9d10d8e87bad6e80aa96027b84975ee1d
|
refs/heads/master
| 2020-05-21T12:23:00.437395 | 2017-01-31T14:05:39 | 2017-01-31T14:05:39 | 43,422,967 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,173 |
py
|
"""" convnet test """
import os;
import gzip;
import cPickle as pickle;
import numpy as np;
import theano;
import theano.tensor as T;
from theano.tensor.nnet import conv;
from theano.tensor.signal import downsample;
n_epochs=100;
batch_size=100;
def relu(x):
return x*(x>1e-13);
class ConvLayer(object):
def __init__(self, filter_size, num_filters, num_channels, fm_size, batch_size, **kwargs):
self.filter_size=filter_size;
self.num_filters=num_filters;
self.num_channels=num_channels;
self.fm_size=fm_size;
self.batch_size=batch_size;
super(ConvLayer, self).__init__(**kwargs);
self.initialize();
self.params=[self.filters, self.bias];
def initialize(self):
filter_shape=(self.num_filters, self.num_channels)+(self.filter_size);
self.filters=theano.shared(np.asarray(np.random.uniform(low=-0.0001,
high=0.0001,
size=filter_shape),
dtype="float32"),
borrow=True);
self.bias=theano.shared(np.asarray(np.zeros((self.num_filters, )),
dtype="float32"), borrow=True);
def apply_lin(self, X):
Y=conv.conv2d(input=X,
filters=self.filters,
image_shape=(self.batch_size, self.num_channels)+(self.fm_size),
filter_shape=(self.num_filters, self.num_channels)+(self.filter_size));
Y+=self.bias.dimshuffle('x', 0, 'x', 'x');
return Y;
class ReLUConvLayer(ConvLayer):
def __init__(self, **kwargs):
super(ReLUConvLayer, self).__init__(**kwargs);
def apply(self, X):
return relu(self.apply_lin(X));
class MaxPooling(object):
def __init__(self, pool_size):
self.pool_size=pool_size;
def apply(self, X):
return downsample.max_pool_2d(X, self.pool_size);
class Layer(object):
def __init__(self, in_dim, out_dim, W=None, b=None, **kwargs):
self.in_dim=in_dim;
self.out_dim=out_dim;
self.W=W;
self.b=b;
self.initialize();
super(Layer, self).__init__(**kwargs);
self.params=[self.W, self.b];
def initialize(self):
        if self.W is None:
self.W=theano.shared(np.asarray(np.random.uniform(low=-0.0001,
high=0.0001,
size=(self.in_dim, self.out_dim)),
dtype="float32"),
borrow=True);
        if self.b is None:
self.b=theano.shared(np.asarray(np.zeros((self.out_dim, )),
dtype="float32"), borrow=True);
def apply_lin(self, X):
return T.dot(X, self.W)+self.b;
class ReLULayer(Layer):
def __init__(self, **kwargs):
super(ReLULayer, self).__init__(**kwargs);
def apply(self, X):
return relu(self.apply_lin(X));
class TanhLayer(Layer):
def __init__(self, **kwargs):
super(TanhLayer, self).__init__(**kwargs);
def apply(self, X):
return T.tanh(self.apply_lin(X));
class SoftmaxLayer(Layer):
def __init__(self, **kwargs):
super(SoftmaxLayer, self).__init__(**kwargs);
def apply(self, X):
return T.nnet.softmax(self.apply_lin(X));
def predict(self, X_out):
return T.argmax(X_out, axis=1);
def error(self, X_out, Y):
return T.mean(T.neq(self.predict(X_out), Y));
# load dataset
def shared_dataset(data_xy):
data_x, data_y=data_xy;
shared_x=theano.shared(np.asarray(data_x, dtype="float32"),
borrow=True);
shared_y=theano.shared(np.asarray(data_y, dtype="float32"),
borrow=True);
return shared_x, T.cast(shared_y, "int32");
def load_mnist(dataset):
f=gzip.open(dataset, 'rb');
train_set, valid_set, test_set=pickle.load(f);
f.close();
train_set_x, train_set_y=shared_dataset(train_set);
valid_set_x, valid_set_y=shared_dataset(valid_set);
test_set_x, test_set_y=shared_dataset(test_set);
return [(train_set_x, train_set_y), (valid_set_x, valid_set_y), (test_set_x, test_set_y)];
dataset=load_mnist("mnist.pkl.gz");
train_set_x, train_set_y=dataset[0];
valid_set_x, valid_set_y=dataset[1];
test_set_x, test_set_y=dataset[2];
n_train_batches=train_set_x.get_value(borrow=True).shape[0]/batch_size;
n_valid_batches=valid_set_x.get_value(borrow=True).shape[0]/batch_size;
n_test_batches=test_set_x.get_value(borrow=True).shape[0]/batch_size;
print n_train_batches
print n_valid_batches
print n_test_batches
print "dataset loaded"
# build model
X=T.matrix("data");
y=T.ivector("label");
idx=T.lscalar();
images=X.reshape((batch_size, 1, 28, 28));
### configure some layers
### build some convlayers
layer_0=ReLUConvLayer(filter_size=(7,7), num_filters=10, num_channels=1,
fm_size=(28, 28), batch_size=batch_size);
pool_0=MaxPooling((2,2));
layer_1=ReLUConvLayer(filter_size=(4,4), num_filters=10, num_channels=10,
fm_size=(11,11), batch_size=batch_size);
pool_1=MaxPooling((2,2));
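### feature map sizes: 28x28 -(7x7 conv)-> 22x22 -(2x2 pool)-> 11x11 -(4x4 conv)-> 8x8 -(2x2 pool)-> 4x4,
### so with 10 filters the flattened input to the hidden layer is 10*4*4=160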
layer_2=ReLULayer(in_dim=160, out_dim=100);
layer_3=SoftmaxLayer(in_dim=100, out_dim=10);
### compile some model
out=pool_1.apply(layer_1.apply(pool_0.apply(layer_0.apply(images))))
out=out.flatten(ndim=2);
out=layer_3.apply(layer_2.apply(out));
cost=T.nnet.categorical_crossentropy(out, y).mean();
params=layer_0.params+layer_1.params+layer_2.params+layer_3.params;
#### calculate the updates of each params
gparams=T.grad(cost, params);
from collections import OrderedDict;
updates=OrderedDict();
for param, gparam in zip(params, gparams):
updates[param]=param-0.01*gparam;
train=theano.function(inputs=[idx],
outputs=cost,
updates=updates,
givens={X: train_set_x[idx*batch_size: (idx+1)*batch_size],
y: train_set_y[idx*batch_size: (idx+1)*batch_size]});
test=theano.function(inputs=[idx],
outputs=layer_3.error(out, y),
givens={X: test_set_x[idx*batch_size: (idx+1)*batch_size],
y: test_set_y[idx*batch_size: (idx+1)*batch_size]});
print "the model is built :)"
# train the model
test_record=np.zeros((n_epochs, 1));
epoch=0;
while (epoch<n_epochs):
epoch+=1;
for minibatch_index in xrange(n_train_batches):
mlp_train_cost=train(minibatch_index);
iteration=(epoch-1)*n_train_batches+minibatch_index;
if (iteration+1)%n_train_batches==0:
print "MLP model";
test_cost=[test(i) for i in xrange(n_test_batches)];
test_record[epoch-1]=np.mean(test_cost);
print " epoch %i, test error %f %%" % (epoch, test_record[epoch-1]*100.);
|
[
"[email protected]"
] | |
36297de68d4dda62481025cf1bbce659d0ce664f
|
3b89c0a97ac6b58b6923a213bc8471e11ad4fe69
|
/python/CodingExercises/CheckSecondStringFormedFirstString.py
|
82a8ded99bb4b608b37b268d47ca9e6f94271932
|
[] |
no_license
|
ksayee/programming_assignments
|
b187adca502ecf7ff7b51dc849d5d79ceb90d4a6
|
13bc1c44e1eef17fc36724f20b060c3339c280ea
|
refs/heads/master
| 2021-06-30T07:19:34.192277 | 2021-06-23T05:11:32 | 2021-06-23T05:11:32 | 50,700,556 | 1 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,218 |
py
|
'''
Check whether second string can be formed from characters of first string
Given two strings str1 and str2, check if str2 can be formed from str1
Example :
Input : str1 = geekforgeeks, str2 = geeks
Output : Yes
Here, string2 can be formed from string1.
Input : str1 = geekforgeeks, str2 = and
Output : No
Here string2 cannot be formed from string1.
Input : str1 = geekforgeeks, str2 = geeeek
Output : Yes
Here string2 can be formed from string1
as string1 contains 'e' comes 4 times in
string2 which is present in string1.
'''
import collections
def CheckSecondStringFormedFirstString(str1,str2):
dict1=collections.Counter(str1)
dict2=collections.Counter(str2)
    for key,val in dict2.items():
        if key in dict1.keys() and dict1[key]>=val:
            dict1[key]=dict1.get(key)-val
        else:
            return False
    return True
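# Equivalent alternative (added sketch, not part of the original exercise):
# Counter subtraction drops non-positive counts, so str2 can be formed from
# str1 iff nothing is left over after subtracting str1's counts.
def can_form(str1,str2):
    return not (collections.Counter(str2)-collections.Counter(str1))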
def main():
str1='geekforgeeks'
str2='geeks'
print(CheckSecondStringFormedFirstString(str1,str2))
str1 = 'geekforgeeks'
str2 = 'and'
print(CheckSecondStringFormedFirstString(str1, str2))
str1 = 'geekforgeeks'
str2 = 'geeeek'
print(CheckSecondStringFormedFirstString(str1, str2))
if __name__=='__main__':
main()
|
[
"[email protected]"
] | |
9796214d25e80f9655fb1910bc028c1969ce3aca
|
1d8535658ed07fc88558c7d9bf3a01b709f189b1
|
/src/reversion/migrations/0001_initial.py
|
986fd81ac986f7c87b8babac57ae6a6c0bfa701a
|
[
"BSD-2-Clause"
] |
permissive
|
druids/django-reversion
|
ebedc4debe3ffc611f9e2bf72a04f388274502a0
|
d80a24b6a195c8a68bfc3100ba533419226fa18d
|
refs/heads/master
| 2020-12-25T08:50:58.658410 | 2018-06-10T20:19:42 | 2018-06-10T20:19:42 | 40,229,843 | 0 | 3 |
NOASSERTION
| 2020-04-09T13:16:57 | 2015-08-05T06:56:01 |
Python
|
UTF-8
|
Python
| false | false | 2,506 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Revision',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('manager_slug', models.CharField(default='default', max_length=200, db_index=True)),
('date_created', models.DateTimeField(auto_now_add=True, help_text='The date and time this revision was created.', verbose_name='date created', db_index=True)),
('comment', models.TextField(help_text='A text comment on this revision.', verbose_name='comment', blank=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, help_text='The user who created this revision.', null=True, verbose_name='user')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Version',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.TextField(help_text='Primary key of the model under version control.')),
('object_id_int', models.IntegerField(help_text="An indexed, integer version of the stored model's primary key, used for faster lookups.", null=True, db_index=True, blank=True)),
('format', models.CharField(help_text='The serialization format used by this model.', max_length=255)),
('serialized_data', models.TextField(help_text='The serialized form of this version of the model.')),
('object_repr', models.TextField(help_text='A string representation of the object.')),
('content_type', models.ForeignKey(help_text='Content type of the model under version control.', to='contenttypes.ContentType')),
('revision', models.ForeignKey(help_text='The revision that contains this version.', to='reversion.Revision')),
],
options={
},
bases=(models.Model,),
),
]
|
[
"[email protected]"
] | |
290c90e1ec3e9aea7039b80484a81718c05d1dfb
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_shelled.py
|
92293753951a72a46ead1e9e801bf3e2ad1a351b
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 233 |
py
|
from xai.brain.wordbase.nouns._shell import _SHELL
# class header
class _SHELLED(_SHELL, ):
def __init__(self,):
_SHELL.__init__(self)
self.name = "SHELLED"
self.specie = 'nouns'
self.basic = "shell"
self.jsondata = {}
|
[
"[email protected]"
] | |
ca58b1ce2b21900200329d5dbd2507235c210435
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03853/s066137758.py
|
acb2ac42342d566e74d51b19e21c6c91f5ab7f87
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 288 |
py
|
num = input().split()
hei = int(num[0])
wei = int(num[1])
photo = []
for i in range(hei):
temp = input()
temp = list(temp)
photo.append(temp)
photo.append(temp)
for i in range(hei*2):
for j in range(wei):
print(photo[i][j],end="")
print("\n",end="")
|
[
"[email protected]"
] | |
a0a3e8f4dab8d2d3cc6497f8b4e8c5507e50f494
|
9497432cd07d17be15853544197853d1ae7ae472
|
/encryption files/hashing/sha384hash.py
|
1880fd67f6fa014e3adfcf43b48c4f4a11238ba8
|
[] |
no_license
|
SeresAdrian/Crypto-Project
|
e99be9c2bf9155e1a54be4419d5626633fd2b333
|
4c2fd709f667bdfa71bc5fadd9b47a1c79f59c6a
|
refs/heads/master
| 2022-07-25T13:54:46.704949 | 2020-05-18T19:40:42 | 2020-05-18T19:40:42 | 265,021,044 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 194 |
py
|
#!/usr/bin/python
import hashlib
string=input("Please enter tha plaintext : ")
result = hashlib.sha384(string.encode())
print("The hexadecimal equivalent of hash is : ", result.hexdigest())
|
[
"[email protected]"
] | |
9b8ffd02c0680421820d9d17d7078ba7ee1365ba
|
ce8bb40bf2b688f19ab8bcc20cfd58994413bc0f
|
/ajax/ajax_mysite/app01/views.py
|
b372bd95be6a215aa5b89bd42d3acb0b23b5da03
|
[] |
no_license
|
Fover21/project1
|
457f452d7f6e7ecbfc81a18512377ebc5457f3f6
|
84d596caf5701d7d76eee8c50f61bcb6150c57f2
|
refs/heads/master
| 2020-03-24T20:01:51.506348 | 2018-12-26T06:07:45 | 2018-12-26T06:07:45 | 142,955,917 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,389 |
py
|
from django.shortcuts import render, HttpResponse, reverse, redirect
# Create your views here.
from django.views.decorators.csrf import csrf_exempt, csrf_protect
@csrf_exempt  # exempt this view from CSRF protection
def login(request):
return render(request, 'login.html')
def index(request):
i1, i2, i3 = '', '', ''
if request.method == 'POST':
i1 = int(request.POST.get('i1'))
i2 = int(request.POST.get('i2'))
i3 = i1 + i2
return render(request, 'index.html', {
'i1': i1,
'i2': i2,
'i3': i3,
})
# from django.views.decorators.csrf import ensure_csrf_cookie  # the second, global way to configure CSRF
# @csrf_exempt
def calc(request):
# csrfmiddlewaretoken = request.POST.get('csrfmiddlewaretoken')
# print(csrfmiddlewaretoken)
i1 = int(request.POST.get('i1'))
i2 = int(request.POST.get('i2'))
i3 = i1 + i2
print(request.POST)
return HttpResponse(i3)
# 上传
def upload(request):
if request.method == "POST":
print("FILES", request.FILES)
file_obj = request.FILES.get("file")
with open(file_obj.name, "wb") as f:
for i in file_obj.chunks():
f.write(i)
return HttpResponse("success!")
# test
def tt(request):
if request.method == "POST":
ret = reverse('uu')
print(ret)
return redirect(ret)
return render(request, 'index.html')
|
[
"[email protected]"
] | |
525faba85baf47e70bd840eb6b17b29331739083
|
0c41031269497790425702d4ad882423dc443a6a
|
/pandas14/pandas14_9.py
|
ad0ca612be2e850e77d6d818f876fb6c53ce6255
|
[] |
no_license
|
diegoami/datacamp-courses-PY
|
4c546e69241ca429adefdd459db92d617cfa0e9f
|
bab3082929fa6f1cf2fc2f2efb46d16374715b4b
|
refs/heads/master
| 2023-07-20T06:42:29.776349 | 2018-10-28T22:57:21 | 2018-10-28T22:57:21 | 92,448,198 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,599 |
py
|
import pandas as pd
# Load DataFrame from file_path: medals
medals = pd.read_csv('../data/medals_2.csv')
# Construct the pivot_table: medal_counts
medal_counts = medals.pivot_table(index='Edition',values='Athlete',columns='NOC',aggfunc='count')
# Load DataFrame from file_path: editions
editions = pd.read_csv('../data/editions.csv')
# Set Index of editions: totals
totals = editions.set_index('Edition')
# Reassign totals['Grand Total']: totals
totals = totals['Grand Total']
# Divide medal_counts by totals: fractions
fractions = medal_counts.divide( totals, axis = 'rows' )
# Apply the expanding mean: mean_fractions
mean_fractions = fractions.expanding().mean()
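# (the expanding mean at edition i averages the fractions over all editions up to and including i)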
# Compute the percentage change: fractions_change
fractions_change = mean_fractions.pct_change().multiply(100)
# Reset the index of fractions_change: fractions_change
fractions_change = fractions_change.reset_index()
ioc_codes = pd.read_csv('../data/country_codes.csv')
# Extract the relevant columns: ioc_codes
ioc_codes = ioc_codes[['Country', 'NOC']]
# Left join editions and ioc_codes: hosts
hosts = pd.merge(editions,ioc_codes, how='left')
# Extract relevant columns and set index: hosts
hosts = hosts[['Edition','NOC']].set_index( 'Edition')
# Reshape fractions_change: reshaped
reshaped = pd.melt(fractions_change,id_vars='Edition', value_name='Change')
# Print reshaped.shape and fractions_change.shape
print(reshaped.shape, fractions_change.shape)
# Extract rows from reshaped where 'NOC' == 'CHN': chn
chn = reshaped.loc[reshaped['NOC'] == 'CHN']
# Print last 5 rows of chn with .tail()
print(chn.tail())
|
[
"[email protected]"
] | |
15d215b500c6d26dbd37bfda3a9d73e8979c26aa
|
01af3f8a79453482febefe64d356a616abc08c1e
|
/backend/config/settings/production/third_party.py
|
c58e3e64fb67c81f95dfd14e878c6d18778211f4
|
[] |
no_license
|
by-Exist/django-skeleton
|
0ea3dbc815cb8da8417ef0f64e304715b8e5b5dd
|
4848dd1074533b368015cdde943719114d001bcc
|
refs/heads/master
| 2023-06-12T12:52:09.216952 | 2021-07-12T08:48:09 | 2021-07-12T08:48:09 | 372,245,930 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,638 |
py
|
from .django import *
# Django Storage
# =============================================================================
STATICFILES_STORAGE = "config.storages.StaticStorage"
DEFAULT_FILE_STORAGE = "config.storages.MediaStorage"
AWS_S3_REGION_NAME = "ewr1"  # TODO: shouldn't the region and endpoint URL also be managed via environment variables?
AWS_S3_ENDPOINT_URL = f"https://{AWS_S3_REGION_NAME}.vultrobjects.com/"
AWS_ACCESS_KEY_ID = env.str("DJANGO_STORAGE_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = env.str("DJANGO_STORAGE_SECRET_ACCESS_KEY")
# Django REST Framework
# =============================================================================
# https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK[
"DEFAULT_VERSIONING_CLASS"
] = "rest_framework.versioning.URLPathVersioning"
# DRF Spectacular
# =============================================================================
# https://drf-spectacular.readthedocs.io/en/latest/settings.html
SPECTACULAR_SETTINGS["TITLE"] = "Backend Production API"
SPECTACULAR_SETTINGS["DESCRIPTION"] = "Backend Production api description..."
SPECTACULAR_SETTINGS["VERSION"] = "0.0.1"
# https://swagger.io/docs/open-source-tools/swagger-ui/usage/configuration/
SPECTACULAR_SETTINGS["SWAGGER_UI_SETTINGS"]["supportedSubmitMethods"] = []
# Django Cachalot
# =============================================================================
# https://django-cachalot.readthedocs.io/en/latest/quickstart.html#settings
INSTALLED_APPS += ["cachalot"]
CACHES["cachalot"] = env.cache("DJANGO_CACHALOT_CACHE_URL")
CACHALOT_CACHE = "cachalot"
CACHALOT_UNCACHABLE_TABLES = ["django_migrations"]
|
[
"[email protected]"
] | |
bf8a6a3bbd710bdaa7611c6890907a61a0e9cce7
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_010/ch136_2020_04_01_12_09_01_220465.py
|
566b2a82eb758b3344edaf9b17037a14dee59e8d
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,641 |
py
|
import random
dinheiro=10
dicas=True
jogo=True
chutes=True
dado1=random.randint (1,6)
dado2=random.randint (1,6)
dado3=random.randint (1,6)
soma=dado1+dado2+dado3
while dicas:
    print ("Hint phase")
    print ("You have {} coins.".format (dinheiro))
    if dinheiro==0:
        dicas=False
        chutes=False
        print ("You lost the game!")
    else:
        pergunta=str(input("Do you want a hint?"))
        if pergunta=="yes":
            dinheiro=dinheiro-1
            dica1=int(input("Enter the first number: "))
            dica2=int(input("Enter the second number: "))
            dica3=int(input("Enter the third number: "))
            if dica1==soma or dica2==soma or dica3==soma:
                print ("It is among the three")
            else:
                print ("It is not among the three")
        elif pergunta=="no":
            dicas=False
while chutes:
    print ("Guessing phase")
    print ("You have {} coins.".format (dinheiro))
    if dinheiro==0:
        print ("You lost the game!")
        chutes=False
    else:
        chute=int(input("Guess a number: "))
        if chute==soma:
            dinheiro=dinheiro + 5*dinheiro
            print ("You got it!")
            chutes=False
            print ("You won the game with {} coins.".format (dinheiro))
        else:
            print ("You missed!")
            dinheiro=dinheiro-1
            if dinheiro==0:
                print ("You lost!")
                chutes=False
|
[
"[email protected]"
] | |
69024abc125c1c0fbb26411947e1976dc81fb6e6
|
1f41b828fb652795482cdeaac1a877e2f19c252a
|
/maya_menus/_MAINMENU_PMC_Rigging/05.Put Object-(RP[N])/03.Put Controller/18.sidePin.py
|
e9ed02aaea8fe1c13e224651d6f47fb6657f251a
|
[] |
no_license
|
jonntd/mayadev-1
|
e315efe582ea433dcf18d7f1e900920f5590b293
|
f76aeecb592df766d05a4e10fa2c2496f0310ca4
|
refs/heads/master
| 2021-05-02T07:16:17.941007 | 2018-02-05T03:55:12 | 2018-02-05T03:55:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 173 |
py
|
from maya import cmds  # needed when this snippet runs outside Maya's script editor
from sgMaya import sgModel, sgCmds
sels = cmds.ls( sl=1 )
if not sels: sels = [None]
for sel in sels:
sgCmds.putControllerToGeo( sel, sgModel.Controller.sidePinPoints )
|
[
"[email protected]"
] | |
22b95182bd9050b6d8dbb6cfd970e83489eff911
|
477c8309420eb102b8073ce067d8df0afc5a79b1
|
/Applications/ParaView/Testing/Python/DisconnectAndSaveAnimation.py
|
f9f080edafa9f6c87116a65800627d5c41831290
|
[
"LicenseRef-scancode-paraview-1.2"
] |
permissive
|
aashish24/paraview-climate-3.11.1
|
e0058124e9492b7adfcb70fa2a8c96419297fbe6
|
c8ea429f56c10059dfa4450238b8f5bac3208d3a
|
refs/heads/uvcdat-master
| 2021-07-03T11:16:20.129505 | 2013-05-10T13:14:30 | 2013-05-10T13:14:30 | 4,238,077 | 1 | 0 |
NOASSERTION
| 2020-10-12T21:28:23 | 2012-05-06T02:32:44 |
C++
|
UTF-8
|
Python
| false | false | 3,862 |
py
|
#!/usr/bin/env python
import QtTesting
import QtTestingImage
object1 = 'pqClientMainWindow/menubar/menuSources'
QtTesting.playCommand(object1, 'activate', 'SphereSource')
object2 = 'pqClientMainWindow/proxyTabDock/proxyTabWidget/qt_tabwidget_stackedwidget/objectInspector/Accept'
QtTesting.playCommand(object2, 'activate', '')
object3 = 'pqClientMainWindow/centralwidget/MultiViewManager/SplitterFrame/MultiViewSplitter/0/MultiViewFrameMenu/SplitVerticalButton'
QtTesting.playCommand(object3, 'activate', '')
QtTesting.playCommand(object1, 'activate', 'SphereSource')
object4 = 'pqClientMainWindow/proxyTabDock/proxyTabWidget/qt_tabwidget_stackedwidget/objectInspector/ScrollArea/qt_scrollarea_viewport/PanelArea/Editor/Center_0'
QtTesting.playCommand(object4, 'set_string', '1')
QtTesting.playCommand(object2, 'activate', '')
object5 = 'pqClientMainWindow/pipelineBrowserDock/pipelineBrowser'
QtTesting.playCommand(object5, 'currentChanged', '/0/0|0')
object6 = 'pqClientMainWindow/proxyTabDock/proxyTabWidget/qt_tabwidget_tabbar'
QtTesting.playCommand(object6, 'set_tab', '1')
object7 = 'pqClientMainWindow/proxyTabDock/proxyTabWidget/qt_tabwidget_stackedwidget/1QScrollArea0/qt_scrollarea_viewport/1pqDisplayProxyEditorWidget0/Form/ViewGroup/ViewData'
QtTesting.playCommand(object7, 'set_boolean', 'true')
object8 = 'pqClientMainWindow/menubar'
QtTesting.playCommand(object8, 'activate', 'menu_View')
object8 = 'pqClientMainWindow/menubar/menu_View'
QtTesting.playCommand(object8, 'activate', 'Animation View')
object9 = 'pqClientMainWindow/animationViewDock/animationView/pqAnimationWidget/CreateDeleteWidget/PropertyCombo'
QtTesting.playCommand(object9, 'set_string', 'Start Theta')
object10 = "pqClientMainWindow/animationViewDock/animationView/1pqAnimationWidget0/1QHeaderView0"
QtTesting.playCommand(object10, "mousePress", "1,1,0,0,0,2")
QtTesting.playCommand(object10, "mouseRelease", "1,1,0,0,0,2")
object11 = 'pqClientMainWindow/VCRToolbar/1QToolButton3'
QtTesting.playCommand(object11, 'activate', '')
QtTesting.playCommand(object11, 'activate', '')
object12 = 'pqClientMainWindow/menubar/menu_File'
QtTesting.playCommand(object12, 'activate', '')
QtTesting.playCommand(object12, 'activate', 'actionFileSaveAnimation')
object13 = 'pqAnimationSettingsDialog/checkBoxDisconnect'
QtTesting.playCommand(object13, 'set_boolean', 'true')
object14 = 'pqAnimationSettingsDialog/width'
QtTesting.playCommand(object14, 'set_string', '300')
object14 = 'pqAnimationSettingsDialog/height'
QtTesting.playCommand(object14, 'set_string', '300')
object15 = 'pqAnimationSettingsDialog/okButton'
QtTesting.playCommand(object15, 'activate', '')
object16 = 'pqClientMainWindow/FileSaveAnimationDialog'
# Remove old files.
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0000.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0001.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0002.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0003.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0004.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0005.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0006.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0007.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0008.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0009.png')
QtTesting.playCommand(object16, 'filesSelected', '$PARAVIEW_TEST_ROOT/disconnectSave.png')
import time
print "Wait for 60 secs"
time.sleep(60);
QtTestingImage.compareImage('$PARAVIEW_TEST_ROOT/disconnectSave.0005.png', 'DisconnectAndSaveAnimation.png');
|
[
"[email protected]"
] | |
be33d28852484275819ace98b621bc01decf9381
|
985a874c832d7632e287f2185b18aaf2e1b42018
|
/dtcwt_gainlayer/layers/nonlinear.py
|
f3f2b274c311342e0a0b16400783156a896a9a06
|
[
"MIT"
] |
permissive
|
fbcotter/dtcwt_gainlayer
|
e2ea03ccfe8ad4f903b59846c1c902391c66b227
|
32ec3e21066edc2a0d5edefaf70f43d031d1b4ac
|
refs/heads/master
| 2023-03-28T13:08:37.919222 | 2019-08-20T09:05:46 | 2019-08-20T09:05:46 | 157,608,716 | 6 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,276 |
py
|
import torch
import torch.nn as nn
import torch.nn.functional as func
from dtcwt_gainlayer.layers.shrink import SparsifyWaveCoeffs_std, mag, SoftShrink
class PassThrough(nn.Module):
def forward(self, x):
return x
class WaveNonLinearity(nn.Module):
""" Performs a wavelet-based nonlinearity.
Args:
C (int): Number of input channels. Some of the nonlinearities have batch
norm, so need to know this.
lp (str): Nonlinearity to use for the lowpass coefficients
bp (list(str)): Nonlinearity to use for the bandpass coefficients.
lp_q (float): Quantile value for sparsity threshold for lowpass.
1 keeps all coefficients and 0 keeps none. Only valid if lp is
'softshrink_std' or 'hardshrink_std'. See
:class:`SparsifyWaveCoeffs_std`.
bp_q (float): Quantile value for sparsity threshold for bandpass
coefficients. Only valid if bp is 'softshrink_std' or
'hardshrink_std'.
The options for the lowpass are:
- none
- relu (as you'd expect)
- relu2 - applies batch norm + relu
- softshrink - applies soft shrinkage with a learnable threshold
- hardshrink_std - applies hard shrinkage. The 'std' implies that it
tracks the standard deviation of the activations, and sets a threshold
attempting to reach a desired sparsity level. This assumes that the
lowpass coefficients follow a laplacian distribution. See
:class:`dtcwt_gainlayer.layers.shrink.SparsifyWaveCoeffs_std`.
- softshrink_std - same as hardshrink std except uses soft shrinkage.
The options for the bandpass are:
- none
    - relu (applied independently to the real and imaginary components)
- relu2 - applies batch norm + relu to the magnitude of the bandpass
coefficients
    - softshrink - applies soft shrinkage to the magnitude of the bp
      coefficients with a learnable threshold
- hardshrink_std - applies hard shrinkage by tracking the standard
deviation. Assumes the bp distributions follow an exponential
distribution. See
:class:`dtcwt_gainlayer.layers.shrink.SparsifyWaveCoeffs_std`.
- softshrink_std - same as hardshrink_std but with soft shrinkage.
"""
def __init__(self, C, lp=None, bp=(None,), lp_q=0.8, bp_q=0.8):
super().__init__()
if lp is None or lp == 'none':
self.lp = PassThrough()
elif lp == 'relu':
self.lp = nn.ReLU()
elif lp == 'relu2':
self.lp = BNReLUWaveCoeffs(C, bp=False)
elif lp == 'softshrink':
self.lp = SoftShrink(C, complex=False)
elif lp == 'hardshrink_std':
self.lp = SparsifyWaveCoeffs_std(C, lp_q, bp=False, soft=False)
elif lp == 'softshrink_std':
self.lp = SparsifyWaveCoeffs_std(C, lp_q, bp=False, soft=True)
else:
raise ValueError("Unkown nonlinearity {}".format(lp))
fs = []
for b in bp:
if b is None or b == 'none':
f = PassThrough()
elif b == 'relu':
f = nn.ReLU()
elif b == 'relu2':
f = BNReLUWaveCoeffs(C, bp=True)
elif b == 'softshrink':
f = SoftShrink(C, complex=True)
elif b == 'hardshrink_std':
f = SparsifyWaveCoeffs_std(C, bp_q, bp=True, soft=False)
elif b == 'softshrink_std':
f = SparsifyWaveCoeffs_std(C, bp_q, bp=True, soft=True)
else:
raise ValueError("Unkown nonlinearity {}".format(lp))
fs.append(f)
self.bp = nn.ModuleList(fs)
def forward(self, x):
""" Applies the selected lowpass and bandpass nonlinearities to the
input x.
Args:
x (tuple): tuple of (lowpass, bandpasses)
Returns:
y (tuple): tuple of (lowpass, bandpasses)
"""
yl, yh = x
yl = self.lp(yl)
yh = [bp(y) if y.shape != torch.Size([0]) else y
for bp, y in zip(self.bp, yh)]
return (yl, yh)
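# Usage sketch (added; names and values are illustrative, not from the original
# file -- yl/yh are assumed to be the lowpass tensor and list of bandpass
# tensors produced by a DTCWT forward transform):
#   nonlin = WaveNonLinearity(C=32, lp='relu2', bp=('softshrink_std',), bp_q=0.9)
#   yl, yh = nonlin((yl, yh))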
class BNReLUWaveCoeffs(nn.Module):
""" Applies batch normalization followed by a relu
Args:
C (int): number of channels
bp (bool): If true, applies bn+relu to the magnitude of the bandpass
coefficients. If false, is applying bn+relu to the lowpass coeffs.
"""
def __init__(self, C, bp=True):
super().__init__()
self.bp = bp
if bp:
self.BN = nn.BatchNorm2d(6*C)
else:
self.BN = nn.BatchNorm2d(C)
self.ReLU = nn.ReLU()
def forward(self, x):
""" Applies nonlinearity to the input x """
if self.bp:
s = x.shape
# Move the orientation dimension to the channel
x = x.view(s[0], s[1]*s[2], s[3], s[4], s[5])
θ = torch.atan2(x.data[..., 1], x.data[..., 0])
r = mag(x, complex=True)
r_new = self.ReLU(self.BN(r))
y = torch.stack((r_new * torch.cos(θ), r_new * torch.sin(θ)), dim=-1)
# Reshape to a 6D tensor again
y = y.view(s[0], s[1], s[2], s[3], s[4], s[5])
else:
y = self.ReLU(self.BN(x))
return y
|
[
"[email protected]"
] | |
34a5496edaf78c200fe0a67006564fb6d0ff9b2b
|
a4ea525e226d6c401fdb87a6e9adfdc5d07e6020
|
/src/azure-cli-core/azure/cli/core/tests/test_aaz_paging.py
|
2ec14f790d3d6bce17f38400edfd9df57904a7dc
|
[
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] |
permissive
|
Azure/azure-cli
|
13340eeca2e288e66e84d393fa1c8a93d46c8686
|
a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca
|
refs/heads/dev
| 2023-08-17T06:25:37.431463 | 2023-08-17T06:00:10 | 2023-08-17T06:00:10 | 51,040,886 | 4,018 | 3,310 |
MIT
| 2023-09-14T11:11:05 | 2016-02-04T00:21:51 |
Python
|
UTF-8
|
Python
| false | false | 3,855 |
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from azure.cli.core.aaz import AAZUndefined
from azure.cli.core.aaz._paging import AAZPaged, AAZPageIterator
from azure.cli.core.mock import DummyCli
class TestAAZPaging(unittest.TestCase):
def test_aaz_paging_sample(self):
data_by_pages = [(['a', 'b', 'c'], 1), (['d', 'e'], 2), (['f'], 3), (['g', 'h'], AAZUndefined)]
result = {
"value": AAZUndefined,
"next_link": AAZUndefined,
}
def executor(next_link):
if next_link is None:
next_link = 0
value, next_link = data_by_pages[next_link]
result["value"] = value
result['next_link'] = next_link
def extract_result():
return result['value'], result['next_link']
paged = AAZPaged(executor=executor, extract_result=extract_result, cli_ctx=DummyCli())
self.assertTrue(list(paged) == ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])
def test_aaz_paging_with_limit_and_token(self):
data_by_pages = [
(["a", "b", "c"], 1),
(["d", "e"], 2),
(["f"], 3),
(["g", "h"], AAZUndefined)
]
result = {
"value": AAZUndefined,
"next_link": AAZUndefined
}
def executor(next_link):
if next_link is None:
next_link = 0
value, next_link = data_by_pages[next_link]
result["value"] = value
result["next_link"] = next_link
def extract_result():
return result["value"], result["next_link"]
next_token = '{"next_link": 1, "offset": 1}'
paged = AAZPaged(
executor=executor, extract_result=extract_result, cli_ctx=DummyCli(),
token=next_token, limit=4
)
self.assertTrue(list(paged) == ["e", "f", "g", "h"])
def test_aaz_paging_iterator(self):
data_by_pages = [
(["a", "b", "c"], 1),
(["d", "e"], 2),
(["f"], 3),
(["g", "h"], AAZUndefined)
]
result = {
"value": AAZUndefined,
"next_link": AAZUndefined
}
def executor(next_link):
if next_link is None:
next_link = 0
value, next_link = data_by_pages[next_link]
result["value"] = value
result["next_link"] = next_link
def extract_result():
return result["value"], result["next_link"]
page_iterator = AAZPageIterator(
executor=executor, extract_result=extract_result, cli_ctx=DummyCli(),
next_link=1, offset=1, limit=4
)
# | a b c | d e | f | g h |
# *
self.assertTrue(page_iterator._next_link == 1)
self.assertTrue(page_iterator._start == 1) # offset
self.assertTrue(page_iterator._total == 5)
# | a b c | d e | f | g h |
# *
next(page_iterator)
self.assertTrue(page_iterator._next_link == 2)
self.assertTrue(page_iterator._total == 3)
# | a b c | d e | f | g h |
# *
next(page_iterator)
self.assertTrue(page_iterator._next_link == 3)
self.assertTrue(page_iterator._total == 2)
# | a b c | d e | f | g h |
# *
next(page_iterator)
self.assertTrue(page_iterator._next_link == AAZUndefined)
self.assertTrue(page_iterator._total == 0)
|
[
"[email protected]"
] | |
2a43d736e2b0bed80741d6dc401155c5fb685570
|
374aac5655cbdead72683a5e8b6e02126a024768
|
/tests/test_sqlalchemy.py
|
b05d87ce415b1c3218592d47c7af99354879f0b8
|
[
"MIT"
] |
permissive
|
naveenkumar-grofers/nplusone
|
0f51179a5a4aa717ea2b537bfa1a8e07af568ebb
|
2bcf727a73c05afa01a020993997a6a60778b872
|
refs/heads/master
| 2021-01-24T21:54:08.390445 | 2015-11-15T16:52:42 | 2015-11-15T16:52:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,041 |
py
|
# -*- coding: utf-8 -*-
import pytest
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from nplusone.core import signals
import nplusone.ext.sqlalchemy # noqa
from tests import utils
from tests.utils import calls # noqa
pytest.yield_fixture(calls)
Base = declarative_base()
models = utils.make_models(Base)
@pytest.fixture()
def session():
engine = sa.create_engine('sqlite:///:memory:')
Session = sa.orm.sessionmaker(bind=engine)
Base.metadata.create_all(bind=engine)
return Session()
@pytest.fixture()
def objects(session):
hobby = models.Hobby()
address = models.Address()
user = models.User(addresses=[address], hobbies=[hobby])
session.add(user)
session.commit()
session.close()
class TestManyToOne:
def test_many_to_one(self, session, objects, calls):
user = session.query(models.User).first()
user.addresses
assert len(calls) == 1
call = calls[0]
assert call.objects == (models.User, 'addresses')
assert 'user.addresses' in ''.join(call.frame[4])
def test_many_to_one_ignore(self, session, objects, calls):
user = session.query(models.User).first()
with signals.ignore(signals.lazy_load):
user.addresses
assert len(calls) == 0
def test_many_to_one_subquery(self, session, objects, calls):
user = session.query(
models.User
).options(
sa.orm.subqueryload('addresses')
).first()
user.addresses
assert len(calls) == 0
def test_many_to_one_joined(self, session, objects, calls):
user = session.query(models.User).options(sa.orm.joinedload('addresses')).first()
user.addresses
assert len(calls) == 0
def test_many_to_one_reverse(self, session, objects, calls):
address = session.query(models.Address).first()
address.user
assert len(calls) == 1
call = calls[0]
assert call.objects == (models.Address, 'user')
assert 'address.user' in ''.join(call.frame[4])
def test_many_to_one_reverse_subquery(self, session, objects, calls):
address = session.query(
models.Address
).options(
sa.orm.subqueryload('user')
).first()
address.user
assert len(calls) == 0
def test_many_to_one_reverse_joined(self, session, objects, calls):
address = session.query(models.Address).options(sa.orm.joinedload('user')).first()
address.user
assert len(calls) == 0
class TestManyToMany:
def test_many_to_many(self, session, objects, calls):
user = session.query(models.User).first()
user.hobbies
assert len(calls) == 1
call = calls[0]
assert call.objects == (models.User, 'hobbies')
assert 'user.hobbies' in ''.join(call.frame[4])
def test_many_to_many_subquery(self, session, objects, calls):
user = session.query(models.User).options(sa.orm.subqueryload('hobbies')).first()
user.hobbies
assert len(calls) == 0
def test_many_to_many_joined(self, session, objects, calls):
user = session.query(models.User).options(sa.orm.joinedload('hobbies')).first()
user.hobbies
assert len(calls) == 0
def test_many_to_many_reverse(self, session, objects, calls):
hobby = session.query(models.Hobby).first()
hobby.users
assert len(calls) == 1
call = calls[0]
assert call.objects == (models.Hobby, 'users')
assert 'hobby.users' in ''.join(call.frame[4])
def test_many_to_many_reverse_subquery(self, session, objects, calls):
hobby = session.query(models.Hobby).options(sa.orm.subqueryload('users')).first()
hobby.users
assert len(calls) == 0
def test_many_to_many_reverse_joined(self, session, objects, calls):
hobby = session.query(models.Hobby).options(sa.orm.joinedload('users')).first()
hobby.users
assert len(calls) == 0
|
[
"[email protected]"
] | |
8a3441b439ae0c781ace3ba8281fe64a57450d67
|
b550eda62179ffd8e49a59df7f8a30163140204f
|
/backend/openshift-old/services/user/service/model/user.py
|
72451f8834453939723096891846cc39a7ccf1a3
|
[
"Apache-2.0"
] |
permissive
|
bgoesswe/openeo-repeatability
|
6222fb235b70fda9da998b63fec92c0e5ac07169
|
087b9965e710d16cd6f29cb25e2cb94e443c2b30
|
refs/heads/master
| 2022-12-11T03:43:35.365574 | 2018-08-07T20:02:02 | 2018-08-07T20:02:02 | 139,158,921 | 0 | 1 | null | 2022-12-08T02:15:15 | 2018-06-29T14:27:34 |
Python
|
UTF-8
|
Python
| false | false | 2,121 |
py
|
''' Model of User '''
import jwt
import datetime
from flask import current_app
from service import DB, BCRYPT
class User(DB.Model):
__tablename__ = "users"
    id = DB.Column(DB.Integer, primary_key=True, autoincrement=True)  # rename to uid
username = DB.Column(DB.String(128), unique=True, nullable=False)
email = DB.Column(DB.String(128), unique=True, nullable=False)
password = DB.Column(DB.String(255), nullable=False)
admin = DB.Column(DB.Boolean, default=False, nullable=False)
active = DB.Column(DB.Boolean, default=True, nullable=False)
created_at = DB.Column(DB.DateTime, nullable=False)
    def __init__(self, username, email, password, created_at=None, admin=False):
self.username = username
self.email = email
self.password = self.generate_hash(password)
self.admin = admin
        self.created_at = created_at or datetime.datetime.utcnow()
def get_dict(self):
''' Returns the users data '''
return {
"id": self.id,
"username": self.username,
"email": self.email,
"admin": self.admin,
"created_at": self.created_at
}
@staticmethod
def generate_hash(password):
''' Generates the password hash '''
return BCRYPT.generate_password_hash(password, current_app.config.get('BCRYPT_LOG_ROUNDS')).decode()
@staticmethod
def encode_auth_token(user_id):
''' Generates the auth token '''
payload = {
'exp': datetime.datetime.utcnow() + datetime.timedelta(
days=current_app.config.get('TOKEN_EXPIRATION_DAYS'),
seconds=current_app.config.get('TOKEN_EXPIRATION_SECONDS')
),
'iat': datetime.datetime.utcnow(),
'sub': user_id
}
return jwt.encode(payload, current_app.config.get('SECRET_BCRYPT'), algorithm='HS256')
@staticmethod
def decode_auth_token(auth_token):
''' Decodes the auth token '''
payload = jwt.decode(auth_token, current_app.config.get('SECRET_BCRYPT'))
return payload['sub']
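# Usage sketch (added; illustrative only -- assumes a Flask app context with the
# SECRET_BCRYPT and TOKEN_EXPIRATION_* config keys used above):
#   token = User.encode_auth_token(user.id)
#   user_id = User.decode_auth_token(token)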
|
[
"[email protected]"
] | |
2db80125614126b1bda5dac81b52721288060e5e
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/D/dasfaha/get_imdb_movie_rating.py
|
4f8bab114f20f4b7fedaa3cbfb02a591f9fa6362
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,532 |
py
|
import lxml.html
import scraperwiki
#Get the data
html = scraperwiki.scrape("http://www.imdb.com/title/tt2103264/")
#The request to IMDB returns plain text so the line below processes this text and turns it into a format that can be queried
root = lxml.html.fromstring(html)
#The rating of a movie is within a div with class: "titlePageSprite star-box-giga-star" like this:
#
#<div class="titlePageSprite star-box-giga-star">
# 7.7
#</div>
#
#Use CSS selector to get the div html element that has class="titlePageSprite"
el = root.cssselect("div.titlePageSprite")
#el is a list as there could be several div elements with the same class. In our case we know there is only one div with that class
print "Number of elements in el: {0}".format(len(el))
#Create a python 'dictionary' to store the two fields of the data we just scraped: 'movie title' and 'rating'
data = {
'movie title': 'Emperor', #exercise: is it possible to scrape the movie name from the page? :p
'rating' : el[0].text
}
print "Movie rating: {0}".format(data['rating']) #The fields in 'data' can be accessed by their names
#Save into a database. Completely pointless in this case but useful if the data changes...
scraperwiki.sqlite.save(unique_keys=['movie title'], data=data)
|
[
"[email protected]"
] | |
c4be35002664253e83bad83bee500cc207fa909c
|
e4700f3ff598b997bf0ea35bcdb76b00c530c994
|
/tmp.py
|
d616e22344314282bffb61071d044da898ac2eef
|
[] |
no_license
|
nikkibisarya/therapysummarization
|
64d056683454289561a45b6e5e1d88f5e3f78dae
|
203b5a06577456d68d3022aa94d9476e0d352e18
|
refs/heads/master
| 2020-03-16T23:23:11.698069 | 2019-11-05T18:08:54 | 2019-11-05T18:08:54 | 133,075,146 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 407 |
py
|
import numpy as np
import matplotlib.pyplot as plt
loss = [1.0761, 0.8476, 0.7516, 0.6956, 0.6562, 0.6243, 0.5985, 0.5765, 0.5586, 0.5427, 0.5315, 0.5169, 0.5089, 0.4994,
0.4923, 0.4866, 0.4806, 0.4763, 0.4708, 0.4707]
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.plot(np.arange(len(loss)), loss)
plt.legend(['train'], loc='upper left')
plt.savefig('MISCloss.png')
|
[
"[email protected]"
] | |
a0afd01311fc3c8b2e58fd920285130338e86b2d
|
62c11667bc780b8fb80b69a069c5e4135a40ac8a
|
/src/newsletter/migrations/0001_initial.py
|
77ec77167df437d057a369a632f89115ed37d047
|
[] |
no_license
|
garabek/Django_BootcampSite
|
39b8bc976730c0776d733536f020a043d2f89370
|
8752cd7f2c469e2e4c9cf639e357c51cd05b5c4d
|
refs/heads/master
| 2021-07-01T12:09:57.557274 | 2017-09-21T23:07:01 | 2017-09-21T23:07:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 710 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='SignUp',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('email', models.EmailField(max_length=254)),
('full_name', models.CharField(max_length=100, null=True, blank=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
),
]
|
[
"[email protected]"
] | |
88c0d4f7001e4d7f2d2a994d979b9b99a1ed7d08
|
9adc810b07f7172a7d0341f0b38088b4f5829cf4
|
/experiments/ashvin/icml2020/hand/buffers/pen1.py
|
c92cde36156496ccf82fa584986ffbc35a17a452
|
[
"MIT"
] |
permissive
|
Asap7772/railrl_evalsawyer
|
7ee9358b5277b9ddf2468f0c6d28beb92a5a0879
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
refs/heads/main
| 2023-05-29T10:00:50.126508 | 2021-06-18T03:08:12 | 2021-06-18T03:08:12 | 375,810,557 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,576 |
py
|
"""
AWR + SAC from demo experiment
"""
from rlkit.demos.source.dict_to_mdp_path_loader import DictToMDPPathLoader
from rlkit.launchers.experiments.awac.awac_rl import experiment, process_args
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.arglauncher import run_variants
from rlkit.torch.sac.policies import GaussianPolicy, BinnedGMMPolicy
from rlkit.torch.networks import Clamp
if __name__ == "__main__":
variant = dict(
num_epochs=1001,
num_eval_steps_per_epoch=1000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=1024,
replay_buffer_size=int(1E6),
layer_size=256,
policy_class=GaussianPolicy,
policy_kwargs=dict(
hidden_sizes=[256, 256, 256, 256],
max_log_std=0,
min_log_std=-6,
std_architecture="values",
),
buffer_policy_class=BinnedGMMPolicy,
buffer_policy_kwargs=dict(
hidden_sizes=[256, 256, 256, 256],
max_log_std=0,
min_log_std=-6,
std_architecture="values",
num_gaussians=11,
),
algorithm="SAC",
version="normal",
collection_mode='batch',
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=False,
alpha=0,
compute_bc=False,
bc_num_pretrain_steps=0,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=25000,
policy_weight_decay=1e-4,
q_weight_decay=0,
bc_loss_type="mse",
rl_weight=1.0,
use_awr_update=True,
use_reparam_update=False,
reparam_weight=0.0,
awr_weight=0.0,
bc_weight=1.0,
post_bc_pretrain_hyperparams=dict(
bc_weight=0.0,
compute_bc=False,
),
reward_transform_kwargs=None, # r' = r + 1
terminal_transform_kwargs=None, # t = 0
),
launcher_config=dict(
num_exps_per_instance=1,
region='us-west-2',
),
path_loader_class=DictToMDPPathLoader,
path_loader_kwargs=dict(
obs_key="state_observation",
demo_paths=[
# dict(
# path="demos/icml2020/hand/pen2_sparse.npy",
# obs_dict=True,
# is_demo=True,
# ),
# dict(
# path="demos/icml2020/hand/pen_bc5.npy",
# obs_dict=False,
# is_demo=False,
# train_split=0.9,
# ),
],
),
add_env_demos=True,
add_env_offpolicy_data=True,
# logger_variant=dict(
# tensorboard=True,
# ),
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
# save_pretrained_algorithm=True,
# snapshot_mode="all",
use_validation_buffer=True,
)
search_space = {
'env': ["pen-sparse-v0", "door-sparse-v0", ],
'trainer_kwargs.bc_loss_type': ["mle"],
'trainer_kwargs.awr_loss_type': ["mle"],
'seedid': range(3),
'trainer_kwargs.beta': [0.5, ],
'trainer_kwargs.reparam_weight': [0.0, ],
'trainer_kwargs.awr_weight': [1.0],
'trainer_kwargs.bc_weight': [1.0, ],
'policy_kwargs.std_architecture': ["values", ],
# 'trainer_kwargs.compute_bc': [True, ],
'trainer_kwargs.awr_use_mle_for_vf': [True, ],
'trainer_kwargs.awr_sample_actions': [False, ],
'trainer_kwargs.awr_min_q': [True, ],
'trainer_kwargs.q_weight_decay': [0],
'trainer_kwargs.reward_transform_kwargs': [None, ],
'trainer_kwargs.terminal_transform_kwargs': [dict(m=0, b=0), ],
'qf_kwargs.output_activation': [Clamp(max=0)],
'trainer_kwargs.train_bc_on_rl_buffer':[True],
# 'policy_kwargs.num_gaussians': [11, ],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, process_args)
|
[
"[email protected]"
] | |
70c3c06f681b066ac0388b0d3c1198b4074e9724
|
7f24023d365e013ec0924844c1a872edfb0c75b4
|
/tests/trac/trac-0186/check.py
|
08b3119a43dd3dd72dd22febf93509b88bca7eca
|
[
"Python-2.0",
"MIT",
"Apache-2.0"
] |
permissive
|
pabigot/pyxb
|
cd42c024607572c6363682d389e9296caf3f2857
|
5ee5ba54c9f702dc9c9efc2731ee547ecd4dae4a
|
refs/heads/next
| 2023-05-11T03:23:19.599756 | 2023-04-29T20:38:15 | 2023-04-29T20:45:13 | 20,547,850 | 130 | 63 |
Apache-2.0
| 2021-08-19T16:52:18 | 2014-06-06T01:49:03 |
Python
|
UTF-8
|
Python
| false | false | 493 |
py
|
# -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.utils.domutils
import resources
import unittest
class ExternalTrac0186 (unittest.TestCase):
def testXBIngress (self):
instance = resources.XBIngress(match='all', action1='none', digits1='', action2='none', digits2='')
def testXBMatch (self):
instance = resources.XBMatch('all')
if '__main__' == __name__:
unittest.main()
|
[
"[email protected]"
] | |
969d035c63ace1f7b4c413e93f06400bb2d2bf34
|
119437adb7830659307c18b79a9cc3f6bfc6fe40
|
/transformers_learning/english_sequence_labeling/torch_model_train.py
|
234011630b2febd960451887847252ee4bdd95c0
|
[] |
no_license
|
percent4/PyTorch_Learning
|
478bec35422cdc66bf41b4258e29fbcb6d24f60c
|
24184d49032c9c9a68142aff89dabe33adc17b52
|
refs/heads/master
| 2023-03-31T03:01:19.372830 | 2023-03-17T17:02:39 | 2023-03-17T17:02:39 | 171,400,828 | 16 | 7 | null | 2023-09-02T08:53:26 | 2019-02-19T03:47:41 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 5,513 |
py
|
# -*- coding: utf-8 -*-
# @Time : 2021/1/31 15:01
# @Author : Jclian91
# @File : torch_model_train.py
# @Place : Yangpu, Shanghai
import json
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
from transformers import BertForTokenClassification, BertTokenizer, BertConfig
from util import event_type, train_file_path, test_file_path
from util import MAX_LEN, BERT_MODEL_DIR, TRAIN_BATCH_SIZE, VALID_BATCH_SIZE, EPOCHS, LEARNING_RATE
from load_data import read_data
# tokenizer and label_2_id_dict
with open("{}_label2id.json".format(event_type), "r", encoding="utf-8") as f:
tag2idx = json.loads(f.read())
idx2tag = {v: k for k, v in tag2idx.items()}
class CustomDataset(Dataset):
def __init__(self, tokenizer, sentences, labels, max_len):
self.len = len(sentences)
self.sentences = sentences
self.labels = labels
self.tokenizer = tokenizer
self.max_len = max_len
def __getitem__(self, index):
sentence = str(self.sentences[index])
inputs = self.tokenizer.encode_plus(
sentence,
None,
add_special_tokens=True,
max_length=self.max_len,
truncation=True,
padding="max_length",
# pad_to_max_length=True,
return_token_type_ids=True
)
ids = inputs['input_ids']
mask = inputs['attention_mask']
        label = self.labels[index] + [0] * MAX_LEN  # pad with label id 0; '+' copies, so the stored list is not mutated
        label = label[:MAX_LEN]
return {
'ids': torch.tensor(ids, dtype=torch.long),
'mask': torch.tensor(mask, dtype=torch.long),
'tags': torch.tensor(label, dtype=torch.long)
}
def __len__(self):
return self.len
# Creating the customized model
class BERTClass(torch.nn.Module):
def __init__(self):
super(BERTClass, self).__init__()
config = BertConfig.from_pretrained("./bert-base-uncased", num_labels=len(list(tag2idx.keys())))
self.l1 = BertForTokenClassification.from_pretrained('./bert-base-uncased', config=config)
# self.l2 = torch.nn.Dropout(0.3)
# self.l3 = torch.nn.Linear(768, 200)
def forward(self, ids, mask, labels):
output_1 = self.l1(ids, mask, labels=labels)
# output_2 = self.l2(output_1[0])
# output = self.l3(output_2)
return output_1
def flat_accuracy(preds, labels):
flat_preds = np.argmax(preds, axis=2).flatten()
flat_labels = labels.flatten()
return np.sum(flat_preds == flat_labels)/len(flat_labels)
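# note: this flat accuracy is computed over all positions, including padded ones,
# so it can overestimate the true token-level accuracy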
def valid(model, testing_loader):
model.eval()
eval_loss = 0; eval_accuracy = 0
nb_eval_steps, nb_eval_examples = 0, 0
with torch.no_grad():
for _, data in enumerate(testing_loader):
ids = data['ids'].to(dev, dtype=torch.long)
mask = data['mask'].to(dev, dtype=torch.long)
targets = data['tags'].to(dev, dtype=torch.long)
output = model(ids, mask, labels=targets)
loss, logits = output[:2]
logits = logits.detach().cpu().numpy()
label_ids = targets.to('cpu').numpy()
accuracy = flat_accuracy(logits, label_ids)
eval_loss += loss.mean().item()
eval_accuracy += accuracy
nb_eval_examples += ids.size(0)
nb_eval_steps += 1
eval_loss = eval_loss/nb_eval_steps
print("Validation loss: {}".format(eval_loss))
print("Validation Accuracy: {}".format(eval_accuracy/nb_eval_steps))
if __name__ == '__main__':
# Preparing for CPU or GPU usage
dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
tokenizer = BertTokenizer.from_pretrained('./{}'.format(BERT_MODEL_DIR))
# Creating the Dataset and DataLoader for the neural network
train_sentences, train_labels = read_data(train_file_path)
train_labels = [[tag2idx.get(l) for l in lab] for lab in train_labels]
test_sentences, test_labels = read_data(test_file_path)
test_labels = [[tag2idx.get(l) for l in lab] for lab in test_labels]
print("TRAIN Dataset: {}".format(len(train_sentences)))
print("TEST Dataset: {}".format(len(test_sentences)))
training_set = CustomDataset(tokenizer, train_sentences, train_labels, MAX_LEN)
testing_set = CustomDataset(tokenizer, test_sentences, test_labels, MAX_LEN)
train_params = {'batch_size': TRAIN_BATCH_SIZE, 'shuffle': True, 'num_workers': 0}
test_params = {'batch_size': VALID_BATCH_SIZE, 'shuffle': True, 'num_workers': 0}
training_loader = DataLoader(training_set, **train_params)
testing_loader = DataLoader(testing_set, **test_params)
# train the model
model = BERTClass()
model.to(dev)
optimizer = torch.optim.Adam(params=model.parameters(), lr=LEARNING_RATE)
for epoch in range(EPOCHS):
model.train()
for _, data in enumerate(training_loader):
ids = data['ids'].to(dev, dtype=torch.long)
mask = data['mask'].to(dev, dtype=torch.long)
targets = data['tags'].to(dev, dtype=torch.long)
loss = model(ids, mask, labels=targets)[0]
# optimizer.zero_grad()
if _ % 50 == 0:
print(f'Epoch: {epoch}, Batch: {_}, Loss: {loss.item()}')
optimizer.zero_grad()
loss.backward()
optimizer.step()
# model evaluate
valid(model, testing_loader)
torch.save(model.state_dict(), '{}_ner.pth'.format(event_type))
|
[
"[email protected]"
] | |
d8c137dda1852fc28941eac7e6a8c8a76905993e
|
9bde6cafb4273d721229448d115853ff2f5994a6
|
/myblog/blog/models.py
|
29739ca1865621b4e4224bca3f600e41f915a179
|
[] |
no_license
|
davejonesbkk/myblog
|
11eb30b4d75270b3e99f172f27f05ce31e318f93
|
4a5cbeb47154004ef239b16e63155997b1c9afe6
|
refs/heads/master
| 2021-01-17T17:43:28.465235 | 2016-05-31T02:02:07 | 2016-05-31T02:02:07 | 59,930,156 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 706 |
py
|
from django.db import models
from django_markdown.models import MarkdownField
from django.core.urlresolvers import reverse
class EntryQuerySet(models.QuerySet):
def published(self):
return self.filter(publish=True)
class Entry(models.Model):
title = models.CharField(max_length=200)
body = models.TextField()
slug = models.SlugField(max_length=200, unique=True)
publish = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
objects = EntryQuerySet.as_manager()
def __str__(self):
return self.title
class Meta:
verbose_name = 'Blog Entry'
verbose_name_plural = 'Blog Entries'
ordering = ["-created"]
|
[
"[email protected]"
] | |
447a75ff7f1e949a3c268918e94f8ab08d58da0f
|
68cd659b44f57adf266dd37789bd1da31f61670d
|
/2020-01/python/18188_다오의데이트.py
|
7c55c44e597a14f68e338a66b4a4458c5ab95c41
|
[] |
no_license
|
01090841589/solved_problem
|
c0c6f5a46e4d48860dccb3b0288aa5b56868fbca
|
bbea2f31e5fe36cad100bc514eacd83545fb25b1
|
refs/heads/master
| 2023-07-02T23:55:51.631478 | 2021-08-04T13:57:00 | 2021-08-04T13:57:00 | 197,157,830 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,117 |
py
|
import sys
sys.stdin = open("다오의데이트.txt")
DIR = [[-1, 0], [0, 1], [1, 0], [0, -1]]
def go_dao(y, x, k, route):
global result, rts
if result:
return
if k >= A:
return
flag = 1
for i in range(4):
if can[k][i]:
Y = y+DIR[i][0]
X = x+DIR[i][1]
if 0 <= Y < H and 0 <= X < W:
if MAP[Y][X] != '@':
if MAP[Y][X] == 'Z':
rts = route+arr[i]
result = 1
return
flag = 0
go_dao(Y, X, k+1, route+arr[i])
H, W = map(int, input().split())
MAP = [list(input()) for _ in range(H)]
for h in range(H):
for w in range(W):
if MAP[h][w] == 'D':
y = h
x = w
result = 0
rts = ''
A = int(input())
arr = ['W', 'D', 'S', 'A']
can = [[0, 0, 0, 0] for _ in range(A)]
for i in range(A):
B, C = map(str, input().split())
can[i][arr.index(B)] = 1
can[i][arr.index(C)] = 1
go_dao(y, x, 0, '')
if result:
print("YES")
print(rts)
else:
print("NO")
|
[
"[email protected]"
] | |
b1918d70a960ef445232d6b1b21ffd44d9848c48
|
71c7683331a9037fda7254b3a7b1ffddd6a4c4c8
|
/Phys/Urania/examples/KsPiZeroMM_angularPDF.py
|
a83417211276319e5a15c72d57e48769a1b46477
|
[] |
no_license
|
pseyfert-cern-gitlab-backup/Urania
|
edc58ba4271089e55900f8bb4a5909e9e9c12d35
|
1b1c353ed5f1b45b3605990f60f49881b9785efd
|
refs/heads/master
| 2021-05-18T13:33:22.732970 | 2017-12-15T14:42:04 | 2017-12-15T14:42:04 | 251,259,622 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,684 |
py
|
from Urania.Helicity import *
from Urania.SympyBasic import *
from os import *
DiLeptonSpins = [0,1,2] ## DMS: I doubt we'll need 2, probably we'll only
## have Pwave (J=1) from the photon, plus maybe some S-wave (J=0)
### transAmp=1 : Changes to transversity amplitude basis
A = doKsPizeroMuMu(DiLeptonSpins ) ## This is now in Urania.Helicity
### massage a bit the expression to make it more suitable for fitting
pdf_split = DecomposeAmplitudes(A,TransAmplitudes.values())
phys = 0
for key in pdf_split: phys += StrongPhases(key)*pdf_split[key]
### change the free variables to cosines
x = USymbol("helcosthetaK","c\\theta_{K}",real = True)
y = USymbol("helcosthetaL", "c\\theta_{l}", real = True)
z = USymbol("helphi" , "\\phi", real = True)
CThL = Cos(ThetaL)
CThK = Cos(ThetaK)
def changeFreeVars(function):
### Phi now as in DTT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
function = function.subs( Sin(2*ThetaK), 2*Sin(ThetaK)*Cos(ThetaK))
function = function.subs( Sin(2*ThetaL), 2*Sin(ThetaL)*Cos(ThetaL))
function = function.subs( Cos(2*ThetaK), 2*Cos(ThetaK)**2 - 1)
function = function.subs( Cos(2*ThetaL), 2*Cos(ThetaL)**2 - 1)
function = function.subs( Sin(ThetaK), Sqrt(1-Cos(ThetaK)**2))
function = function.subs( Sin(ThetaL), Sqrt(1-Cos(ThetaL)**2))
function = function.subs([(CThK,x),(CThL,y), (Phi, -z)])
return function
func = changeFreeVars(phys)
### Print out to a latex document
from Urania.LatexFunctions import *
flatex = file("Kspizeromm_PDF.tex","w")
begintex(flatex)
begin_multline(flatex)
i = 0
for key in pdf_split.keys():
if i > 20:
i = 0
multline_break(flatex)
if pdf_split[key]:
flatex.write(Ulatex(key) + "\t" + Ulatex(pdf_split[key]) + "\\\\" + "\n")
i += 1
end_multline(flatex)
flatex.write("\\end{document}\n")
flatex.close()
system("pdflatex " + "Kspizeromm_PDF")
print "angular function saved in Kspizeromm_PDF.pdf"
print "Now making RooFit class as well"
##BREAK
##### Generate and compile a fitting class corresponding to "A"
### Trial 1, w/o analytical integrals
from Urania.RooInterfaces import *
potential_list = [x,y,z]+TransAmpModuli.values() + TransAmpPhases.values()
final_list = []
for thing in potential_list:
if thing in func.atoms(): final_list.append(thing)
op = RooClassGenerator(func, final_list ,"RooKspizeroMM")
### Define intermediate variables to be calculated once
op.makePdf(integrable = 1)
op.doIntegral(1,(y,-1,1))#,(y,-1,1),(z,-Pi,Pi))
##op.doIntegral(2,(x,-1,1),(y,-1,1))
##op.doIntegral(3,(x,-1,1),(z,-Pi,Pi))
##op.doIntegral(4,(y,-1,1),(z,-Pi,Pi))
op.overwrite()
op.invoke()
|
[
"[email protected]"
] | |
e8492bd500e419e50fa3815209d4889eb2e4e971
|
c761f3fbce728e61cbcf5179f1d3f27e1e5625cd
|
/register_key.py
|
1328baddc2fe4d7e5f91b2052b07daa49e53649f
|
[] |
no_license
|
philopon/usermon
|
16033d41436efe2cf4971bcd3b25f99cf82de318
|
7f97db09a65466e2133d4304f9fe5ba212299598
|
refs/heads/master
| 2021-01-18T16:51:56.457593 | 2017-04-21T13:06:12 | 2017-04-21T13:06:12 | 86,775,704 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 469 |
py
|
#!/usr/bin/env python3
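# Append SSH public keys read from standard input to the invoking user's authorized_keys file.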
def main():
import sys
import os
import pwd
pw = pwd.getpwuid(os.getuid())
ssh_dir = os.path.join(pw.pw_dir, '.ssh')
auth_keys = os.path.join(ssh_dir, 'authorized_keys')
os.makedirs(ssh_dir, mode=0o700, exist_ok=True)
with open(auth_keys, 'a') as f:
for key in sys.stdin:
print(key.strip(), file=f)
os.chmod(auth_keys, 0o600)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
3e35560a675840b2ed59a45d39e280ce612af5c6
|
4e5b20fdcca20f458322f0a8cd11bbdacb6fb3e5
|
/suning/api/union/UnionInfomationGetRequest.py
|
5a52d242f32e5e4c7c3d65d8e1872c3832f9291a
|
[] |
no_license
|
shijingyu/sunningAPI
|
241f33b0660dc84635ce39688fed499f5c57a5da
|
4a3b2ef7f9bdc4707d1eaff185bc7eb636fe90d5
|
refs/heads/master
| 2020-04-24T22:15:11.584028 | 2019-02-24T06:41:20 | 2019-02-24T06:41:20 | 172,305,179 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 525 |
py
|
# -*- coding: utf-8 -*-
'''
Created on 2016-1-27
@author: suning
'''
from suning.api.abstract import AbstractApi
class UnionInfomationGetRequest(AbstractApi):
'''
'''
def __init__(self):
AbstractApi.__init__(self)
self.goodsCode = None
self.setParamRule({
'goodsCode':{'allow_empty':False}
})
def getApiBizName(self):
return 'getUnionInfomation'
def getApiMethod(self):
return 'suning.netalliance.unioninfomation.get'
|
[
"[email protected]"
] | |
da850d8841ddddfdccfc6bde153467956b91789c
|
78e60a7d8a67ed76244004e8a3ed573fbf396e41
|
/samples/get_zip_codes.py
|
a89c105f5ec1a635d350ba870418f9f735a0bb60
|
[
"MIT"
] |
permissive
|
Crivez/apiclient-python
|
837a9f7cc0453ccd3121311adc7920b5fe6b3e33
|
860fc054f546152a101e29b1af388c381075ac47
|
refs/heads/master
| 2023-06-08T13:24:09.249704 | 2021-06-17T12:16:35 | 2021-06-17T12:16:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 420 |
py
|
from voximplant.apiclient import VoximplantAPI, VoximplantException
if __name__ == "__main__":
voxapi = VoximplantAPI("credentials.json")
# Search for zip codes in Germany.
COUNTRY_CODE = "DE"
COUNT = 1
try:
res = voxapi.get_zip_codes(COUNTRY_CODE,
count=COUNT)
print(res)
except VoximplantException as e:
print("Error: {}".format(e.message))
|
[
"[email protected]"
] | |
0ae55acd20bb59d6c3f499e32e0f526820a351d7
|
822d3cd484b54f0531fc205520c765a8321c0613
|
/pyFile/8.面向对象/2.类的属性/9.类方法和静态方法.py
|
a0ccbf84964d8f9059c7feb1ae5efeedb1a3e65a
|
[] |
no_license
|
mghxy123/learnPython
|
31d1cc18deeed5a89864ca0333fe488e0dbf08b4
|
00740e87d55a4dffd78773deaff8689485df31e8
|
refs/heads/master
| 2021-07-21T14:31:02.421788 | 2020-06-27T11:28:01 | 2020-06-27T11:28:01 | 187,751,182 | 0 | 0 | null | 2020-06-07T05:14:05 | 2019-05-21T02:58:35 |
Python
|
UTF-8
|
Python
| false | false | 1,232 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : 9.类方法和静态方法.py
# Author: HuXianyong
# Mail: [email protected]
# Date : 2019/5/16 0016
# Ordinary functions as methods inside a class
# class Person:
#     def normal_method(): # Allowed? Yes: there is no syntax problem and it runs fine; it is simply not the conventional way to write it
#         print('normal')
#
#     # How do we call it?
#     Person.normal_method() # Allowed? Yes, because this just calls the function directly on the class
#     # Person().normal_method() # Allowed? No: this instantiates the class, and a method called on an instance must receive the instance object (self); none is accepted here, so it raises an error
#     print(Person.__dict__)
# # Static method
# class Person:
#     @staticmethod
#     def class_method():
#         print('this is staticMethod')
# Person.class_method()
# Person().class_method()
# Class methods and static methods
class Person:
    @classmethod
    def class_method(cls):  # cls is the class object itself, not an instance
print('this is class method')
print('class = {0.__name__}({0})'.format(cls))
cls.HEIGHT = 170
@staticmethod
def static_method():
print('this is staticMethod')
Person.class_method()
print(Person.__dict__)
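Person.static_method()  # a static method receives neither self nor cls
p = Person()
p.class_method()  # called via an instance, cls is still the Person class itself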
|
[
"[email protected]"
] | |
f29fc6830528398b792fd60578b01a78f12aa4e7
|
41ede4fd3bfba1bff0166bca7aee80dcf21434c6
|
/ayhanyalcinsoy/Desktop/lxde/base/libfm/actions.py
|
ad79cdbb6f0b2d887aa5244a18b52080cbb19379
|
[] |
no_license
|
pisilinux/playground
|
a7db4b42559a21cc72fd4c8649e0231ab6a3eb3c
|
e4e12fff8a847ba210befc8db7e2af8556c3adf7
|
refs/heads/master
| 2022-08-12T23:03:27.609506 | 2022-08-11T18:28:19 | 2022-08-11T18:28:19 | 8,429,459 | 16 | 22 | null | 2022-08-11T18:28:20 | 2013-02-26T09:37:11 |
Python
|
UTF-8
|
Python
| false | false | 811 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
WorkDir = "libfm-%s" % (get.srcVERSION())
def setup():
autotools.configure("--disable-static \
--sysconfdir=/etc \
--enable-debug \
--enable-udisks \
--enable-demo")
pisitools.dosed("libtool", " -shared ", " -Wl,-O1,--as-needed -shared ")
def build():
autotools.make()
def install():
pisitools.dosed("data/libfm.conf", "xarchiver", "file-roller")
autotools.install()
pisitools.dodoc("AUTHORS", "COPYING", "TODO")
|
[
"[email protected]"
] | |
32965056a1b7a8f68e29a888ddf16692219f8202
|
6f2675eee55b7ebc5adf9c2176ced8cb59fc64d4
|
/dataInterKingdee/interDebug.py
|
f5873ce9a0c97db0f8dd05bed388d20b019fdced
|
[] |
no_license
|
wildmanwang/proDataInter
|
8c2b65fa96ad45b21165d997b1769a28e12fc42a
|
f5a1f1fb195c66bf586bd999465c7e3b16453369
|
refs/heads/master
| 2023-06-07T11:57:16.763251 | 2023-06-03T08:54:56 | 2023-06-03T08:54:56 | 157,559,747 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 602 |
py
|
# -*- coding:utf-8 -*-
"""
"""
__author__ = "Cliff.wang"
import os
from interConfig import Settings
#from interProcess import InterProcess
from interControl import InterControl
if __name__ == "__main__":
try:
path = os.path.abspath(os.path.dirname(__file__))
sett = Settings(path, "config")
inter = InterControl(sett)
inter.interInit()
if 1 == 2:
            # Transfer master data and business data
inter.interBusiData()
elif 1 == 2:
            # Fetch department IDs and user IDs
pass
except Exception as e:
print(str(e))
|
[
"[email protected]"
] | |
a44db705bdc58cdcecdcd4b8200bf85a3d08fc83
|
b15d2787a1eeb56dfa700480364337216d2b1eb9
|
/samples/cli/accelbyte_py_sdk_cli/group/_get_group_join_request_public_v2.py
|
32ba9735f4911a02f803f73dab69c4e7a260ec52
|
[
"MIT"
] |
permissive
|
AccelByte/accelbyte-python-sdk
|
dedf3b8a592beef5fcf86b4245678ee3277f953d
|
539c617c7e6938892fa49f95585b2a45c97a59e0
|
refs/heads/main
| 2023-08-24T14:38:04.370340 | 2023-08-22T01:08:03 | 2023-08-22T01:08:03 | 410,735,805 | 2 | 1 |
MIT
| 2022-08-02T03:54:11 | 2021-09-27T04:00:10 |
Python
|
UTF-8
|
Python
| false | false | 2,611 |
py
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# AGS Group Service (2.18.1)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.group import (
get_group_join_request_public_v2 as get_group_join_request_public_v2_internal,
)
from accelbyte_py_sdk.api.group.models import ModelsGetMemberRequestsListResponseV1
from accelbyte_py_sdk.api.group.models import ResponseErrorResponse
@click.command()
@click.argument("group_id", type=str)
@click.option("--limit", "limit", type=int)
@click.option("--offset", "offset", type=int)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def get_group_join_request_public_v2(
group_id: str,
limit: Optional[int] = None,
offset: Optional[int] = None,
namespace: Optional[str] = None,
login_as: Optional[str] = None,
login_with_auth: Optional[str] = None,
doc: Optional[bool] = None,
):
if doc:
click.echo(get_group_join_request_public_v2_internal.__doc__)
return
x_additional_headers = None
if login_with_auth:
x_additional_headers = {"Authorization": login_with_auth}
else:
login_as_internal(login_as)
result, error = get_group_join_request_public_v2_internal(
group_id=group_id,
limit=limit,
offset=offset,
namespace=namespace,
x_additional_headers=x_additional_headers,
)
if error:
raise Exception(f"getGroupJoinRequestPublicV2 failed: {str(error)}")
click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
get_group_join_request_public_v2.operation_id = "getGroupJoinRequestPublicV2"
get_group_join_request_public_v2.is_deprecated = False
|
[
"[email protected]"
] | |
c2d9305312002748edb2d0e5470f541784c71352
|
3fc00c49c6b5a5d3edb4f5a97a86ecc8f59a3035
|
/shared_models/test/test_api.py
|
ae9465bb6b3b41416d097c202b1034470650a378
|
[] |
no_license
|
yc-hu/dm_apps
|
9e640ef08da8ecefcd7008ee2d4f8f268ec9062e
|
483f855b19876fd60c0017a270df74e076aa0d8b
|
refs/heads/master
| 2023-04-07T13:13:55.999058 | 2021-04-12T10:19:21 | 2021-04-12T10:19:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,264 |
py
|
from django.test import tag
from django.urls import reverse
from rest_framework import status
from shared_models.test import SharedModelsFactoryFloor as FactoryFloor
from shared_models.test.common_tests import CommonTest
class TestUserAPIListView(CommonTest):
def setUp(self):
super().setUp()
self.user = self.get_and_login_user()
self.test_url = reverse("user-list", args=None)
@tag("api", 'user')
def test_url(self):
self.assert_correct_url("user-list", test_url_args=None, expected_url_path=f"/api/shared/users/")
@tag("api", 'user')
def test_get(self):
# PERMISSIONS
# authenticated users
response = self.client.get(self.test_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# unauthenticated users
self.client.logout()
response = self.client.get(self.test_url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
# TODO: build up this test!
# # RESPONSE DATA
# valid_user = None
# self.get_and_login_user(user=None)
# response = self.client.get(self.test_url)
# self.assertEqual(len(response.data), 1)
# self.assertEqual(response.data[0]["id"], self.instance.id)
# # or, for lists with pagination...
# self.assertEqual(len(data["results"]), 1)
# self.assertEqual(data["results"][0]["id"], self.instance.id)
#
# # check query params
# object = FactoryFloor.UserFactory()
# data = self.client.get(self.test_url+f"?={object.id}").data
# keys.extend([
# "",
# ])
# self.assert_dict_has_keys(data, keys)
@tag("api", 'user')
def test_unallowed_methods_only(self):
restricted_statuses = [status.HTTP_405_METHOD_NOT_ALLOWED, status.HTTP_403_FORBIDDEN]
self.assertIn(self.client.put(self.test_url, data=None).status_code, restricted_statuses)
self.assertIn(self.client.delete(self.test_url, data=None).status_code, restricted_statuses)
self.assertIn(self.client.post(self.test_url, data=None).status_code, restricted_statuses)
self.assertIn(self.client.patch(self.test_url, data=None).status_code, restricted_statuses)
|
[
"[email protected]"
] | |
270875ed2be025781a975375972379cf8f211f80
|
dfad28a2e1a0199c0117e551fd1e31804804d5b9
|
/app/auth/views.py
|
d2df7a97666207276aa6648ef9f85af4a25d98bc
|
[
"MIT"
] |
permissive
|
wilbrone/Pitches
|
c33d60b142b43de9ccf60a86cf59acbc262c6711
|
b20d234fd930a6551f26d9cf863c6d1631b62bc2
|
refs/heads/master
| 2022-12-09T08:02:08.631177 | 2019-11-25T23:47:13 | 2019-11-25T23:47:13 | 223,405,696 | 0 | 0 |
MIT
| 2022-12-08T06:55:48 | 2019-11-22T13:09:30 |
Python
|
UTF-8
|
Python
| false | false | 1,583 |
py
|
from flask import render_template,redirect,url_for, flash,request
from flask_login import login_user,logout_user,login_required
from . import auth
from ..models import User
from .forms import LoginForm,RegistrationForm
from .. import db
from ..email import mail_message
@auth.route('/login',methods=['GET','POST'])
def login():
login_form = LoginForm()
if login_form.validate_on_submit():
user = User.query.filter_by(email = login_form.email.data).first()
if user is not None and user.verify_password(login_form.password.data):
login_user(user,login_form.remember.data)
return redirect(request.args.get('next') or url_for('main.index'))
    flash('Invalid username or password')
title = "One Minute Perfect Pitch login"
return render_template('auth/login.html',login_form = login_form,title=title)
@auth.route('/register',methods = ["GET","POST"])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email = form.email.data, username = form.username.data,full_name= form.full_name.data,password = form.password.data)
# saving the data
db.session.add(user)
db.session.commit()
mail_message("Welcome to One Minute Perfect Pitch","email/welcome_user",user.email,user=user)
return redirect(url_for('auth.login'))
title = "New Account"
    return render_template('auth/register.html', registration_form=form, title=title)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index"))
|
[
"[email protected]"
] | |
9f04557904bdeeb5a5b0b9e265605429682ff434
|
a867b1c9da10a93136550c767c45e0d8c98f5675
|
/G_11_RemoveKthNode.py
|
408aa2a8a0bdec884c65ff5c410cb79045ed72b6
|
[] |
no_license
|
Omkar02/FAANG
|
f747aacc938bf747129b8ff35b6648fb265d95b6
|
ee9b245aa83ea58aa67954ab96442561dbe68d06
|
refs/heads/master
| 2023-03-25T19:45:08.153403 | 2021-03-28T07:13:08 | 2021-03-28T07:13:08 | 280,783,785 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 941 |
py
|
import __main__ as main
from Helper.TimerLogger import CodeTimeLogging
fileName = main.__file__
fileName = fileName.split('\\')[-1]
CodeTimeLogging(Flag='F', filename=fileName, Tag='Linked-List', Difficult='Medium')
from Datastruct.masterLinkedList import l
arr = [1, 2, 3, 4, 5, 6]
# arr = [1, 2]
for i in arr:
l.insertStart(i)
# l.traverseList()
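# Two-pointer technique: advance `second` k steps ahead of `first`; when
# `second` runs off the end of the list, `first` is the node just before
# the k-th node from the end.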
def removeKNodeFromEnd(head, k):
    print(f'Removed node {k} from the end: ', end='')
first = head
second = head
count = 1
while count <= k and second is not None:
second = second.nextNode
count += 1
if second is None:
head.data = first.nextNode.data
head.nextNode = first.nextNode.nextNode
l.traverseList()
return
while second.nextNode is not None:
second = second.nextNode
first = first.nextNode
first.nextNode = first.nextNode.nextNode
l.traverseList()
removeKNodeFromEnd(l.getHead(), 3)
|
[
"[email protected]"
] | |
7b5a81f5531be906c6c75c6ea6ee45ae41407e10
|
188950fb7b1fce4840b41e1e9454f0133a8d75ce
|
/src/Server/Controller/guess_controller.py
|
a2518f5c1fdefce113aeaec0371319b7b16a82fa
|
[] |
no_license
|
cloew/WordGuessAngular
|
3f5c6a1e0e14f6e905ec78a618b606ff3cb3e798
|
0d889cd3bb9cafe35a6e7e2ccba97914a26825b9
|
refs/heads/master
| 2021-01-01T05:53:26.776161 | 2014-09-01T14:55:39 | 2014-09-01T14:55:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 400 |
py
|
from Server.game_wrapper import GameWrapper
from kao_flask.controllers.json_controller import JSONController
class GuessController(JSONController):
""" Controller to allow a player to guess the word for the current Round """
def performWithJSON(self, gameId):
game = GameWrapper(id=gameId)
        game.guess(self.json['guesses'])
        return game.toJSON()
|
[
"[email protected]"
] | |
45bd5115c7a3405823961182633a568318a1d2ef
|
7234e6c72eb3f09c4a66dbe91f00fdf7742f010f
|
/algo/arrays/binarysearch/shiftedBinarySearch.py
|
fc901758206f1662bac912102f0b1b7740f4186f
|
[] |
no_license
|
srinathalla/python
|
718ac603473e7bed060ba66aa3d39a90cf7ef69d
|
b6c546070b1738350303df3939888d1b0e90e89b
|
refs/heads/master
| 2021-06-13T06:11:42.653311 | 2021-02-19T06:01:41 | 2021-02-19T06:01:41 | 150,374,828 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 687 |
py
|
# Time complexity: O(log n); space complexity: O(1)
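# Invariant: compare the middle element with the right end to decide which
# half of the rotated array is sorted, then keep whichever half can still
# contain the target.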
def shiftedBinarySearch(array, target):
l = 0
r = len(array)-1
while l < r:
m = (l + r)//2
if array[m] == target:
return m
elif array[m] < array[r]:
if array[m] < target and target <= array[r]:
l = m + 1
else:
r = m - 1
elif array[m] > array[r]:
if array[l] <= target and target < array[m]:
r = m - 1
else:
l = m + 1
return l if array[l] == target else -1
print(shiftedBinarySearch([5, 23, 111, 1], 111))
print(shiftedBinarySearch([45, 61, 71, 72, 73, 0, 1, 21, 33, 45], 33))
|
[
"[email protected]"
] | |
cd83a748401283dfbf2bddb5137bb34063e8eb43
|
1825283527f5a479204708feeaf55f4ab6d1290b
|
/leetcode/python/50/50.powx-n.py
|
c24eb3b7c7bcc033fb5286680caebed06bbe3c0f
|
[] |
no_license
|
frankieliu/problems
|
b82c61d3328ffcc1da2cbc95712563355f5d44b5
|
911c6622448a4be041834bcab25051dd0f9209b2
|
refs/heads/master
| 2023-01-06T14:41:58.044871 | 2019-11-24T03:47:22 | 2019-11-24T03:47:22 | 115,065,956 | 1 | 0 | null | 2023-01-04T07:25:52 | 2017-12-22T02:06:57 |
HTML
|
UTF-8
|
Python
| false | false | 802 |
py
|
#
# @lc app=leetcode id=50 lang=python3
#
# [50] Pow(x, n)
#
# https://leetcode.com/problems/powx-n/description/
#
# algorithms
# Medium (27.38%)
# Total Accepted: 281K
# Total Submissions: 1M
# Testcase Example: '2.00000\n10'
#
# Implement pow(x, n), which calculates x raised to the power n (x^n).
#
# Example 1:
#
#
# Input: 2.00000, 10
# Output: 1024.00000
#
#
# Example 2:
#
#
# Input: 2.10000, 3
# Output: 9.26100
#
#
# Example 3:
#
#
# Input: 2.00000, -2
# Output: 0.25000
# Explanation: 2^-2 = 1/2^2 = 1/4 = 0.25
#
#
# Note:
#
#
# -100.0 < x < 100.0
# n is a 32-bit signed integer, within the range [−2^31, 2^31 − 1]
#
#
#
class Solution:
def myPow(self, x, n):
"""
:type x: float
:type n: int
:rtype: float
"""
|
[
"[email protected]"
] | |
2c72fc48e73c2fcf5db27a84c63d3341b2696983
|
ed7fde0483a4836bfc9ef3ab887cf1220559bfc7
|
/masters_scripts/EC17_get_allele_dist_1.py
|
80bb3023acd365ccf7683c6816f51994e190d9c1
|
[] |
no_license
|
cizydorczyk/python_scripts
|
326b3142a3c6ce850237e8b13e229854699c6359
|
b914dcff60727bbfaa2b32e1a634ca9ca354eeeb
|
refs/heads/master
| 2023-05-11T14:29:44.548144 | 2023-05-05T19:39:28 | 2023-05-05T19:39:28 | 116,588,201 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,850 |
py
|
from sys import argv
import numpy as np
import itertools
script, inputallelicdepth, outputfile = argv
print "Working on file: " + inputallelicdepth.split('/')[-1]
with open(inputallelicdepth, 'r') as infile1:
lines = infile1.read().splitlines()
del lines[0]
proportions_breakdown = {1:[], 2:[], 3:[], 4:[]}
proportions = []
for i in lines:
line = i.strip().split('\t')
ad = [float(j) for j in line[-1].split(',')]
adsum = sum(ad)
numbases = len(ad[0:-1])
if adsum != 0.0:
for k in ad[0:-1]:
proportions_breakdown[numbases].append(round((k/adsum),2))
proportions.append(round((k/adsum),2))
elif adsum == 0.0:
# proportions[numbases].append(0.00)
continue
# Count total proportions:
proportions_dict = {}
for i in np.arange(0,1.01, 0.01):
proportions_dict[str(i)] = proportions.count(i)
# Count proportions with 2, 3, and 4 bases separately:
proportions_2_dict = {}
proportions_3_dict = {}
proportions_4_dict = {}
for i in np.arange(0,1.01, 0.01):
proportions_2_dict[str(i)] = proportions_breakdown[2].count(i)
for i in np.arange(0,1.01, 0.01):
proportions_3_dict[str(i)] = proportions_breakdown[3].count(i)
for i in np.arange(0,1.01, 0.01):
proportions_4_dict[str(i)] = proportions_breakdown[4].count(i)
with open(outputfile, 'w') as outfile1:
outfile1.write('proportion\ttotal_count\tcount_2\tcount_3\tcount_4\n')
for keyt, key2, key3, key4 in itertools.izip(sorted(proportions_dict.keys()), sorted(proportions_2_dict.keys()), sorted(proportions_3_dict.keys()), sorted(proportions_4_dict.keys())):
outfile1.write(str(keyt) + '\t' + str(proportions_dict[keyt]) + '\t' + str(proportions_2_dict[key2]) + '\t' + str(proportions_3_dict[key3]) + '\t' + str(proportions_4_dict[key4]) + '\n')
# for key, value in sorted(proportions_dict.iteritems()):
# outfile1.write(str(key) + '\t' + str(value) + '\n')
|
[
"[email protected]"
] | |
88267b9d5edb8a48d3ceb3ce7f9c307f1a46e175
|
55965f592cb7e915cd68bd371ee1a6ad2a6e0247
|
/libmngmtsys.py
|
79288d746d1e8cdb428259f150297c49244931cb
|
[] |
no_license
|
Upasna4/Training
|
2b5b57fc3e5229304860f153db93d912a44472bf
|
33c6eeb565c422e40ea88d50af787f58b9f0da6d
|
refs/heads/master
| 2020-08-05T03:50:36.280910 | 2019-10-02T16:36:09 | 2019-10-02T16:36:09 | 212,383,151 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,733 |
py
|
memberData = {}
bookData = {}
borrowData = {}
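# borrowData maps a member id to the id of the book that member has borrowed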
m_id = 101
b_id = 201
print("Library Management System\n"
"1.Add Member\n"
"2.Add Book\n"
"3.Book Borrowing\n"
"4.Book Returning\n"
"5.Member Status\n"
"6.Book Status\n"
"7.Exit")
while True:
choice = int(input("Enter Choice: "))
if choice == 1:
print("Add Member Program")
loop1=True
while(loop1):
name = input("Member Name: ")
            memberData.update({m_id: name})  # register the new member (id -> name)
print("Member Added. Member id is: ", m_id)
m_id += 1 #incrementing value of m_id
while (True):
choice = input("Add more member (Y/N): ").lower().strip()
if choice == 'y':
break
elif choice == 'n':
loop1 = False
break
else:
print("invalid choice")
loop1=False
continue
elif choice == 2:
print("Add Book Program")
while True:
name = input("Book Name: ")
qty = int(input("enter quantity"))
            bookData.update({b_id: [name, qty]})  # store the new book as [name, quantity]
print("Book Added. Book id is: ", b_id)
b_id += 1
choice = input("Add more member (Y/N): ").lower().strip()
if choice == 'y':
continue
elif choice == 'n':
break
elif choice == 3:
print("Book Borrowing Program")
while True:
m_id = int(input("Member id: "))
if m_id in memberData: #checks if member id in present in memberData dict
b_name = input("Book Name: ")
for b_id, b_name_qty in bookData.items(): #when we want both key and value
if b_name_qty[0] == b_name: #indexing is done coz we have a list here..at [0] we have name in list
if b_name_qty[1] > 0: #here we compare quantity as it is on 1st index..we see whether it is >0 or not
borrowData.update({m_id: b_id}) #update dict
bookData[b_id][1] -= 1 #decrement quantity of books
break
else:
print("Book out of stock")
else:
print("Book not present")
choice = input("Add more member (Y/N): ").lower().strip()
if choice == 'y':
continue
elif choice == 'n':
break
elif choice == 4:
print("Book Returning Program")
m_id = int(input("Member Id: "))
name = input("Book Name: ")
for b_id, b_name in borrowData.items():
if b_name == name:
bookData[b_id][1] += 1
borrowData.pop(m_id) #person is returning book so book will pop from borrowData dict
borrowData.update({m_id: b_id}) #dict is updated
break
else:
print("Book not present")
choice = input("Add more member (Y/N): ").lower().strip()
if choice == 'y':
continue
elif choice == 'n':
break
elif choice == 5:
print("Member Status Program")
m_id = int(input("Member Id: "))
if m_id in memberData: #to check mem status we check m_id is in memberData and borrowData or not
if m_id in borrowData: #if b_id is in borrowData then borrowData m se b_id nikalo
b_id = borrowData[m_id] #bid nikal ra h dict m se
print("Member Name: ", memberData[m_id]) #the value of this key is name
print("Allow Book Name: ", bookData[b_id][0]) #the val of this is bookname
elif choice == 6:
print("Book Status Program")
b_id = int(input("Book Id: "))
for m_id, m_name in memberData.items(): #valuefetch
if b_id in borrowData:
b_id = borrowData[m_id]
print("Member name:",memberData[m_id])
print("Book name:",bookData[b_id][0])
print("Book issue to user:", memberData[m_id])
elif choice == 7:
break
else:
print("invalid choice")
|
[
"[email protected]"
] | |
6cdaa4435e0e15d1f90e91b2cdd9468848c117bf
|
9a258d81d612b855e244e4a03594ebe312ff3268
|
/webapp/tests/test_urls.py
|
8a82dcab33b5fefed07c162dd7d7b024a90d642f
|
[
"MIT"
] |
permissive
|
erischon/p10_digitalocean
|
19fb39f7442e0eec669fbd1ef5b2d49464c37493
|
a850dfb97470da57117fa1dfc62c4614a602fe40
|
refs/heads/master
| 2023-04-27T16:52:04.158502 | 2021-05-15T22:44:34 | 2021-05-15T22:44:34 | 360,518,773 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 427 |
py
|
from django.test import SimpleTestCase
from django.urls import reverse, resolve
from webapp.views import home_page, mentions
class WebappTestUrls(SimpleTestCase):
def test_home_url_is_resolved(self):
url = reverse('home')
self.assertEqual(resolve(url).func, home_page)
def test_mentions_url_is_resolved(self):
url = reverse('mentions')
self.assertEqual(resolve(url).func, mentions)
|
[
"[email protected]"
] | |
793fabc710ab61e60bc4ad701ef6d70a64ebffcc
|
5d0f91e3a4c75375a2ba9b12cf3cbd4350c2ccdf
|
/geopdf/__init__.py
|
11df3297614cf7a212aab066ac7d3ed89a52d353
|
[
"MIT"
] |
permissive
|
garnertb/geopdf
|
8fac6419e62db9d880d48bb4b202cfbf11729629
|
175073cb44a308513bdb6db32092dd806029afc0
|
refs/heads/master
| 2021-01-10T18:50:22.802931 | 2015-06-09T13:53:43 | 2015-06-09T13:53:43 | 29,563,939 | 1 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,121 |
py
|
# -*- coding: utf-8 -*-
"""Adds GeoPDF functionality to ReportLab"""
from reportlab.lib.colors import black
from reportlab.pdfbase.pdfdoc import PDFArray, PDFDictionary, PDFName, PDFString
from reportlab.pdfbase import pdfdoc
from reportlab.pdfgen import canvas
class GeoPDFBase(PDFDictionary, object):  # bases reordered so a valid MRO exists when PDFDictionary is a new-style class
"""
Base class for GeoPDF dicts.
"""
def __init__(self, dict=None):
"""dict should be namestring to value eg "a": 122 NOT pdfname to value NOT "/a":122"""
if dict is None:
self.dict = {}
else:
self.dict = dict.copy()
self.set_defaults()
def set_defaults(self):
"""
A hook for creating default values.
"""
return
def is_valid(self):
"""
Test the validity of the dict.
"""
return True
class Projection(GeoPDFBase):
"""
A Projection dict.
"""
def set_defaults(self):
self.dict.setdefault('ProjectionType', PDFString('GEOGRAPHIC'))
self.dict.setdefault('Type', PDFName('Projection'))
class LGIDict(GeoPDFBase):
"""
The LGI dict.
"""
def set_defaults(self):
self.dict.setdefault('Type', PDFString('LGIDict'))
self.dict.setdefault('Version', PDFString('2.1'))
self.dict.setdefault('Projection', Projection({'Datum': PDFString('WE')}))
def is_valid(self):
if not any(map(lambda key: key in self.dict, 'Registration CTM'.split())):
return False
for key, value in self.dict.items():
if hasattr(value, 'is_valid') and getattr(value, 'is_valid')() is False:
return False
return True
class GeoCanvas(canvas.Canvas, object):
LGIDict = PDFArray([])
def _startPage(self):
# now get ready for the next one
super(GeoCanvas, self)._startPage()
self.LGIDict = PDFArray([])
def showPage(self):
"""Close the current page and possibly start on a new page."""
# ensure a space at the end of the stream - Acrobat does
# not mind, but Ghostscript dislikes 'Qendstream' even if
# the length marker finishes after 'Q'
pageWidth = self._pagesize[0]
pageHeight = self._pagesize[1]
cM = self._cropMarks
code = self._code
if cM:
bw = max(0, getattr(cM, 'borderWidth', 36))
if bw:
markLast = getattr(cM, 'markLast', 1)
ml = min(bw, max(0, getattr(cM, 'markLength', 18)))
mw = getattr(cM, 'markWidth', 0.5)
mc = getattr(cM, 'markColor', black)
mg = 2 * bw - ml
cx0 = len(code)
if ml and mc:
self.saveState()
self.setStrokeColor(mc)
self.setLineWidth(mw)
self.lines([
(bw, 0, bw, ml),
(pageWidth + bw, 0, pageWidth + bw, ml),
(bw, pageHeight + mg, bw, pageHeight + 2 * bw),
(pageWidth + bw, pageHeight + mg, pageWidth + bw, pageHeight + 2 * bw),
(0, bw, ml, bw),
(pageWidth + mg, bw, pageWidth + 2 * bw, bw),
(0, pageHeight + bw, ml, pageHeight + bw),
(pageWidth + mg, pageHeight + bw, pageWidth + 2 * bw, pageHeight + bw)
])
self.restoreState()
if markLast:
# if the marks are to be drawn after the content
# save the code we just drew for later use
L = code[cx0:]
del code[cx0:]
cx0 = len(code)
bleedW = max(0, getattr(cM, 'bleedWidth', 0))
self.saveState()
self.translate(bw - bleedW, bw - bleedW)
if bleedW:
# scale everything
self.scale(1 + (2.0 * bleedW) / pageWidth, 1 + (2.0 * bleedW) / pageHeight)
# move our translation/expansion code to the beginning
C = code[cx0:]
del code[cx0:]
code[0:0] = C
self.restoreState()
if markLast:
code.extend(L)
pageWidth = 2 * bw + pageWidth
pageHeight = 2 * bw + pageHeight
code.append(' ')
page = pdfdoc.PDFPage()
page.__NoDefault__ = """Parent
MediaBox Resources Contents CropBox Rotate Thumb Annots B Dur Hid Trans AA
PieceInfo LastModified SeparationInfo ArtBox TrimBox BleedBox ID PZ
Trans LGIDict""".split()
page.pagewidth = pageWidth
page.pageheight = pageHeight
if getattr(self, 'LGIDict', None):
if len(self.LGIDict.sequence) == 1:
page.LGIDict = self.LGIDict.sequence[0]
else:
page.LGIDict = self.LGIDict
page.Rotate = self._pageRotation
page.hasImages = self._currentPageHasImages
page.setPageTransition(self._pageTransition)
page.setCompression(self._pageCompression)
if self._pageDuration is not None:
page.Dur = self._pageDuration
strm = self._psCommandsBeforePage + [self._preamble] + code + self._psCommandsAfterPage
page.setStream(strm)
self._setColorSpace(page)
self._setExtGState(page)
self._setXObjects(page)
self._setShadingUsed(page)
self._setAnnotations(page)
self._doc.addPage(page)
if self._onPage:
self._onPage(self._pageNumber)
self._startPage()
def addGeo(self, **kwargs):
"""
Adds the LGIDict to the document.
:param kwargs: Keyword arguments that are used to update the LGI Dictionary.
"""
lgi = LGIDict()
lgi.dict.update(kwargs)
if not lgi.is_valid():
return
pdf_obj = lgi.format(self._doc)
self.LGIDict.sequence.append(pdf_obj)
return pdf_obj
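if __name__ == "__main__":
    # Editor's usage sketch, not part of the original module. Attaches a
    # placeholder georeferencing dict to one page and saves the file; the
    # identity CTM below is illustrative only, not real map coordinates.
    c = GeoCanvas("geo_example.pdf")
    c.drawString(100, 700, "GeoPDF example")
    c.addGeo(CTM=PDFArray([1, 0, 0, 1, 0, 0]))
    c.showPage()
    c.save()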
|
[
"[email protected]"
] | |
aa49a4d64508c9fa62c1e3f29026d15008e407f4
|
23611933f0faba84fc82a1bc0a85d97cf45aba99
|
/google-cloud-sdk/lib/surface/app/versions/delete.py
|
fe4a27d6de672df18ddf9b85bc4ecc86e88036db
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
KaranToor/MA450
|
1f112d1caccebdc04702a77d5a6cee867c15f75c
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
refs/heads/master
| 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 |
Apache-2.0
| 2020-12-24T00:38:09 | 2017-01-18T00:05:44 |
Python
|
UTF-8
|
Python
| false | false | 4,568 |
py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Delete command."""
import copy
from googlecloudsdk.api_lib.app import appengine_api_client
from googlecloudsdk.api_lib.app import service_util
from googlecloudsdk.api_lib.app import version_util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.resource import resource_printer
from googlecloudsdk.core.util import text
class VersionsDeleteError(exceptions.Error):
"""Errors occurring when deleting versions."""
pass
class Delete(base.DeleteCommand):
"""Delete a specified version.
You cannot delete a version of a service that is currently receiving traffic.
"""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': """\
To delete a specific version of a specific service, run:
$ {command} --service myService v1
To delete a named version across all services, run:
$ {command} v1
To delete multiple versions of a specific service, run:
$ {command} --service myService v1 v2
To delete multiple named versions across all services, run:
$ {command} v1 v2
""",
}
@staticmethod
def Args(parser):
parser.add_argument('versions', nargs='+', help=(
'The versions to delete (optionally filtered by the --service flag).'))
parser.add_argument('--service', '-s',
help=('If specified, only delete versions from the '
'given service.'))
def Run(self, args):
client = appengine_api_client.GetApiClient()
services = client.ListServices()
all_versions = client.ListVersions(services)
# Sort versions to make behavior deterministic enough for unit testing.
versions = sorted(version_util.GetMatchingVersions(all_versions,
args.versions,
args.service))
services_to_delete = []
for service in sorted(services):
if (len([v for v in all_versions if v.service == service.id]) ==
len([v for v in versions if v.service == service.id])):
services_to_delete.append(service)
for version in copy.copy(versions):
if version.service == service.id:
versions.remove(version)
for version in versions:
if version.traffic_split:
# TODO(user): mention `migrate` once it's implemented.
# TODO(b/32869800): collect info on all versions before raising.
raise VersionsDeleteError(
'Version [{version}] is currently serving {allocation:.2f}% of '
'traffic for service [{service}].\n\n'
            'Please move all traffic away by deploying a new version with the '
            '`--promote` argument or running `gcloud app services '
'set-traffic`.'.format(
version=version.id,
allocation=version.traffic_split * 100,
service=version.service))
if services_to_delete:
word = text.Pluralize(len(services_to_delete), 'service')
log.warn('Requested deletion of all existing versions for the following '
'{0}:'.format(word))
resource_printer.Print(services_to_delete, 'list', out=log.status)
console_io.PromptContinue(prompt_string=(
'\nYou cannot delete all versions of a service. Would you like to '
'delete the entire {0} instead?').format(word), cancel_on_no=True)
service_util.DeleteServices(client, services_to_delete)
if versions:
fmt = 'list[title="Deleting the following versions:"]'
resource_printer.Print(versions, fmt, out=log.status)
console_io.PromptContinue(cancel_on_no=True)
else:
if not services_to_delete:
log.warn('No matching versions found.')
version_util.DeleteVersions(client, versions)
|
[
"[email protected]"
] | |
e1ea4c169eac6a692d0243c2fe8e607a7bc281e2
|
2c74bb301f1ed83b79254944183ac5a18a639fdf
|
/tests/components/github/test_diagnostics.py
|
80dfaec24459735e6cd3e4ebee2a1a78979dbbc2
|
[
"Apache-2.0"
] |
permissive
|
Adminiuga/home-assistant
|
5bec93007ddac1a268cc359bf7e48530c5f73b38
|
dcf68d768e4f628d038f1fdd6e40bad713fbc222
|
refs/heads/dev
| 2023-02-22T22:03:31.013931 | 2022-11-09T00:27:20 | 2022-11-09T00:27:20 | 123,929,062 | 5 | 4 |
Apache-2.0
| 2023-02-22T06:14:31 | 2018-03-05T14:11:09 |
Python
|
UTF-8
|
Python
| false | false | 2,437 |
py
|
"""Test GitHub diagnostics."""
import json
from aiogithubapi import GitHubException
from aiohttp import ClientSession
from homeassistant.components.github.const import CONF_REPOSITORIES, DOMAIN
from homeassistant.core import HomeAssistant
from .common import setup_github_integration
from tests.common import MockConfigEntry, load_fixture
from tests.components.diagnostics import get_diagnostics_for_config_entry
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_entry_diagnostics(
hass: HomeAssistant,
hass_client: ClientSession,
mock_config_entry: MockConfigEntry,
aioclient_mock: AiohttpClientMocker,
) -> None:
"""Test config entry diagnostics."""
mock_config_entry.options = {CONF_REPOSITORIES: ["home-assistant/core"]}
response_json = json.loads(load_fixture("graphql.json", DOMAIN))
response_json["data"]["repository"]["full_name"] = "home-assistant/core"
aioclient_mock.post(
"https://api.github.com/graphql",
json=response_json,
headers=json.loads(load_fixture("base_headers.json", DOMAIN)),
)
aioclient_mock.get(
"https://api.github.com/rate_limit",
json={"resources": {"core": {"remaining": 100, "limit": 100}}},
headers={"Content-Type": "application/json"},
)
await setup_github_integration(hass, mock_config_entry, aioclient_mock)
result = await get_diagnostics_for_config_entry(
hass,
hass_client,
mock_config_entry,
)
assert result["options"]["repositories"] == ["home-assistant/core"]
assert result["rate_limit"] == {
"resources": {"core": {"remaining": 100, "limit": 100}}
}
assert (
result["repositories"]["home-assistant/core"]["full_name"]
== "home-assistant/core"
)
async def test_entry_diagnostics_exception(
hass: HomeAssistant,
hass_client: ClientSession,
init_integration: MockConfigEntry,
aioclient_mock: AiohttpClientMocker,
) -> None:
"""Test config entry diagnostics with exception for ratelimit."""
aioclient_mock.get(
"https://api.github.com/rate_limit",
exc=GitHubException("error"),
)
result = await get_diagnostics_for_config_entry(
hass,
hass_client,
init_integration,
)
assert (
result["rate_limit"]["error"]
== "Unexpected exception for 'https://api.github.com/rate_limit' with - error"
)
|
[
"[email protected]"
] | |
30cc1d1fc50d0f446d0341344fbc5cfd52d78242
|
9df89a1652d183d8fc654acd728f9a578d6d1912
|
/cli/psym/graphql/query/customers.py
|
cc9b41c460503454a4b358260df7649396259444
|
[
"BSD-3-Clause"
] |
permissive
|
duranrojasm/symphony
|
b37d54a134e29093edacb80442e204fc71a37fbe
|
55b3d0c20b669374303bafb10e9c96c734647c9c
|
refs/heads/main
| 2023-08-24T02:00:33.433220 | 2021-10-28T20:35:23 | 2021-10-28T20:35:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,209 |
py
|
#!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass, field as _field
from ...config import custom_scalars, datetime
from gql_client.runtime.variables import encode_variables
from gql import gql, Client
from gql.transport.exceptions import TransportQueryError
from functools import partial
from numbers import Number
from typing import Any, AsyncGenerator, Dict, List, Generator, Optional
from time import perf_counter
from dataclasses_json import DataClassJsonMixin, config
from ..fragment.customer import CustomerFragment, QUERY as CustomerFragmentQuery
# fmt: off
QUERY: List[str] = CustomerFragmentQuery + ["""
query CustomersQuery {
customers {
edges {
node {
...CustomerFragment
}
}
}
}
"""
]
class CustomersQuery:
@dataclass(frozen=True)
class CustomersQueryData(DataClassJsonMixin):
@dataclass(frozen=True)
class CustomerConnection(DataClassJsonMixin):
@dataclass(frozen=True)
class CustomerEdge(DataClassJsonMixin):
@dataclass(frozen=True)
class Customer(CustomerFragment):
pass
node: Optional[Customer]
edges: List[CustomerEdge]
customers: Optional[CustomerConnection]
# fmt: off
@classmethod
def execute(cls, client: Client) -> Optional[CustomersQueryData.CustomerConnection]:
variables: Dict[str, Any] = {}
new_variables = encode_variables(variables, custom_scalars)
response_text = client.execute(
gql("".join(set(QUERY))), variable_values=new_variables
)
res = cls.CustomersQueryData.from_dict(response_text)
return res.customers
# fmt: off
@classmethod
async def execute_async(cls, client: Client) -> Optional[CustomersQueryData.CustomerConnection]:
variables: Dict[str, Any] = {}
new_variables = encode_variables(variables, custom_scalars)
response_text = await client.execute_async(
gql("".join(set(QUERY))), variable_values=new_variables
)
res = cls.CustomersQueryData.from_dict(response_text)
return res.customers
|
[
"[email protected]"
] | |
faf45b629da2c9b6f878c086d6691fdf8be9c9f5
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/CodeJamData/15/31/5.py
|
0b76face2b61578a0a63ae7ae2bee12b06fe88cd
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null |
UTF-8
|
Python
| false | false | 745 |
py
|
import os
import sys
from collections import defaultdict
problem_id = 'A'
sys.setrecursionlimit(10**9)
input_path = '%s.in' % problem_id
output_path = '%s.out' % problem_id
def read_line():
line = ''
while len(line) == 0:
line = input_file.readline().strip()
return line
def write_line(line):
print line
return output_file.write(line + os.linesep)
def solve():
r, c, w = map(int, read_line().split(' '))
nc = (c / w) * r + (w - 1)
if c % w:
nc += 1
return '%s' % nc
input_file = open(input_path, "r")
output_file = open(output_path, "w+")
T = int(read_line())
for case_id in xrange(1, T + 1):
write_line("Case #%d: %s" % (case_id, solve()))
input_file.close()
output_file.close()
|
[
"[email protected]"
] | |
7d0b9e321fad687717ba261f712748cb57d968a3
|
7848ded2f7b1cf5cc33380d739e0ceee5718ffec
|
/imrunicorn/activity_log/migrations/0006_auto_20210218_0756.py
|
73aa939743b218d1fe05de35fdd5684fce3b3c7e
|
[] |
no_license
|
benspelledabc/djangosite
|
cbed1a7da3eb6ba6eee05897ec928b350831fc6b
|
fa8004b20f790f56fc69e9d158128a867be700f3
|
refs/heads/master
| 2023-04-17T19:24:48.908640 | 2021-05-02T19:05:38 | 2021-05-02T19:05:38 | 294,891,690 | 1 | 1 | null | 2021-05-02T19:05:38 | 2020-09-12T07:16:11 |
Python
|
UTF-8
|
Python
| false | false | 474 |
py
|
# Generated by Django 3.0.7 on 2021-02-18 12:56
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('activity_log', '0005_activity_sfw'),
]
operations = [
migrations.AlterModelOptions(
name='activityphotovalidation',
options={'ordering': ('-activity_log', 'id'), 'verbose_name': 'Activity Photo Validation', 'verbose_name_plural': 'Activity Photo Validations'},
),
]
|
[
"[email protected]"
] | |
88163ffa4c39f9c08b7cefc81c2eb7c2b7c7bed4
|
f146cef3f2172275c8d7f526dab92951fa50eb2c
|
/COURSE/group project -week9/backup -backend day3/backend/app/users/views.py
|
d0da6f0f9c4c96335aafbf7f3314c9c3e1305e26
|
[] |
no_license
|
mehranj73/Bootcamp
|
fed04d3858d6d0bc1cdad94e1f05bd4f7a47c0ec
|
bd575cd02329ad1ce21b05350380dfbf17cbdd89
|
refs/heads/master
| 2023-02-09T06:50:00.590751 | 2019-08-22T18:56:02 | 2019-08-22T18:56:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,413 |
py
|
from django.contrib.auth.models import User
from rest_framework import filters
from rest_framework.generics import RetrieveAPIView, ListCreateAPIView, ListAPIView, UpdateAPIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from app.permissions import IsOwnerOrReadOnly
from .serializers import MyProfileSerializer, UserSerializer, UserProfileSerializer, MyUserSerializer
from .models import UserProfile
#GET my profile
# URL 'me/'
class GetMyProfile(RetrieveAPIView):
# allow this action only to the user who owns the profile or to admin
#permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)
permission_classes = (IsAuthenticated, IsOwnerOrReadOnly,)
queryset = UserProfile.objects.all()
serializer_class = MyProfileSerializer
def get(self, request, *args, **kwargs):
user = self.request.user
me = user.user_profile
serializer = self.get_serializer(me)
return Response(serializer.data)
#GET: to get all users
# URL 'list/'
class GenericGetUsersView(ListCreateAPIView):
# queryset = User.objects.all()
serializer_class = UserSerializer
def get_queryset(self):
return User.objects.all()
#GET userprofile by user ID
# URL <int:pk>
class GetUserProfileById(RetrieveAPIView):
queryset = UserProfile.objects.all()
serializer_class = UserProfileSerializer
lookup_url_kwarg = 'pk'
#POST: update user profile - userprofile model part (in front end to be united in same page with "update user profile-user model part)
#URL 'me/update/user-profile/'
class UpdateUserProfileView(UpdateAPIView):
serializer_class = MyProfileSerializer
queryset = UserProfile.objects.all()
permission_classes = [
IsAuthenticated,
IsOwnerOrReadOnly,
]
def update(self, request, *args, **kwargs):
user = self.request.user
serializer = MyProfileSerializer(instance=user.user_profile, data=request.data, partial=True)
if serializer.is_valid():
serializer.save()
return Response( "User profile updated.", status=200)
else:
return Response( "Unable to perform request. Please try again later.", status=400)
#POST: update user profile - user model part (in front end to be united in same page with "update user profile-userprofile model part)
#URL 'me/update/user-profile/'
class UpdateUserProfileViewMyUser(UpdateAPIView):
serializer_class = MyProfileSerializer
queryset = User.objects.all()
permission_classes = [
IsAuthenticated,
IsOwnerOrReadOnly,
]
def update(self, request, *args, **kwargs):
user = self.request.user
serializer = MyUserSerializer(instance=user, data=request.data, partial=True)
if serializer.is_valid():
serializer.save()
return Response( "User profile updated.", status=200)
else:
return Response( "Unable to perform request. Please try again later.", status=400)
#GET: to search by username or first name or last name
class SearchUsers(ListAPIView):
"""
GET: Search users
in Postman add in Params key: search, value: string
"""
serializer_class = UserSerializer
queryset = User.objects.all()
filter_backends = (filters.SearchFilter,)
search_fields = ('username', 'first_name', 'last_name')
|
[
"[email protected]"
] | |
f027e2fef6d80f6cee29c3c460427d5ff4690d31
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_073/ch18_2020_03_09_13_23_07_056737.py
|
5580c305acffebf7d622cc6890b83f53b3de7ef7
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 203 |
py
|
def verifica_idade(idade):
    if idade>=21:
        return 'liberado EUA e BRASIL'
    if idade>=1 and idade<18:
        return 'Não está liberado'
    else:
        return 'esta liberado BRASIL'
|
[
"[email protected]"
] | |
14652fb38016928ddefc74fa43e0a8c3e8ada405
|
63b0fed007d152fe5e96640b844081c07ca20a11
|
/ABC/ABC200~ABC299/ABC245/d2.py
|
39575b7b194f9b94d10060fb30a0af67e9572081
|
[] |
no_license
|
Nikkuniku/AtcoderProgramming
|
8ff54541c8e65d0c93ce42f3a98aec061adf2f05
|
fbaf7b40084c52e35c803b6b03346f2a06fb5367
|
refs/heads/master
| 2023-08-21T10:20:43.520468 | 2023-08-12T09:53:07 | 2023-08-12T09:53:07 | 254,373,698 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 538 |
py
|
def polydiv(xs, ys):
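    # Polynomial long division: divide xs by ys (coefficients listed from the
    # highest-degree term down) and return the quotient's coefficients.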
xn = len(xs)
yn = len(ys)
zs = xs.copy()
qs = []
for _ in range(xn - yn + 1):
temp = zs[0] // ys[0]
for i in range(yn):
zs[i] -= temp * ys[i]
qs.append(temp)
zs = zs[1:]
if qs == []: qs = [0.]
return qs
n,m=map(int,input().split())
a=list(map(int,input().split()))
c=list(map(int,input().split()))
a=list(reversed(a))
c=list(reversed(c))
ans=[]
p=polydiv(c,a)
for i in range(len(p)):
ans.append(int(p[i]))
ans=list(reversed(ans))
print(*ans)
|
[
"[email protected]"
] | |
843d8fb2fb90c80110e6a1f94182e4440e561463
|
7a07d957316172fe78b341c6f5215df2ccdb24f6
|
/assignment/EasyAI_all_program.py
|
c6fbae635760a88672fcd1070d47c597c1a75d57
|
[] |
no_license
|
chandraprakashh/Python_with_AI
|
87ff4655c44eef9d0459cf0f2ceedabde88b0f1f
|
6d76eeea94e0cb7402330a2beea1fc4a7ab73e29
|
refs/heads/master
| 2020-07-18T18:18:06.463302 | 2019-12-11T08:20:12 | 2019-12-11T08:20:12 | 206,291,055 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,008 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 13 22:50:57 2019
@author: user
"""
# Code => 1
from easyAI import TwoPlayersGame, Human_Player, AI_Player, Negamax
class GameOfBones( TwoPlayersGame ):
def __init__(self, players):
self.players = players
self.pile = 20
self.nplayer = 1
def possible_moves(self): return ['1','2','3']
def make_move(self,move): self.pile -= int(move)
def win(self): return self.pile<=0
def is_over(self): return self.win()
def show(self): print ("%d bones left in the pile" % self.pile)
    def scoring(self): return 100 if self.win() else 0  # use self, not the global game object
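# Negamax explores the game tree to a fixed depth of 13 plies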
ai = Negamax(13)
game = GameOfBones( [ Human_Player(), AI_Player(ai) ] )
history = game.play()
# Code => 2
from easyAI import TwoPlayersGame, AI_Player, Negamax
from easyAI.Player import Human_Player
class GameController(TwoPlayersGame):
def __init__(self, players):
self.players = players
self.nplayer = 1
self.board = [0] * 9
def possible_moves(self):
return [a + 1 for a, b in enumerate(self.board) if b == 0]
def make_move(self, move):
self.board[int(move) - 1] = self.nplayer
def loss_condition(self):
possible_combinations = [[1,2,3], [4,5,6], [7,8,9],
[1,4,7], [2,5,8], [3,6,9], [1,5,9], [3,5,7]]
return any([all([(self.board[i-1] == self.nopponent)
for i in combination]) for combination in possible_combinations])
def is_over(self):
return (self.possible_moves() == []) or self.loss_condition()
def show(self):
print('\n'+'\n'.join([' '.join([['. ', 'O', 'X'][self.board[3*j + i]]
for i in range(3)]) for j in range(3)]))
def scoring(self):
return -100 if self.loss_condition() else 0
if __name__ == "__main__":
algorithm = Negamax(7)
GameController([Human_Player(), AI_Player(algorithm)]).play()
|
[
"[email protected]"
] | |
7678c21d2e011e118d23455f36514f5d73e162d6
|
8454441f899c3beb9fcea26cffc2f4c3cf75ff6a
|
/common/code/snippets/py/flask-get-header.py
|
a040c637e90ee07be18f7cd6ed97246a58f26c1e
|
[
"MIT"
] |
permissive
|
nevesnunes/env
|
4a837e8fcf4a6a597992103e0a0c3d0db93e1c78
|
f2cd7d884d46275a2fcb206eeeac5a8e176b12af
|
refs/heads/master
| 2023-08-22T15:49:35.897161 | 2023-08-15T13:51:08 | 2023-08-15T13:51:08 | 199,400,869 | 9 | 6 |
MIT
| 2023-06-22T10:59:51 | 2019-07-29T07:24:47 |
Python
|
UTF-8
|
Python
| false | false | 630 |
py
|
#!/usr/bin/env python3
from flask import Flask, request
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello!"
@app.route("/<path:text>", methods=["GET", "POST"])
def echo(text):
return f"You said (len = {len(text)}): {bytes(text, 'latin-1')}"
@app.after_request
def after(response):
red_foo = b"\x1b\x5b\x33\x31\x6d\x66\x6f\x6f\x1b\x28\x42\x1b\x5b\x6d"
response.headers["X-Foo"] = red_foo
response.headers["X-Bar"] = "".join(
[chr(x) if x not in (ord("\r"), ord("\n")) else "" for x in range(0, 255)]
)
return response
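# Example check (editor's sketch; assumes the server is running locally):
#   curl -i http://localhost:18123/hello
# The response carries X-Foo (ANSI escape bytes that render "foo" in red)
# and X-Bar (every byte value 0-254 except CR and LF).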
if __name__ == "__main__":
app.run(port=18123)
|
[
"[email protected]"
] | |
2ddaa2d8860b7299c64a636af17c11fbc5ebfa46
|
c04acaa6ee9c6a7c365e217bc78039fa9c77833e
|
/cuzquena/urls.py
|
785b7ed1280475deaaa389f28b11b64b4deafb40
|
[] |
no_license
|
danielhuamani/django-la-cuzquena
|
0386800d640b224d94b0fac2d83f999b60d7da85
|
a6f4aaf44775b27328d073a65f1d0f50eff51fad
|
refs/heads/master
| 2020-12-05T04:51:01.077860 | 2016-09-17T13:56:58 | 2016-09-17T13:56:58 | 67,900,351 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,190 |
py
|
"""cconline URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from filebrowser.sites import site
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^summernote/', include('django_summernote.urls')),
url(r'^admin/filebrowser/', include(site.urls)),
url(r'', include('my_apps.web.urls', namespace='web')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"[email protected]"
] | |
8349477f2dc38370be2a6048b4ca40ce366e75e2
|
f3a4b4c7c39d2ed2959b410367e8abc66493772e
|
/laplacianFlux/r2_1_0/__init__.py
|
c64bf8efa3593dcacfa71e4abd9edc4f9e87754b
|
[] |
no_license
|
asimurzin/laplacianFlux
|
6800bc5aba29968f7784ce91a5a1503318fad246
|
83977d5ce967b87ed0203a143d19d88c9a5d7ed7
|
refs/heads/master
| 2020-03-29T20:22:44.143734 | 2012-07-01T19:36:36 | 2012-07-01T19:36:36 | 1,613,806 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,376 |
py
|
#!/usr/bin/env python
#--------------------------------------------------------------------------------------
## pythonFlu - Python wrapping for OpenFOAM C++ API
## Copyright (C) 2010- Alexey Petrov
## Copyright (C) 2009-2010 Pebble Bed Modular Reactor (Pty) Limited (PBMR)
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
## See http://sourceforge.net/projects/pythonflu
##
## Author : Alexey PETROV
##
#----------------------------------------------------------------------------
from Foam import ref, man
#----------------------------------------------------------------------------
def _createFields( runTime, mesh ):
ref.ext_Info() << "Reading field T\n" << ref.nl
T = man.volScalarField( man.IOobject( ref.word( "T" ),
ref.fileName( runTime.timeName() ),
mesh,
ref.IOobject.MUST_READ,
ref.IOobject.AUTO_WRITE ),
mesh )
ref.ext_Info() << "Reading transportProperties\n" << ref.nl
transportProperties = man.IOdictionary( man.IOobject( ref.word( "transportProperties" ),
ref.fileName( runTime.constant() ),
mesh,
ref.IOobject.MUST_READ,
ref.IOobject.NO_WRITE ) )
ref.ext_Info() << "Reading diffusivity DT\n" << ref.nl
DT = ref.dimensionedScalar( transportProperties.lookup( ref.word( "DT" ) ) )
return T, transportProperties, DT
#--------------------------------------------------------------------------------------
def write( runTime, mesh, T ):
if runTime.outputTime():
gradT = ref.fvc.grad(T)
gradTx = ref.volScalarField( ref.IOobject( ref.word( "gradTx" ),
ref.fileName( runTime.timeName() ),
mesh,
ref.IOobject.NO_READ,
ref.IOobject.AUTO_WRITE ),
gradT.component( ref.vector.X ) )
gradTy = ref.volScalarField( ref.IOobject( ref.word( "gradTy" ),
ref.fileName( runTime.timeName() ),
mesh,
ref.IOobject.NO_READ,
ref.IOobject.AUTO_WRITE ),
gradT.component( ref.vector.Y ) )
gradTz = ref.volScalarField( ref.IOobject( ref.word( "gradTz" ),
ref.fileName( runTime.timeName() ),
mesh,
ref.IOobject.NO_READ,
ref.IOobject.AUTO_WRITE ),
gradT.component( ref.vector.Z ) )
runTime.write()
pass
#--------------------------------------------------------------------------------------
def main_standalone( argc, argv ):
args = ref.setRootCase( argc, argv )
runTime = man.createTime( args )
mesh = man.createMesh( runTime )
T, transportProperties, DT = _createFields( runTime, mesh )
simple = man.simpleControl( mesh )
ref.ext_Info() << "\nCalculating temperature distribution\n" << ref.nl
while runTime.loop() :
ref.ext_Info() << "Time = " << runTime.timeName() << ref.nl << ref.nl
while simple.correctNonOrthogonal():
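            # Transient diffusion (heat) equation: ddt(T) - laplacian(DT, T) = 0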
ref.solve( ref.fvm.ddt( T ) - ref.fvm.laplacian( DT, T ) )
pass
write( runTime, mesh, T )
ref.ext_Info() << "ExecutionTime = " << runTime.elapsedCpuTime() << " s" << \
" ClockTime = " << runTime.elapsedClockTime() << " s" << ref.nl << ref.nl
pass
ref.ext_Info() << "End\n" << ref.nl
import os
return os.EX_OK
#--------------------------------------------------------------------------------------
import sys, os
from Foam import FOAM_VERSION
if FOAM_VERSION( ">=", "020100" ):
if __name__ == "__main__" :
argv = sys.argv
os._exit( main_standalone( len( argv ), argv ) )
pass
else:
from Foam.OpenFOAM import ext_Info
    ref.ext_Info() << "\nTo use this solver, it is necessary to SWIG OpenFOAM 2.1.0 or higher\n"
pass
#--------------------------------------------------------------------------------------
|
[
"[email protected]"
] | |
178ebfab22130821e12bb8c9157a0436f54acf48
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/109/usersdata/172/63370/submittedfiles/av2_p3_civil.py
|
4c68528d682769ee8dc9310c3e74e069e24ca4aa
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 498 |
py
|
# -*- coding: utf-8 -*-
import numpy as np
def somal(l,i):
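    # Sum of the elements in row i of matrix l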
soma=0
for j in range(0,l.shape[1],1):
soma=soma+l[i,j]
return (soma)
def somac(l,j):
soma=0
for i in range(0,l.shape[0],1):
soma=soma+l[i,j]
return (soma)
n=int(input('Tamanho: '))
g=int(input('Pl: '))
h=int(input('Pc: '))
l=np.zeros((n,n))
for i in range(0,l.shape[0],1):
for j in range(0,l.shape[1],1):
l[i,j]= int(input(' peso: '))
fim=somal(l,g)+somac(l,h)-(2*(l[g,h]))
print(fim)
|
[
"[email protected]"
] | |
6c6be5bb613ab1ba748008cf64ecb99a72b2ea86
|
814fd0bea5bc063a4e34ebdd0a5597c9ff67532b
|
/build/android/pylib/utils/mock_calls_test.py
|
1b474afd1ea1707910b1716170ec0f65c1c87e17
|
[
"BSD-3-Clause"
] |
permissive
|
rzr/chromium-crosswalk
|
1b22208ff556d69c009ad292bc17dca3fe15c493
|
d391344809adf7b4f39764ac0e15c378169b805f
|
refs/heads/master
| 2021-01-21T09:11:07.316526 | 2015-02-16T11:52:21 | 2015-02-16T11:52:21 | 38,887,985 | 0 | 0 |
NOASSERTION
| 2019-08-07T21:59:20 | 2015-07-10T15:35:50 |
C++
|
UTF-8
|
Python
| false | false | 5,078 |
py
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Unit tests for the contents of mock_calls.py.
"""
import logging
import os
import sys
import unittest
from pylib import constants
from pylib.utils import mock_calls
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'pymock'))
import mock # pylint: disable=F0401
class _DummyAdb(object):
def __str__(self):
return '0123456789abcdef'
def Push(self, host_path, device_path):
logging.debug('(device %s) pushing %r to %r', self, host_path, device_path)
def IsOnline(self):
logging.debug('(device %s) checking device online', self)
return True
def Shell(self, cmd):
logging.debug('(device %s) running command %r', self, cmd)
return "nice output\n"
def Reboot(self):
logging.debug('(device %s) rebooted!', self)
class TestCaseWithAssertCallsTest(mock_calls.TestCase):
def setUp(self):
self.adb = _DummyAdb()
def ShellError(self):
def action(cmd):
raise ValueError('(device %s) command %r is not nice' % (self.adb, cmd))
return action
def get_answer(self):
logging.debug("called 'get_answer' of %r object", self)
return 42
def echo(self, thing):
logging.debug("called 'echo' of %r object", self)
return thing
  def testCallTarget_succeeds(self):
self.assertEquals(self.adb.Shell,
self.call_target(self.call.adb.Shell))
def testCallTarget_failsExternal(self):
with self.assertRaises(ValueError):
self.call_target(mock.call.sys.getcwd)
def testCallTarget_failsUnknownAttribute(self):
with self.assertRaises(AttributeError):
self.call_target(self.call.adb.Run)
def testCallTarget_failsIntermediateCalls(self):
with self.assertRaises(AttributeError):
self.call_target(self.call.adb.RunShell('cmd').append)
def testPatchCall_method(self):
self.assertEquals(42, self.get_answer())
with self.patch_call(self.call.get_answer, return_value=123):
self.assertEquals(123, self.get_answer())
self.assertEquals(42, self.get_answer())
def testPatchCall_attribute_method(self):
with self.patch_call(self.call.adb.Shell, return_value='hello'):
self.assertEquals('hello', self.adb.Shell('echo hello'))
def testPatchCall_global(self):
with self.patch_call(mock.call.os.getcwd, return_value='/some/path'):
self.assertEquals('/some/path', os.getcwd())
def testPatchCall_withSideEffect(self):
with self.patch_call(self.call.adb.Shell, side_effect=ValueError):
with self.assertRaises(ValueError):
self.adb.Shell('echo hello')
def testAssertCalls_succeeds_simple(self):
self.assertEquals(42, self.get_answer())
with self.assertCall(self.call.get_answer(), 123):
self.assertEquals(123, self.get_answer())
self.assertEquals(42, self.get_answer())
def testAssertCalls_succeeds_multiple(self):
with self.assertCalls(
(mock.call.os.getcwd(), '/some/path'),
(self.call.echo('hello'), 'hello'),
(self.call.get_answer(), 11),
self.call.adb.Push('this_file', 'that_file'),
(self.call.get_answer(), 12)):
self.assertEquals(os.getcwd(), '/some/path')
self.assertEquals('hello', self.echo('hello'))
self.assertEquals(11, self.get_answer())
self.adb.Push('this_file', 'that_file')
self.assertEquals(12, self.get_answer())
  def testAssertCalls_succeeds_withAction(self):
with self.assertCall(
self.call.adb.Shell('echo hello'), self.ShellError()):
with self.assertRaises(ValueError):
self.adb.Shell('echo hello')
def testAssertCalls_fails_tooManyCalls(self):
with self.assertRaises(AssertionError):
with self.assertCalls(self.call.adb.IsOnline()):
self.adb.IsOnline()
self.adb.IsOnline()
def testAssertCalls_fails_tooFewCalls(self):
with self.assertRaises(AssertionError):
with self.assertCalls(self.call.adb.IsOnline()):
pass
def testAssertCalls_succeeds_extraCalls(self):
# we are not watching Reboot, so the assertion succeeds
with self.assertCalls(self.call.adb.IsOnline()):
self.adb.IsOnline()
self.adb.Reboot()
def testAssertCalls_fails_extraCalls(self):
self.watchCalls([self.call.adb.Reboot])
# this time we are also watching Reboot, so the assertion fails
with self.assertRaises(AssertionError):
with self.assertCalls(self.call.adb.IsOnline()):
self.adb.IsOnline()
self.adb.Reboot()
def testAssertCalls_succeeds_NoCalls(self):
self.watchMethodCalls(self.call.adb) # we are watching all adb methods
with self.assertCalls():
pass
def testAssertCalls_fails_NoCalls(self):
self.watchMethodCalls(self.call.adb)
with self.assertRaises(AssertionError):
with self.assertCalls():
self.adb.IsOnline()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)
|
[
"[email protected]"
] | |
4a5f20033b2ce926b8c120facc7b1de246135d9c
|
c47e274f6af4d08bff65e360fb8a11b163dc34b2
|
/common/global_constants.py
|
7e184ce065f2d0ce801d87ae0ab50fb3d1e9079c
|
[
"BSD-3-Clause"
] |
permissive
|
nozberkaryaindonesia/ReadableWebProxy
|
6b66994c574dc0a70767397403c04f97bf2d07f0
|
82d14d8dfb23ef135a16f88274c14c7acc1162a5
|
refs/heads/master
| 2022-05-21T20:06:03.707617 | 2017-09-24T09:54:23 | 2017-09-24T09:54:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,460 |
py
|
GLOBAL_BAD_URLS = [
'//mail.google.com',
'/comments/feed/',
'/embed?',
'/osd.xml',
'/page/page/',
'/wp-json/',
'/wp-login.php',
'/xmlrpc.php',
'?openidserver=1',
'a.wikia-beacon.com',
'accounts.google.com',
'add.my.yahoo.com',
'addtoany.com',
'b.scorecardresearch.com',
'delicious.com',
'digg.com',
'edit.yahoo.com',
'facebook.com',
'fbcdn-',
'feeds.wordpress.com',
'gprofiles.js',
'javascript:void',
'netvibes.com',
'newsgator.com',
'paypal.com',
'pixel.wp.com',
'public-api.wordpress.com',
'r-login.wordpress.com',
'reddit.com',
'stumbleupon.com',
'technorati.com',
'topwebfiction.com',
'twitter.com',
'twitter.com/intent/',
'wretch.cc',
'ws-na.amazon-adsystem.com',
    'www.addtoany.com',
'www.pinterest.com/pin/',
'www.wattpad.com/login?',
'www.tumblr.com/reblog/',
'www.paypalobjects.com',
# Tumblr can seriously go fuck itself with a rusty stake
'tumblr.com/widgets/',
'www.tumblr.com/login',
'://tumblr.com',
'&share=tumblr',
'/wp-content/plugins/',
'/wp-content/themes/',
'/wp-json/oembed/',
# At least one site (booksie) is serving the favicon with a mime-type
# of "text/plain", which then confuses the absolute crap out of the
# mime-type dispatcher.
# Since I'm not re-serving favicons anyways, just do not fetch them ever.
'favicon.ico',
# Try to not scrape inline images
';base64,',
"www.fashionmodeldirectory.com",
"www.watchingprivatepractice.com",
"Ebonyimages.jupiterimages.com",
# More garbage issues.
'"https',
'#comment-',
'/oembed/1.0/',
'&share=',
'replytocom=',
'?feed=rss2&page_id',
'?share=tumblr',
'?share=facebook',
'chasingadreamtranslations.com/?fp=',
# NFI where /this/ came from
'www.miforcampuspolice.com',
'tracking.feedpress.it',
'www.quantcast.com',
'mailto:',
'javascript:popupWindow(',
'en.blog.wordpress.com',
'counter.yadro.ru',
'/js/js/',
'/css/css/',
'/images/images/',
'ref=dp_brlad_entry',
'https:/www.',
'tumblr.com/oembed/1.0?',
]
GLOBAL_DECOMPOSE_BEFORE = [
{'name' : 'likes-master'}, # Bullshit sharing widgets
{'id' : 'jp-post-flair'},
{'class' : 'post-share-buttons'},
#{'class' : 'commentlist'}, # Scrub out the comments so we don't try to fetch links from them
#{'class' : 'comments'},
#{'id' : 'comments'},
]
GLOBAL_DECOMPOSE_AFTER = []
RSS_SKIP_FILTER = [
"www.baka-tsuki.org",
"re-monster.wikia.com",
'inmydaydreams.com',
'www.fanfiction.net',
'www.booksie.com',
'www.booksiesilk.com',
'www.fictionpress.com',
'storiesonline.net',
'www.fictionmania.tv',
'www.bestories.net',
'www.tgstorytime.com',
'www.nifty.org',
'www.literotica.com',
'pokegirls.org',
'www.asstr.org',
'www.mcstories.com',
'www.novelupdates.com',
'40pics.com',
'#comment-',
'?showComment=',
]
RSS_TITLE_FILTER = [
"by: ",
"comments on: ",
"comment on: ",
"comment on ",
]
# Goooooo FUCK YOURSELF
GLOBAL_INLINE_BULLSHIT = [
"This translation is property of Infinite Novel Translations.",
"This translation is property of Infinite NovelTranslations.",
"If you read this anywhere but at Infinite Novel Translations, you are reading a stolen translation.",
"<Blank>",
"<space>",
"<Blank>",
"<Blank>",
"please read only translator’s websitewww.novitranslation.com",
"please read only translator’s website www.novitranslation.com",
"Please do not host elsewhere but MBC and Yumeabyss",
    'Original and most updated translations are from volaretranslations.',
    'Please support the translator for Wild Consort by reading on volarenovels!',
"<StarveCleric>",
'(trytranslations.com at your service!)',
'Please do not host elsewhere but volare and Yumeabyss',
'[Follow the latest chapter at wuxiadream.com]',
'I slid my penis inside her. She squirmed a bit but YOU SICK FUCK STOP STEALING MY TRANSLATIONS', # siiiiigh
'I kissed her sweet anus once more before leaving', # siiiiiiiiiiiiigh
'(Watermark: read this translation only at shinku. xiaoxiaonovels.com)',
"<TLN: If you're reading this novel at any other site than Sousetsuka.com you might be reading an unedited, uncorrected version of the novel.>",
'Original and most updated translations are from volare. If read elsewhere, this chapter has been stolen. Please stop supporting theft.',
'*******If you are reading this on a place other than rinkagetranslation.com, this chapter has been stolen and is neither the most recent or complete chapter.*******',
'*******Read the chapters at rinkagetranslation.com. The chapters for this series will NOT be posted anywhere else other than on that site itself. If you are reading this from somewhere else then this is chapter has been stolen.*******',
'If you are reading this on a place other than rinkagetranslation.com, this chapter has been stolen and is neither the most recent or complete chapter.',
"Read The Lazy Swordmaster first on Lightnovelbastion.com (If you're reading this elsewhere, it has been stolen)",
"Read The Lazy Swordmaster on Lightnovelbastion.com",
"Property of © Fantasy-Books.live; outside of it, it is stolen.",
]
|
[
"[email protected]"
] | |
a43033cd1083b62dfa20f3914123e00835219987
|
c5a004f26bf249f888be3849114dd35dbd24cb24
|
/python/evalrescallers/tests/ten_k_validation_data_test.py
|
c9f2b7b50349d5124180fb1dad48982f96e4202e
|
[
"MIT"
] |
permissive
|
wangdi2014/tb-amr-benchmarking
|
f7cf331608cfe7b9cc8995906d991573323dc87a
|
276f4f7f30639dacc62b3e8e395b2d2ce8675089
|
refs/heads/master
| 2022-03-10T00:41:07.364006 | 2019-11-08T09:37:23 | 2019-11-08T09:37:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,515 |
py
|
import os
import unittest
from evalrescallers import ten_k_validation_data
modules_dir = os.path.dirname(os.path.abspath(ten_k_validation_data.__file__))
data_dir = os.path.join(modules_dir, 'tests', 'data', 'ten_k_validation_data')
class TestTenKValidationData(unittest.TestCase):
def test_load_sample_to_res_file(self):
'''test load_sample_to_res_file'''
expected_drugs = {'Isoniazid', 'Rifampicin', 'Ethambutol', 'Pyrazinamide'}
expected_data = {
'ena1': {'Isoniazid': 'n/a', 'Rifampicin': 'S', 'Ethambutol': 'R', 'Pyrazinamide': 'S'},
'ena2': {'Isoniazid': 'S', 'Rifampicin': 'U', 'Ethambutol': 'S', 'Pyrazinamide': 'S'},
}
infile = os.path.join(data_dir, 'load_sample_to_res_file.tsv')
got_drugs, got_data = ten_k_validation_data.load_sample_to_res_file(infile)
self.assertEqual(expected_drugs, got_drugs)
self.assertEqual(expected_data, got_data)
def test_load_sources_file(self):
'''test load_sources_file'''
infile = os.path.join(data_dir, 'load_sources_file.tsv')
expect = {
'ena1': ('source1', 'country1'),
'ena2': ('source1', 'country1'),
'ena3': ('source1', 'country2'),
'ena4': ('source2', 'country1'),
'ena5': ('source2', 'country2'),
}
got = ten_k_validation_data.load_sources_file(infile)
self.assertEqual(expect, got)
def test_sources_file_to_country_counts(self):
'''test sources_file_to_country_counts'''
infile = os.path.join(data_dir, 'sources_file_to_country_counts.tsv')
expect = {
'Country1': {'validate': 3, 'test': 0},
'Country2': {'validate': 1, 'test': 0},
'Germany': {'validate': 0, 'test': 1},
'UK': {'validate': 1, 'test': 2},
}
got = ten_k_validation_data.sources_file_to_country_counts(infile)
self.assertEqual(expect, got)
def test_load_all_data(self):
'''test load_all_data'''
expected_drugs = {'Quinolones', 'Isoniazid', 'Rifampicin', 'Ethambutol', 'Pyrazinamide', 'Amikacin', 'Capreomycin', 'Ciprofloxacin', 'Cycloserine', 'Ethionamide', 'Kanamycin', 'Linezolid', 'Moxifloxacin', 'Ofloxacin', 'PAS', 'Rifabutin', 'Streptomycin'}
got_drugs, got_pheno_validation, got_pheno_test, got_predict = ten_k_validation_data.load_all_data()
self.assertEqual(expected_drugs, got_drugs)
_, expect_pheno = ten_k_validation_data.load_sample_to_res_file(os.path.join(ten_k_validation_data.data_dir, '10k_validation.phenotype.tsv'))
_, expect_predict = ten_k_validation_data.load_sample_to_res_file(os.path.join(ten_k_validation_data.data_dir, '10k_validation.prediction.tsv'))
_, expect_more_pheno = ten_k_validation_data.load_sample_to_res_file(os.path.join(ten_k_validation_data.data_dir, '10k_validation.extra_phenotypes.tsv'))
expect_samples = set(expect_pheno.keys()).union(set(expect_more_pheno.keys()))
got_samples = set(expect_pheno.keys())
self.assertEqual(expect_samples, got_samples)
for pheno_dict in got_pheno_validation, got_pheno_test:
for sample in pheno_dict:
for d in expect_pheno, expect_more_pheno:
if sample in d:
for k, v in d[sample].items():
self.assertEqual(v, pheno_dict[sample][k])
self.assertEqual(expect_predict, got_predict)
|
[
"[email protected]"
] | |
0129e5f8bb4ef9510bef37bfe7c32a58b45a1089
|
6ec8e4271968cae715babe05029931d2c11df754
|
/run.py
|
3381b4ca954744143adb1172231fafc792c96a42
|
[
"MIT"
] |
permissive
|
lllhhhqqq/SPIRAL-tensorflow
|
040efe8af0fd3bc4d5f5ce2ed5474e6d732763f5
|
05ddfdc20c73a61cde46594bd6b7b7a2e255a44b
|
refs/heads/master
| 2020-03-08T08:57:45.938448 | 2018-04-03T15:32:19 | 2018-04-03T15:32:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,371 |
py
|
# -*- coding: future_fstrings -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tensorflow as tf
from six.moves import shlex_quote
import utils as ut
def new_cmd(session, name, cmd, load_path, shell):
if isinstance(cmd, (list, tuple)):
cmd = " ".join(shlex_quote(str(v)) for v in cmd)
return name, "tmux send-keys -t {}:{} {} Enter".format(session, name, shlex_quote(cmd))
def create_commands(session, args, shell='bash'):
ut.train.prepare_dirs(args)
actual_args = ut.io.get_cmd(as_list=True)
actual_cmd = ' '.join(actual_args)
# for launching the TF workers and for launching tensorboard
base_cmd = [
'CUDA_VISIBLE_DEVICES=',
sys.executable, 'main.py',
'--load_path', args.load_path,
'--start_port', args.start_port,
'--num_gpu', ut.misc.count_gpu(),
] + actual_args
cmds_map = [new_cmd(session, "ps", base_cmd + ["--job_name", "ps"], args.load_path, shell)]
    if args.loss == 'l2':
        gpu_task_num = 1
    elif args.loss == 'gan':
        gpu_task_num = 2
    else:
        gpu_task_num = 0  # assumed safe default: no GPU workers for other loss types
for i in range(args.num_workers):
if i < gpu_task_num: # gpu workers
cmd = base_cmd[1:]
else:
cmd = base_cmd[:]
cmd += ["--job_name", "worker", "--task", str(i)]
cmds_map += [new_cmd(session, "w-%d" % i, cmd, args.load_path, shell)]
tmp_tb_dir = "/".join(sys.executable.split('/')[:-1])
tmp_tb_path = os.path.join(tmp_tb_dir, "tensorboard")
if os.path.exists(tmp_tb_path):
tb = tmp_tb_dir + "/tensorboard"
else:
tb = "tensorboard"
    tb_args = [tb, "--logdir", args.log_dir, "--port", str(args.tb_port)]
cmds_map += [new_cmd(session, "tb", tb_args, args.load_path, shell)]
cmds_map += [new_cmd(session, "htop", ["htop"], args.load_path, shell)]
windows = [v[0] for v in cmds_map]
notes = []
cmds = []
notes += ["Use `tmux attach -t {}` to watch process output".format(session)]
notes += ["Use `tmux kill-session -t {}` to kill the job".format(session)]
notes += ["Point your browser to http://localhost:12345 to see Tensorboard"]
cmds += [
# kill any process using tensorboard's port
f"kill $( lsof -i:{args.tb_port} -t ) > /dev/null 2>&1",
# kill any processes using ps / worker ports
f"kill $( lsof -i:{args.start_port}-{args.num_workers + args.start_port} -t ) > /dev/null 2>&1",
f"tmux kill-session -t {session}",
f"tmux new-session -s {session} -n {windows[0]} -d {shell}",
]
for w in windows[1:]:
cmds += ["tmux new-window -t {} -n {} {}".format(session, w, shell)]
cmds += ["sleep 1"]
for window, cmd in cmds_map:
cmds += [cmd]
return cmds, notes
def run(args):
cmds, notes = create_commands("spiral", args)
if args.dry_run:
print("Dry-run mode due to -n flag, otherwise the following commands would be executed:")
else:
print("Executing the following commands:")
print("\n".join(cmds))
print("")
if not args.dry_run:
os.environ["TMUX"] = ""
os.system("\n".join(cmds))
print('\n'.join(notes))
if __name__ == "__main__":
from config import get_args
args = get_args()
run(args)
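For orientation, a sketch of what new_cmd produces; the sample session, window name and command below are illustrative, not taken from the source:

# new_cmd("spiral", "w-0", ["python", "main.py", "--task", 0], None, "bash")
# returns roughly:
#   ("w-0", "tmux send-keys -t spiral:w-0 'python main.py --task 0' Enter")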
|
[
"[email protected]"
] | |
7c602f029e3a124f40432e96b024c8300417ae5b
|
f4b5721c6b3f5623e306d0aa9a95ec53461c1f89
|
/backend/src/gloader/xml/dom/html/HTMLTableRowElement.py
|
e18280ba18ad8af52f593f29fbe8bf83d5cc6ac0
|
[
"Apache-1.1",
"MIT"
] |
permissive
|
citelab/gini5
|
b53e306eb5dabf98e9a7ded3802cf2c646f32914
|
d095076113c1e84c33f52ef46a3df1f8bc8ffa43
|
refs/heads/uml-rename
| 2022-12-10T15:58:49.578271 | 2021-12-09T23:58:01 | 2021-12-09T23:58:01 | 134,980,773 | 12 | 11 |
MIT
| 2022-12-08T05:20:58 | 2018-05-26T17:16:50 |
Python
|
UTF-8
|
Python
| false | false | 3,711 |
py
|
########################################################################
#
# File Name: HTMLTableRowElement.py
#
#
"""
WWW: http://4suite.com/4DOM e-mail: [email protected]
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import string
from xml.dom import implementation
from xml.dom import IndexSizeErr
from xml.dom.html.HTMLElement import HTMLElement
class HTMLTableRowElement(HTMLElement):
def __init__(self, ownerDocument, nodeName='TR'):
HTMLElement.__init__(self, ownerDocument, nodeName)
### Attribute Methods ###
def _get_align(self):
return string.capitalize(self.getAttribute('ALIGN'))
def _set_align(self,align):
self.setAttribute('ALIGN', align)
def _get_bgColor(self):
return self.getAttribute('BGCOLOR')
def _set_bgColor(self, color):
self.setAttribute('BGCOLOR', color)
def _get_cells(self):
cells = []
for child in self.childNodes:
if child.tagName in ['TD','TH']:
cells.append(child)
return implementation._4dom_createHTMLCollection(cells)
def _get_ch(self):
return self.getAttribute('CHAR')
def _set_ch(self, ch):
self.setAttribute('CHAR', ch)
def _get_chOff(self):
return self.getAttribute('CHAROFF')
def _set_chOff(self, offset):
self.setAttribute('CHAROFF', offset)
def _get_rowIndex(self):
#Get our index in the table
section = self.parentNode
if section == None:
return -1
table = section.parentNode
if table == None:
return -1
rows = table._get_rows()
return rows.index(self)
def _get_sectionRowIndex(self):
section = self.parentNode
if section == None:
return -1
rows = section._get_rows()
return rows.index(self)
def _get_vAlign(self):
return string.capitalize(self.getAttribute('VALIGN'))
def _set_vAlign(self, valign):
self.setAttribute('VALIGN', valign)
### Methods ###
def insertCell(self, index):
cells = self._get_cells()
if index < 0 or index > len(cells):
raise IndexSizeErr()
cell = self.ownerDocument.createElement('TD')
length = cells.length
if index == len(cells):
ref = None
elif index < len(cells):
ref = cells[index]
return self.insertBefore(cell, ref)
def deleteCell(self,index):
cells = self._get_cells()
if index < 0 or index >= len(cells):
raise IndexSizeErr()
self.removeChild(cells[index])
### Attribute Access Mappings ###
_readComputedAttrs = HTMLElement._readComputedAttrs.copy()
_readComputedAttrs.update ({
'rowIndex' : _get_rowIndex,
'sectionRowIndex' : _get_sectionRowIndex,
'cells' : _get_cells,
'align' : _get_align,
'bgColor' : _get_bgColor,
'ch' : _get_ch,
'chOff' : _get_chOff,
'vAlign' : _get_vAlign,
})
_writeComputedAttrs = HTMLElement._writeComputedAttrs.copy()
_writeComputedAttrs.update ({
'align' : _set_align,
'bgColor' : _set_bgColor,
'ch' : _set_ch,
'chOff' : _set_chOff,
'vAlign' : _set_vAlign,
})
# Create the read-only list of attributes
_readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
HTMLElement._readOnlyAttrs + _readComputedAttrs.keys())
|
[
"[email protected]"
] | |
82cd114c38d8767bd5493b1054b0112eb2f33b82
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02772/s117684727.py
|
af179f388db806b32c0635d0c096c78b0d0171ea
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 253 |
py
|
N = int(input())
A = list(map(int, input().split()))
even_numbers = [a for a in A if a % 2 == 0]
is_approved = all([even_num % 3 == 0 or even_num % 5 == 0 for even_num in even_numbers])
if is_approved:
print('APPROVED')
else:
print('DENIED')
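A worked instance of the rule implemented above (inputs are mine):

# N=5, A=[6, 7, 9, 10, 31]: the even elements are [6, 10];
# 6 % 3 == 0 and 10 % 5 == 0, so every even element passes -> APPROVED.
# With A=[8], 8 is divisible by neither 3 nor 5 -> DENIED.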
|
[
"[email protected]"
] | |
26a00630aeba6a6ae67c356e67ad7108f664c08b
|
2aec9c5e8c72b731d3abf22f2a407fe09c1cde09
|
/ZQZ510/ZQZ510/pipelines.py
|
96d17ebedbe541b8ea71011896e82ef784f24a35
|
[] |
no_license
|
jiangyg/ZWFproject
|
8b24cc34970ae0a9c2a2b0039dc527c83a5862b5
|
aa35bc59566d92721f23d2dd00b0febd268ac2dd
|
refs/heads/master
| 2020-09-26T17:01:00.229380 | 2019-11-15T13:16:21 | 2019-11-15T13:16:21 | 226,297,631 | 0 | 1 | null | 2019-12-06T09:55:37 | 2019-12-06T09:55:36 | null |
UTF-8
|
Python
| false | false | 542 |
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
class Zqz510Pipeline(object):
def open_spider(self, spider):
self.file = open('./zqz_data.json', 'w+', encoding='utf-8')
def process_item(self, item, spider):
self.file.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
return item
def close_spider(self, spider):
self.file.close()
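As the header comment says, the pipeline still has to be enabled in the project settings; a standard sketch, where the dotted path and the priority value 300 are assumptions based on the repository layout:

# settings.py (sketch)
ITEM_PIPELINES = {
    'ZQZ510.pipelines.Zqz510Pipeline': 300,
}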
|
[
"[email protected]"
] | |
560d28d47aec3beddae995957b47f2a586147262
|
153995fa868b4697d8d6b25379a16f9756604151
|
/student/migrations/0003_auto_20180530_1427.py
|
23c1d56c1bf88a956e612254eb17747ba36e63f8
|
[] |
no_license
|
Manju1313/django-school
|
816c13259654c4f57352add903cc13e3915f3724
|
1182de09e9b638a2a4f328024f6bc6807eff6029
|
refs/heads/master
| 2023-03-21T22:44:59.002131 | 2020-08-15T14:34:19 | 2020-08-15T14:34:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,414 |
py
|
# Generated by Django 2.0.4 on 2018-05-30 18:27
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('student', '0002_auto_20180530_1421'),
]
operations = [
migrations.RemoveField(
model_name='student',
name='stu_phone_number',
),
migrations.AddField(
model_name='student',
name='phone_number',
field=models.CharField(blank=True, max_length=17, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')]),
),
migrations.AlterField(
model_name='guardian',
name='phone_number',
field=models.CharField(blank=True, max_length=17, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')]),
),
migrations.AlterField(
model_name='instructor',
name='phone_number',
field=models.CharField(blank=True, max_length=17, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')]),
),
]
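A quick, self-contained check of the phone-number regex shared by the three fields above; the example numbers are mine:

import re

pattern = re.compile(r'^\+?1?\d{9,15}$')
assert pattern.match('+999999999')     # the documented format
assert pattern.match('+12125551234')   # up to 15 digits are allowed
assert not pattern.match('555-1234')   # separators are rejected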
|
[
"[email protected]"
] | |
3cb9f0d148c54cbbe893c3e1c798c3bb23c70ffc
|
a3cc7286d4a319cb76f3a44a593c4a18e5ddc104
|
/lib/surface/compute/instances/delete_access_config.py
|
479bf531ec0ef199dca5ae411f4dd8aff59f1cff
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
jordanistan/Google-Cloud-SDK
|
f2c6bb7abc2f33b9dfaec5de792aa1be91154099
|
42b9d7914c36a30d1e4b84ae2925df7edeca9962
|
refs/heads/master
| 2023-09-01T01:24:53.495537 | 2023-08-22T01:12:23 | 2023-08-22T01:12:23 | 127,072,491 | 0 | 1 |
NOASSERTION
| 2023-08-22T01:12:24 | 2018-03-28T02:31:19 |
Python
|
UTF-8
|
Python
| false | false | 3,102 |
py
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for deleting access configs from virtual machine instances."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import constants
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.instances import flags
class DeleteAccessConfig(base.SilentCommand):
"""Delete an access configuration from a virtual machine network interface."""
detailed_help = {
'DESCRIPTION': """\
*{command}* is used to delete access configurations from network
interfaces of Google Compute Engine virtual machines. Access
configurations allow you to assign a public, external IP to a virtual
machine.
""",
'EXAMPLES': """\
To remove the externally accessible IP from a virtual machine named
``example-instance'' in zone ``us-central1-a'', run:
$ {command} example-instance --zone us-central1-a
""",
}
@staticmethod
def Args(parser):
flags.INSTANCE_ARG.AddArgument(parser)
parser.add_argument(
'--access-config-name',
default=constants.DEFAULT_ACCESS_CONFIG_NAME,
help="""\
Specifies the name of the access configuration to delete.
``{0}'' is used as the default if this flag is not provided.
""".format(constants.DEFAULT_ACCESS_CONFIG_NAME))
parser.add_argument(
'--network-interface',
default=constants.DEFAULT_NETWORK_INTERFACE,
action=arg_parsers.StoreOnceAction,
help="""\
Specifies the name of the network interface from which to delete the
access configuration. If this is not provided, then ``nic0'' is used
as the default.
""")
def Run(self, args):
"""Invokes request necessary for removing an access config."""
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
instance_ref = flags.INSTANCE_ARG.ResolveAsResource(
args, holder.resources,
scope_lister=flags.GetInstanceZoneScopeLister(client))
request = client.messages.ComputeInstancesDeleteAccessConfigRequest(
accessConfig=args.access_config_name,
instance=instance_ref.Name(),
networkInterface=args.network_interface,
project=instance_ref.project,
zone=instance_ref.zone)
return client.MakeRequests([(client.apitools_client.instances,
'DeleteAccessConfig', request)])
|
[
"[email protected]"
] | |
51076cbc05dfd34c93e5ff0d33ec683f7304252f
|
6cc795fef13e82a2e50f487740f5373b5a3f8549
|
/pyunlocbox/tests/__init__.py
|
7cae2d147d6d4ccbb8129886a11191b019a147e2
|
[
"BSD-3-Clause"
] |
permissive
|
epfl-lts2/pyunlocbox
|
7a14e97f7e46981ed6748bb5073d473f45af676e
|
ec84282096fa9154d8bdcc52bacc3531c9720779
|
refs/heads/master
| 2023-08-29T22:13:29.345251 | 2022-10-18T11:18:53 | 2022-10-18T11:18:53 | 17,248,167 | 98 | 28 |
BSD-3-Clause
| 2023-08-18T02:01:44 | 2014-02-27T12:33:31 |
Python
|
UTF-8
|
Python
| false | false | 445 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test suite for the pyunlocbox package, broken by modules.
"""
import unittest
from . import test_functions
from . import test_operators
from . import test_solvers
from . import test_acceleration
from . import test_docstrings
suite = unittest.TestSuite([
test_functions.suite,
test_operators.suite,
test_solvers.suite,
test_acceleration.suite,
test_docstrings.suite,
])
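A minimal sketch of executing the aggregated suite with the stdlib runner, assuming the package is importable:

import unittest
from pyunlocbox import tests

unittest.TextTestRunner(verbosity=2).run(tests.suite)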
|
[
"[email protected]"
] | |
2f52dc55e8244d2992f25fe087aa779b5ee88b23
|
edfa045d12b8efb65de20261ff80a86160298e44
|
/contact/views.py
|
92ba8640b73aadf1add6ef04d0e028b1dae69786
|
[
"MIT"
] |
permissive
|
yusif763/Unistore-pro
|
1d559a89bb71f3db8b5d1e89df64ed7113f00f2a
|
41ad0fa209c79a201d3f6a7aa68ec0ace707dcad
|
refs/heads/main
| 2023-04-24T02:50:30.085011 | 2021-04-29T11:00:11 | 2021-04-29T11:00:11 | 362,782,688 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,954 |
py
|
from django.contrib import messages
from django.shortcuts import render, redirect
from contact.models import *
from contact.forms import ContactForm
from django.views.generic import (
    ListView, DetailView, CreateView
)
from django.views.generic.edit import FormMixin
from django.http import HttpResponseRedirect
from django.urls import reverse_lazy
# Create your views here.
# def about_contact(request):
# form = ContactForm()
# # sub_form = SubscriberForm()
# if request.method == 'POST':
# form = ContactForm(data=request.POST)
# if form.is_valid():
# form.save()
# return redirect('/about-contact/')
# context = {
# # "sub_form":sub_form,
# 'form': form
# }
# return render(request , 'about_contact.html' , context)
class AboutContactView(CreateView):
form_class = ContactForm
# fields = '__all__'
# model = Contact
template_name = 'about_contact.html'
success_url = reverse_lazy('common:index')
    def form_valid(self, form):
        result = super(AboutContactView, self).form_valid(form)
        messages.success(self.request, 'Sizin muracietiniz qebul edildi.')  # "Your request has been received."
        return result
# def contact_page(request):
# form = ContactForm()
# # sub_form = SubscriberForm()
# if request.method == 'POST':
# form = ContactForm(data=request.POST)
# if form.is_valid():
# form.save()
# return redirect('/contact/')
# context = {
# # "sub_form":sub_form,
# 'form': form
# }
# return render(request, "contact.html", context)
class ContactView(CreateView):
form_class = ContactForm
# fields = '__all__'
# model = Contact
template_name = 'contact.html'
success_url = reverse_lazy('common:index')
def form_valid(self, form):
result = super(ContactView, self).form_valid(form)
        messages.success(self.request, 'Sizin muracietiniz qebul edildi.')  # "Your request has been received."
return result
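A minimal wiring sketch for the two CreateViews above; the contact/urls.py module, the route strings and the URL names are my assumptions, not from this codebase:

# Hypothetical contact/urls.py
from django.urls import path
from contact.views import AboutContactView, ContactView

app_name = 'contact'
urlpatterns = [
    path('about-contact/', AboutContactView.as_view(), name='about_contact'),
    path('contact/', ContactView.as_view(), name='contact'),
]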
|
[
"[email protected]"
] | |
8cbb0199476d4a0ff738d2012c7bde1daee5d0e7
|
496e05014492b4bbecf9f15c40ae416c21e27a46
|
/src/outpost/django/api/serializers.py
|
f3b19c6cac6763725dbcfae9ac299911d7d02ba2
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
medunigraz/outpost_deprecated
|
b1ff802054c04cf989b3b660e132fa6a1c2a078c
|
bc88eaa3bb504d394fdf13f1131e40db27759c89
|
refs/heads/master
| 2022-01-23T15:46:34.859095 | 2019-05-21T08:38:11 | 2019-05-21T08:38:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,266 |
py
|
import re
from base64 import (
b64decode,
urlsafe_b64encode,
)
from pathlib import PurePosixPath
from uuid import uuid4
import six
from django.core.files.base import ContentFile
from drf_haystack.serializers import HaystackSerializer
from rest_framework.serializers import (
FileField,
IntegerField,
SerializerMethodField,
)
from outpost.django.geo import search_indexes as geo
from outpost.django.structure import search_indexes as structure
class AutocompleteSerializer(HaystackSerializer):
id = IntegerField(source='pk')
ctype = SerializerMethodField()
class Meta:
index_classes = [
geo.RoomIndex,
structure.OrganizationIndex,
structure.PersonIndex,
]
fields = [
'presentation',
'id',
'ctype',
'level_id',
'room_id',
'autocomplete',
]
ignore_fields = [
'text',
'autocomplete',
]
field_aliases = {
'q': 'autocomplete',
}
def get_ctype(self, obj):
return obj.content_type()
class Base64FileField(FileField):
"""
A Django REST framework field for handling file-uploads through raw post
data. It uses base64 for encoding and decoding the contents of the file.
Heavily based on
https://stackoverflow.com/a/28036805
"""
parser = re.compile(r'^data:(?P<mimetype>.*?);base64,')
def to_internal_value(self, raw):
# Check if this is a base64 string
if isinstance(raw, six.string_types):
header = self.parser.match(raw)
# Check if the base64 string is in the "data:" format
if header:
try:
decoded_file = b64decode(self.parser.sub('', raw))
except TypeError:
self.fail('invalid_image')
# Generate file name:
p = PurePosixPath()
uid = uuid4().bytes
u = urlsafe_b64encode(uid).decode('ascii').rstrip('=')
filename = p.joinpath(u).as_posix()
raw = ContentFile(decoded_file, name=filename)
return super(Base64FileField, self).to_internal_value(raw)
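A minimal usage sketch for the field above; DocumentSerializer and the Document model are hypothetical names, not part of this codebase:

# Hypothetical serializer wiring for Base64FileField.
from rest_framework import serializers

class DocumentSerializer(serializers.ModelSerializer):
    file = Base64FileField()

    class Meta:
        model = Document  # assumed model with a FileField named 'file'
        fields = ('id', 'file')

# A client would then POST JSON such as:
# {"file": "data:application/pdf;base64,JVBERi0xLjQK..."}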
|
[
"[email protected]"
] | |
556cd12c5bcabb294fdef6cef5e233d27d08634b
|
b5ce6908490cfb8e6a1e1cbe4745d675122ddce0
|
/questions/search-insert-position/Solution.py
|
c0090acd08a2b839bf40909c0f07c328192ae1f5
|
[
"MIT"
] |
permissive
|
franklingu/leetcode-solutions
|
8895910f13208e1d8e604100d84c2dd35684cde4
|
7ad7e5c1c040510b7b7bd225ed4297054464dbc6
|
refs/heads/master
| 2023-01-09T01:34:08.097518 | 2023-01-02T02:05:35 | 2023-01-02T02:05:35 | 43,345,677 | 155 | 66 |
MIT
| 2020-10-02T03:41:36 | 2015-09-29T04:54:38 |
Python
|
UTF-8
|
Python
| false | false | 523 |
py
|
'''
Given a sorted array and a target value, return the index if the target is found. If not, return the index where it would be if it were inserted in order.
You may assume no duplicates in the array.
Example 1:
Input: [1,3,5,6], 5
Output: 2
Example 2:
Input: [1,3,5,6], 2
Output: 1
Example 3:
Input: [1,3,5,6], 7
Output: 4
Example 4:
Input: [1,3,5,6], 0
Output: 0
'''
import bisect
from typing import List


class Solution:
    def searchInsert(self, nums: List[int], target: int) -> int:
        return bisect.bisect_left(nums, target)
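A quick sanity check against the four examples in the docstring:

s = Solution()
assert s.searchInsert([1, 3, 5, 6], 5) == 2
assert s.searchInsert([1, 3, 5, 6], 2) == 1
assert s.searchInsert([1, 3, 5, 6], 7) == 4
assert s.searchInsert([1, 3, 5, 6], 0) == 0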
|
[
"[email protected]"
] | |
1c8e3344ff726702de26bc95b86ffad4f8fa87df
|
cce5684e1bb9fea2df762c1afedb17b1795b7a5f
|
/pymcutil/selector/selectors/self_selector.py
|
03d75db75e9fd916c4c55f012ac4e9ca91173109
|
[
"MIT"
] |
permissive
|
Arcensoth/pymcutil
|
85071e5c3bbd25a47a1133bfa464f67126c62bdd
|
0c8f1efa4d611e92170ec48bedb160b1d00d0022
|
refs/heads/master
| 2020-03-18T05:02:06.769457 | 2018-07-02T00:45:51 | 2018-07-02T00:45:51 | 91,645,414 | 3 | 1 | null | 2017-07-12T15:56:12 | 2017-05-18T03:37:33 |
Python
|
UTF-8
|
Python
| false | false | 322 |
py
|
from pymcutil.selector.abc.selector import Selector
from pymcutil.symbols import selector_bases
from pymcutil.symbols.selector_bases.selector_bases import SelectorBase
class SelfSelector(Selector):
@property
def base(self) -> SelectorBase:
return selector_bases.self
self = SelfSelector
SELF = self()
|
[
"[email protected]"
] | |
84b921ebd67dca82253a50ee13baf4d2cb8fdb97
|
6646f6b92e9ff31f2f74b749ea12ace53cfc135c
|
/tests/unit/models/test_package_model.py
|
5d883c6352b89b74851372eb02d55c084db4b862
|
[] |
no_license
|
EricMontague/SponsorMatch
|
0a6685edb44b2694824d3d3a4d15dfcb42fdb68e
|
864aa3cfe25d74c2b97b9f09f45eb9fa10dac892
|
refs/heads/master
| 2022-12-08T22:43:21.684165 | 2021-03-19T00:50:06 | 2021-03-19T00:50:06 | 241,396,411 | 0 | 0 | null | 2022-12-08T03:38:23 | 2020-02-18T15:27:42 |
Python
|
UTF-8
|
Python
| false | false | 2,398 |
py
|
"""This module contains tests for the package model."""
import unittest
from tests.integration.testing_data import TestModelFactory
from app import create_app
from app.extensions import db
class PackageModelTestCase(unittest.TestCase):
"""Class to test the Package Model."""
def setUp(self):
"""Create application instance and insert necessary
information into the database before each test.
"""
self.app = create_app("testing", False)
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
def tearDown(self):
"""Pop application context, remove the db session,
and drop all tables in the database.
"""
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_package_sold_out(self):
"""Test to ensure that a package is correctly
recognized as sold out.
"""
role = TestModelFactory.create_role("Event Organizer")
user = TestModelFactory.create_user()
user.role = role
venue = TestModelFactory.create_venue()
event = TestModelFactory.create_event("Test Event", "live")
event.user = user
event.venue = venue
package = TestModelFactory.create_package(price=100, available_packages=10)
package.event = event
db.session.add_all([user, event, package])
db.session.commit()
package.num_purchased = package.available_packages
self.assertTrue(package.is_sold_out())
def test_package_num_sales(self):
"""Test to ensure that the number of packages purchased
is recorded correctly in the database.
"""
role = TestModelFactory.create_role("Event Organizer")
user = TestModelFactory.create_user()
user.role = role
venue = TestModelFactory.create_venue()
event = TestModelFactory.create_event("Test Event", "live")
event.user = user
event.venue = venue
package = TestModelFactory.create_package(price=100, available_packages=10)
package.event = event
db.session.add_all([user, event, package])
db.session.commit()
self.assertEqual(package.num_for_sale(), 10)
package.num_purchased += 1
self.assertEqual(package.num_for_sale(), 9)
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
73a23e510d0db12d3463c18a0f24bc61535d211a
|
9d1b1d52f99b86bec0e74878c0535057115dc667
|
/pes/views.py
|
2c1c8ed73da304fb4070741309f11b3496348234
|
[] |
no_license
|
antocuni/pesranking
|
1f9b2bb8f03ba15f5f5d36ff6e70e0de8edc5002
|
574ecf8b5e49979adf709239a4df78de83acd039
|
refs/heads/master
| 2022-11-22T03:21:40.837305 | 2011-12-01T19:31:03 | 2011-12-01T19:31:03 | 275,815,179 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 194 |
py
|
from django.http import HttpResponseRedirect
from pes import models
def updateranking(request):
models.Match.updateranking()
return HttpResponseRedirect(request.META['HTTP_REFERER'])
|
[
"[email protected]"
] | |
9dc4b494a28257793973cafba0d97492a5e21a0a
|
8dbb2a3e2286c97b1baa3ee54210189f8470eb4d
|
/kubernetes-stubs/client/models/v1beta1_volume_error.pyi
|
9f645d23474efed86f712f543da51fd09fa5d231
|
[] |
no_license
|
foodpairing/kubernetes-stubs
|
e4b0f687254316e6f2954bacaa69ff898a88bde4
|
f510dc3d350ec998787f543a280dd619449b5445
|
refs/heads/master
| 2023-08-21T21:00:54.485923 | 2021-08-25T03:53:07 | 2021-08-25T04:45:17 | 414,555,568 | 0 | 0 | null | 2021-10-07T10:26:08 | 2021-10-07T10:26:08 | null |
UTF-8
|
Python
| false | false | 518 |
pyi
|
import datetime
import typing
import kubernetes.client
class V1beta1VolumeError:
message: typing.Optional[str]
time: typing.Optional[datetime.datetime]
def __init__(
self,
*,
message: typing.Optional[str] = ...,
time: typing.Optional[datetime.datetime] = ...
) -> None: ...
def to_dict(self) -> V1beta1VolumeErrorDict: ...
class V1beta1VolumeErrorDict(typing.TypedDict, total=False):
message: typing.Optional[str]
time: typing.Optional[datetime.datetime]
|
[
"[email protected]"
] | |
688f5a27c17943c555fe537f43e8a91de0397e93
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/scattercarpet/_uid.py
|
e693f7c7608fa0de61de5dbd33659c52dd174a3f
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 |
MIT
| 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null |
UTF-8
|
Python
| false | false | 390 |
py
|
import _plotly_utils.basevalidators
class UidValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="uid", parent_name="scattercarpet", **kwargs):
super(UidValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs
)
|
[
"[email protected]"
] | |
073c78c464eb8c22be7697340798bdfb19009e7c
|
8adec48dfaee1cdfd6c7f4d2fb3038aa1c17bda6
|
/WProf/build/masters/master.chromium.chromiumos/master_gatekeeper_cfg.py
|
68b8a28fa503e0db0192a1a7d126068772a3feef
|
[] |
no_license
|
kusoof/wprof
|
ef507cfa92b3fd0f664d0eefef7fc7d6cd69481e
|
8511e9d4339d3d6fad5e14ad7fff73dfbd96beb8
|
refs/heads/master
| 2021-01-11T00:52:51.152225 | 2016-12-10T23:51:14 | 2016-12-10T23:51:14 | 70,486,057 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,526 |
py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from master import gatekeeper
from master import master_utils
# This is the list of the builder categories and the corresponding critical
# steps. If one critical step fails, gatekeeper will close the tree
# automatically.
# Note: don't include 'update scripts' since we can't do much about it when
# it's failing and the tree is still technically fine.
chromium_categories_steps = {
'': ['update'],
'tester': [
'base_unittests',
#'browser_tests',
'cacheinvalidation_unittests',
'content_unittests',
'courgette_unittests',
'crypto_unittests',
'dbus_unittests',
'googleurl_unittests',
'installer_util_unittests',
#'interactive_ui_tests',
'ipc_tests',
'jingle_unittests',
'media_unittests',
'mini_installer_test',
'nacl_integration',
'net_unittests',
'printing_unittests',
'remoting_unittests',
'sbox_integration_tests',
'sbox_unittests',
'sbox_validation_tests',
'sizes',
'sql_unittests',
'start_crash_handler',
'sync_unittests',
'test_shell_tests',
'ui_unittests',
'unit_tests',
'views_unittests',
#'webkit_tests',
],
'compile': ['check_deps', 'compile', 'archive_build'],
'closer': ['BuildTarget'],
}
exclusions = {
}
forgiving_steps = ['update_scripts', 'update', 'svnkill', 'taskkill',
'archive_build', 'start_crash_handler']
close_chromiumos_categories_steps = {
'closer': [
'LKGMSync',
'BuildBoard',
'UnitTest',
],
}
warn_chromiumos_categories_steps = {
'watch': [
'UploadPrebuilts',
'Archive',
'VMTest',
],
}
warn_aura_chromiumos_categories_steps = {
'aurawatch': [
'Archive',
'BuildTarget',
'BuildBoard',
'UnitTest',
]
}
subject = ('buildbot %(result)s in %(projectName)s on %(builder)s, '
'revision %(revision)s')
warning_header = ('Please look at failure in "%(steps)s" on "%(builder)s" '
'and help out if you can')
def Update(config, active_master, alternate_master, c):
# chrome likely/possible failures to the chrome sheriffs, closing the
# chrome tree
c['status'].append(gatekeeper.GateKeeper(
fromaddr=active_master.from_address,
categories_steps=chromium_categories_steps,
exclusions=exclusions,
relayhost=config.Master.smtp,
subject=subject,
extraRecipients=active_master.tree_closing_notification_recipients,
lookup=master_utils.FilterDomain(),
forgiving_steps=forgiving_steps,
tree_status_url=active_master.tree_status_url,
sheriffs=['sheriff'],
use_getname=True))
# chromium os failures close the chromeOS tree
c['status'].append(gatekeeper.GateKeeper(
fromaddr=active_master.from_address,
categories_steps=close_chromiumos_categories_steps,
exclusions=exclusions,
relayhost=config.Master.smtp,
subject='Closer ' + subject,
extraRecipients=alternate_master.tree_closing_notification_recipients,
lookup=master_utils.FilterDomain(),
forgiving_steps=forgiving_steps,
tree_status_url=alternate_master.tree_status_url,
sheriffs=['sheriff_cros_mtv', 'sheriff_cros_nonmtv'],
use_getname=True))
# chromium os buried failures/flakiness to chrome OS folk
c['status'].append(gatekeeper.GateKeeper(
fromaddr=active_master.from_address,
categories_steps=warn_chromiumos_categories_steps,
exclusions=exclusions,
relayhost=config.Master.smtp,
subject='Warning ' + subject,
status_header=warning_header,
extraRecipients=[],
lookup=master_utils.FilterDomain(),
forgiving_steps=forgiving_steps,
tree_status_url=None,
sheriffs=['sheriff_cros_mtv', 'sheriff_cros_nonmtv'],
use_getname=True))
# while the Aura folk are in panic fast mode, let them know to help on
# failures that may be related to their special configs.
c['status'].append(gatekeeper.GateKeeper(
fromaddr=active_master.from_address,
categories_steps=warn_aura_chromiumos_categories_steps,
exclusions=exclusions,
relayhost=config.Master.smtp,
subject='Warning ' + subject,
status_header=warning_header,
extraRecipients=[],
lookup=master_utils.FilterDomain(),
forgiving_steps=forgiving_steps,
tree_status_url=None,
sheriffs=['sheriff_aura'],
use_getname=True))
|
[
"kusoof@kookaburra.(none)"
] |
kusoof@kookaburra.(none)
|
f0eefe22562432df713f9a164d1362e2892d2ea0
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03495/s527457667.py
|
af89959a94f603b8e66e9c604d4ff5d4f266dce7
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 316 |
py
|
n, k = map(int, input().split())
a = list(map(int, input().split()))
ball = {}
for i in a:
if i in ball:
ball[i] += 1
else:
ball[i] = 1
ball = sorted(ball.items(), key=lambda x: -x[1])
ans = 0
if len(ball) > k:
for i in range(k):
ans += ball[i][1]
ans = n - ans
print(ans)
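The greedy idea behind the loop above: keep the k most frequent values untouched and rewrite every other ball, so the minimum number of rewrites is n minus the sum of the top-k frequencies. A small worked instance (numbers are mine):

# n=5, k=2, A=[1, 1, 2, 2, 3]: frequencies {1: 2, 2: 2, 3: 1};
# keep values 1 and 2, rewrite the lone 3 -> answer 5 - (2 + 2) = 1.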
|
[
"[email protected]"
] | |
2a46cba90659a56d1af070ee76242a046edd72a9
|
ff12b271c7538f0621b88e567b315d5bb44166af
|
/ambari_monitor/hbase_monitor/hbase_monitor_v2/conn_db.py
|
d483eee20dceb6a566d0b5d5b49a331740dd2f1d
|
[] |
no_license
|
witnesslq/big_data_operation
|
23ca6afd2f69fbe2b4f9debea4bd2f49f6d4a1c8
|
829422bfd3c52fbd99e0b54e3da7b9ac7ec4f3cd
|
refs/heads/main
| 2023-06-06T22:17:15.572951 | 2021-07-13T14:34:18 | 2021-07-13T14:34:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,630 |
py
|
#!/usr/bin/env python
# -*-coding:utf-8 -*-
# ***************************************************************************
# File name:    conn_db.py
# Description:  migrate Hive tables
# Input tables:
# Output tables:
# Author:       hyn
# Created:      20200808
# Change log:
# Modified:
# ***************************************************************************
# Invocation:   python conn_db.py
# ***************************************************************************
import os
import sys
from datetime import datetime
import datetime as date_time
import pymysql
mysql_sh = "mysql -h 172.19.168.22 -P 3308 -u zhao -pzhao zhao -e ' "
# Open a MySQL connection
def conn_db():
conn = pymysql.connect(host="192.168.195.233", port=20031, user="csapdmcfg", passwd="iEXIMt3w!TFL9vkO", db="csapdmcfg", charset="utf8")
return conn
# Query data
def select(sql):
conn = conn_db()
cursor = conn.cursor()
cursor.execute(sql)
result = cursor.fetchall()
cursor.close()
conn.close()
# print result
return result
# Insert and update data
def insert(sql):
conn = conn_db()
cursor = conn.cursor()
cursor.execute(sql)
result = cursor.fetchall()
conn.commit()
cursor.close()
conn.close()
# print type(result)
# print result
return result
# Batch insert and update data
def insert_batch(sql_list):
conn = conn_db()
cursor = conn.cursor()
for sql in sql_list:
cursor.execute(sql)
conn.commit()
cursor.close()
conn.close()
# result = cursor.fetchall()
# print type(result)
# print result
return
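A hedged usage sketch for the helpers above; the query is illustrative and assumes the configured MySQL host is reachable:

# Illustrative only: run a trivial query through the select() helper.
if __name__ == '__main__':
    print(select('SELECT 1'))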
|
[
"[email protected]"
] | |
9fbddab470ce95d6a31bb446fcd8a7ee812aa1d0
|
5399dd4580ea3f528753bc8b52a981743d62f8bb
|
/keras/keras26_LSTM_hamsu.py
|
10977c12c7ffbf0c14ef67ef7f6d8b6f2e3211d9
|
[] |
no_license
|
iwillbeaprogramer/Study
|
3ac7c118ffe3981d78b4ad263cb62432eae13970
|
3bfe571da5bbfc545b994e5878e217f9306bde14
|
refs/heads/main
| 2023-05-07T16:31:05.564973 | 2021-05-27T14:50:00 | 2021-05-27T14:50:00 | 324,044,441 | 8 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,598 |
py
|
# keras23_LSTM3_scale rewritten with the Keras functional API
import numpy as np
x = np.array([[1,2,3],[2,3,4],[3,4,5],[4,5,6],[5,6,7],[6,7,8],[7,8,9],[8,9,10],[9,10,11],[10,11,12],[20,30,40],[30,40,50],[40,50,60]])
y = np.array([4,5,6,7,8,9,10,11,12,13,50,60,70])
x_pred = np.array([50,60,70]).reshape(1,3,1)
from tensorflow.keras.models import Sequential,Model
from tensorflow.keras.layers import Dense,LSTM,Input
from sklearn.metrics import r2_score
x = x.reshape(13,3,1)
inputs = Input(shape = (3,1))
lstm = LSTM(1024,activation='relu')(inputs)
dense1 = Dense(512,activation='relu')(lstm)
dense2 = Dense(256,activation='relu')(dense1)
dense3 = Dense(128,activation='relu')(dense2)
dense4 = Dense(64,activation='relu')(dense3)
dense5 = Dense(32,activation='relu')(dense4)
dense6 = Dense(8,activation='relu')(dense5)
dense7 = Dense(4,activation='relu')(dense6)
outputs = Dense(1)(dense7)
model = Model(inputs,outputs)
# model = Sequential()
# model.add(LSTM(1024,input_shape=(3,1),activation='relu'))
# model.add(Dense(512,activation='relu'))
# model.add(Dense(256,activation='relu'))
# model.add(Dense(128,activation='relu'))
# model.add(Dense(64,activation='relu'))
# model.add(Dense(32,activation='relu'))
# model.add(Dense(16,activation='relu'))
# model.add(Dense(8,activation='relu'))
# model.add(Dense(4,activation='relu'))
# model.add(Dense(1))
model.compile(loss='mse',optimizer='adam')
model.fit(x,y,epochs=500,batch_size=1)
loss = model.evaluate(x,y,batch_size=1)
y_pred = model.predict(x_pred)
print(y_pred)
print('loss : ',loss)
'''
[[81.13962]]
[[80.14889]]
loss : 0.05985087901353836
'''
|
[
"[email protected]"
] | |
1e4ec69660f5980e00461dbe5783a03c23174204
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4131/codes/1758_1580.py
|
c80833ecede95654db5b447bb5eb5803ca08197f
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 423 |
py
|
from numpy import *

v = array(eval(input("notas:"))) * 1.0   # "notas" = grades; -1.0 marks an absent student
n = array(eval(input("alunos:")))        # "alunos" = student names
i = 0
faltas = 0       # absences
aprovados = 0    # passed (grade >= 6)
reprovados = 0   # failed (grade < 6, excluding absences)
soma = 0         # sum of grades of students who took the exam
while i < size(v):
    if v[i] == -1.0:
        faltas += 1
    if v[i] >= 6:
        aprovados += 1
        soma += v[i]
    if v[i] < 6.0 and v[i] != -1.0:
        reprovados += 1
        soma += v[i]
    if v[i] == max(v):
        nome = n[i]
    i = i + 1
print(faltas)
print(aprovados)
print(reprovados)
print(round(soma / (aprovados + reprovados), 2))  # average over students who attended
print(nome)  # name of the student with the highest grade
|
[
"[email protected]"
] |