Dataset schema (column name: type, observed range or distinct values):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 3 to 616
- content_id: string, length 40
- detected_licenses: list, 0 to 112 entries
- license_type: string, 2 classes
- repo_name: string, length 5 to 115
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, 777 classes
- visit_date: timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38
- revision_date: timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00
- committer_date: timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06
- github_id: int64, 4.92k to 681M, contains nulls
- star_events_count: int64, 0 to 209k
- fork_events_count: int64, 0 to 110k
- gha_license_id: string, 22 classes
- gha_event_created_at: timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, contains nulls
- gha_created_at: timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, contains nulls
- gha_language: string, 149 classes
- src_encoding: string, 26 classes
- language: string, 1 value (Python)
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 3 to 10.2M
- extension: string, 188 classes
- content: string, length 3 to 10.2M
- authors: list, 1 entry
- author_id: string, length 1 to 132
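Each record below pairs this metadata with the raw text of one Python file. A minimal sketch of streaming such records with the Hugging Face datasets library, assuming the dump is backed by a hosted dataset (the dataset name here is a placeholder, not the actual source):

from datasets import load_dataset

# stream instead of downloading the whole dataset up front; the name is hypothetical
ds = load_dataset("example-org/python-source-dump", split="train", streaming=True)
for record in ds:
    # schema fields from above; the file text itself lives in `content`
    print(record["repo_name"], record["path"], record["license_type"])
    print(record["content"][:120])
    break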
a1fd04150e2df747a09eb1f551b262a2e160f5b4
|
36c00fe2afff4818c937e312ce0c6a79f35e2a77
|
/7-kyu/reverse-words/python/solution.py
|
107e51e135f24323d7113ce7f0bcdbbd24cff6c5
|
[] |
no_license
|
p-lots/codewars
|
0a67b6ee4c91180ff78c648421b9d2d64463ddc3
|
535faeee475c6b398124d6f5002b0e111406e8bb
|
refs/heads/master
| 2023-08-23T22:14:33.635011 | 2023-08-23T13:30:37 | 2023-08-23T13:30:37 | 195,320,309 | 0 | 0 | null | 2023-05-09T19:25:50 | 2019-07-05T01:40:15 |
Python
|
UTF-8
|
Python
| false | false | 85 |
py
|
def reverse_words(text):
return ' '.join(word[::-1] for word in text.split(' '))
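# e.g. reverse_words('double  spaced') -> 'elbuod  decaps'
# split(' ') keeps the empty strings produced by consecutive spaces, so spacing survives the round trip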
|
[
"[email protected]"
] | |
1483a7fc4dd3bd86a8bf52283e9408cab2f23767
|
5145773cf66978c2ea449fe5b3b125d1b363361b
|
/4-HeaderExchange/producer.py
|
7942e1f6678ad779b55ca7de4223a741449cecc7
|
[] |
no_license
|
MoeinGhbh/RabbitMQ-Type
|
62a0e297e914e88de56cdee2c893b36659e069c9
|
011e2368b2250393295ea9f0b42f397656b626a3
|
refs/heads/master
| 2022-07-18T07:03:09.385269 | 2020-05-20T17:36:11 | 2020-05-20T17:36:11 | 265,569,624 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,330 |
py
|
#!/home/ubd/anaconda3/envs/test_env/bin/python
import json
import pika
import sys
if len(sys.argv) < 4:
print("Call syntax: python SCRIPT_NAME PRODUCER_NAME MESSAGE_STRING HEADERS_VALUES")
exit()
producerName = sys.argv[1]
messageText = sys.argv[2]
headersValues = sys.argv[3:]
connection = pika.BlockingConnection(
pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='exchange_headers', exchange_type='headers')
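# a headers exchange routes on message headers instead of the routing key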
headers = {}
number = 1
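# build header keys K1, K2, ... in the order the values were given on the command line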
for headerValue in headersValues:
headers["K" + str(number)] = headerValue
number = number + 1
data = {
"producerName": producerName,
"headersValues": headersValues,
"headers": headers,
"messageText": messageText
}
message = json.dumps(data)
channel.basic_publish(
exchange='exchange_headers',
routing_key='',
body=message,
properties=pika.BasicProperties(
delivery_mode=2,
headers=headers
))
print("Producer name: %s, headers: %s, message text:%s" % (producerName, headers, message))
connection.close()
# usage:
#   python producer.py P1 'Message 1 from producer P1' A
# prints:
#   Producer name: P1, headers: {'K1': 'A'}, message text:{"producerName": "P1", "headersValues": ["A"], "headers": {"K1": "A"}, "messageText": "Message 1 from producer P1"}
|
[
"="
] |
=
|
83cc96a40f65ef2ec4447aa30734347ee89efba7
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/cv/classification/Gluon_ResNet50_v1b_for_PyTorch/timm/utils/clip_grad.py
|
08d87f99e8c2c67f5a29b445f1e53bfd8c5a212c
|
[
"Apache-2.0",
"MIT",
"CC-BY-NC-4.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 |
Apache-2.0
| 2022-10-15T09:29:12 | 2022-04-20T04:11:18 |
Python
|
UTF-8
|
Python
| false | false | 1,415 |
py
|
# Copyright 2019 Ross Wightman
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from timm.utils.agc import adaptive_clip_grad
def dispatch_clip_grad(parameters, value: float, mode: str = 'norm', norm_type: float = 2.0):
""" Dispatch to gradient clipping method
Args:
parameters (Iterable): model parameters to clip
        value (float): clipping value/factor/norm, mode dependent
mode (str): clipping mode, one of 'norm', 'value', 'agc'
norm_type (float): p-norm, default 2.0
"""
if mode == 'norm':
torch.nn.utils.clip_grad_norm_(parameters, value, norm_type=norm_type)
elif mode == 'value':
torch.nn.utils.clip_grad_value_(parameters, value)
elif mode == 'agc':
adaptive_clip_grad(parameters, value, norm_type=norm_type)
else:
assert False, f"Unknown clip mode ({mode})."
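# usage sketch (model is assumed, not part of this file):
#   dispatch_clip_grad(model.parameters(), value=1.0, mode='norm')
#   dispatch_clip_grad(model.parameters(), value=0.01, mode='agc')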
|
[
"[email protected]"
] | |
8c845b5cd02687c170cbbbc4d79dc39d69c912cb
|
d210853ba6d1f3b5383a09e1b553c19083d78014
|
/geo/backend/delivery/migrations/0008_deliverypickpoint.py
|
904ed647eda3b40aff2a045c20036106af2d58a4
|
[] |
no_license
|
Hagen013/presidentwatches
|
f252c7995e39f6cffb6608e43f555abc32f6a9fc
|
b9ca72aef1db01262675274c83a5c5dff4d6e2da
|
refs/heads/master
| 2022-12-17T08:45:15.541869 | 2019-12-29T17:48:56 | 2019-12-29T17:48:56 | 162,160,435 | 0 | 0 | null | 2022-12-08T01:49:45 | 2018-12-17T16:36:05 |
HTML
|
UTF-8
|
Python
| false | false | 2,139 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-12-25 16:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('kladr', '0001_initial'),
('delivery', '0007_deliverysdekpoint'),
]
operations = [
migrations.CreateModel(
name='DeliveryPickPoint',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=512, verbose_name='Имя')),
('code', models.CharField(max_length=512, unique=True)),
('latitude', models.CharField(max_length=512)),
('longitude', models.CharField(max_length=512)),
('address', models.TextField()),
('description', models.TextField()),
('is_payment_by_card', models.BooleanField(default=True)),
('is_payment_by_cash', models.BooleanField(default=True)),
                ('pvz_type', models.PositiveIntegerField(choices=[(1, 'АПТ'), (2, 'ПВЗ')], default=1)),
('max_box_size', models.CharField(blank=True, max_length=512)),
                ('zone', models.IntegerField(choices=[(-1, 'НетЗоны'), (0, '0'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7')])),
('coefficient', models.FloatField(default=1.0)),
('tariff_type', models.PositiveIntegerField(choices=[(1, 'STANDART'), (2, 'PRIORITY')])),
('time_min', models.PositiveSmallIntegerField(default=None, null=True)),
('time_max', models.PositiveSmallIntegerField(default=None, null=True)),
('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='delivery.PickPointCityList')),
('kladr', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kladr.Kladr')),
],
options={
'abstract': False,
},
),
]
|
[
"="
] |
=
|
18c616bb07a09bb2bc5c408dd2006960ffaad576
|
63dc2b63afa8848842470c52f10b9066445f239c
|
/manage.py
|
c281068744a6d18bb1595aa256bc0bb90e717125
|
[] |
no_license
|
wanderchan/hospital
|
724a5e03ef8d9d2fea8ef3da80646aefe9bdbd73
|
887a668f7d93f8271908facb37b4c6fef67e9700
|
refs/heads/master
| 2020-03-18T23:39:11.071002 | 2018-05-30T09:09:24 | 2018-05-30T09:09:24 | 129,866,541 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 252 |
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "biyesheji.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
ea3c672c2a79572cd0dd8b3564ad661cfdee586f
|
dd80a584130ef1a0333429ba76c1cee0eb40df73
|
/external/chromium_org/chrome/common/extensions/docs/server2/cron_servlet.py
|
efdab71fa6c7af2f2a7368db1856c7c81899b02c
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
karunmatharu/Android-4.4-Pay-by-Data
|
466f4e169ede13c5835424c78e8c30ce58f885c1
|
fcb778e92d4aad525ef7a995660580f948d40bc9
|
refs/heads/master
| 2021-03-24T13:33:01.721868 | 2017-02-18T17:48:49 | 2017-02-18T17:48:49 | 81,847,777 | 0 | 2 |
MIT
| 2020-03-09T00:02:12 | 2017-02-13T16:47:00 | null |
UTF-8
|
Python
| false | false | 11,814 |
py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import traceback
from app_yaml_helper import AppYamlHelper
from appengine_wrappers import (
GetAppVersion, IsDeadlineExceededError, logservice)
from branch_utility import BranchUtility
from compiled_file_system import CompiledFileSystem
from data_source_registry import CreateDataSources
from environment import IsDevServer
from extensions_paths import EXAMPLES, PUBLIC_TEMPLATES, STATIC_DOCS
from file_system_util import CreateURLsFromPaths
from future import Gettable, Future
from github_file_system_provider import GithubFileSystemProvider
from host_file_system_provider import HostFileSystemProvider
from object_store_creator import ObjectStoreCreator
from render_servlet import RenderServlet
from server_instance import ServerInstance
from servlet import Servlet, Request, Response
from timer import Timer, TimerClosure
class _SingletonRenderServletDelegate(RenderServlet.Delegate):
def __init__(self, server_instance):
self._server_instance = server_instance
def CreateServerInstance(self):
return self._server_instance
class _CronLogger(object):
'''Wraps the logging.* methods to prefix them with 'cron' and flush
immediately. The flushing is important because often these cron runs time
out and we lose the logs.
'''
def info(self, msg, *args): self._log(logging.info, msg, args)
def warning(self, msg, *args): self._log(logging.warning, msg, args)
def error(self, msg, *args): self._log(logging.error, msg, args)
def _log(self, logfn, msg, args):
try:
logfn('cron: %s' % msg, *args)
finally:
logservice.flush()
_cronlog = _CronLogger()
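# shared module-wide so that every cron log line is flushed as soon as it is written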
def _RequestEachItem(title, items, request_callback):
'''Runs a task |request_callback| named |title| for each item in |items|.
|request_callback| must take an item and return a servlet response.
Returns true if every item was successfully run, false if any return a
non-200 response or raise an exception.
'''
_cronlog.info('%s: starting', title)
success_count, failure_count = 0, 0
timer = Timer()
try:
for i, item in enumerate(items):
def error_message(detail):
return '%s: error rendering %s (%s of %s): %s' % (
title, item, i + 1, len(items), detail)
try:
response = request_callback(item)
if response.status == 200:
success_count += 1
else:
_cronlog.error(error_message('response status %s' % response.status))
failure_count += 1
except Exception as e:
_cronlog.error(error_message(traceback.format_exc()))
failure_count += 1
if IsDeadlineExceededError(e): raise
finally:
_cronlog.info('%s: rendered %s of %s with %s failures in %s',
title, success_count, len(items), failure_count,
timer.Stop().FormatElapsed())
return success_count == len(items)
class CronServlet(Servlet):
'''Servlet which runs a cron job.
'''
def __init__(self, request, delegate_for_test=None):
Servlet.__init__(self, request)
self._delegate = delegate_for_test or CronServlet.Delegate()
class Delegate(object):
'''CronServlet's runtime dependencies. Override for testing.
'''
def CreateBranchUtility(self, object_store_creator):
return BranchUtility.Create(object_store_creator)
def CreateHostFileSystemProvider(self,
object_store_creator,
max_trunk_revision=None):
return HostFileSystemProvider(object_store_creator,
max_trunk_revision=max_trunk_revision)
def CreateGithubFileSystemProvider(self, object_store_creator):
return GithubFileSystemProvider(object_store_creator)
def GetAppVersion(self):
return GetAppVersion()
def Get(self):
# Crons often time out, and if they do we need to make sure to flush the
# logs before the process gets killed (Python gives us a couple of
# seconds).
#
# So, manually flush logs at the end of the cron run. However, sometimes
# even that isn't enough, which is why in this file we use _cronlog and
    # make it flush the log every time it's used.
logservice.AUTOFLUSH_ENABLED = False
try:
return self._GetImpl()
except BaseException:
_cronlog.error('Caught top-level exception! %s', traceback.format_exc())
finally:
logservice.flush()
def _GetImpl(self):
# Cron strategy:
#
# Find all public template files and static files, and render them. Most of
# the time these won't have changed since the last cron run, so it's a
# little wasteful, but hopefully rendering is really fast (if it isn't we
# have a problem).
_cronlog.info('starting')
# This is returned every time RenderServlet wants to create a new
# ServerInstance.
#
# TODO(kalman): IMPORTANT. This sometimes throws an exception, breaking
# everything. Need retry logic at the fetcher level.
server_instance = self._GetSafeServerInstance()
trunk_fs = server_instance.host_file_system_provider.GetTrunk()
def render(path):
request = Request(path, self._request.host, self._request.headers)
delegate = _SingletonRenderServletDelegate(server_instance)
return RenderServlet(request, delegate).Get()
def request_files_in_dir(path, prefix=''):
'''Requests every file found under |path| in this host file system, with
a request prefix of |prefix|.
'''
files = [name for name, _ in CreateURLsFromPaths(trunk_fs, path, prefix)]
return _RequestEachItem(path, files, render)
results = []
try:
# Start running the hand-written Cron methods first; they can be run in
# parallel. They are resolved at the end.
def run_cron_for_future(target):
title = target.__class__.__name__
future, init_timer = TimerClosure(target.Cron)
assert isinstance(future, Future), (
'%s.Cron() did not return a Future' % title)
def resolve():
resolve_timer = Timer()
try:
future.Get()
except Exception as e:
_cronlog.error('%s: error %s' % (title, traceback.format_exc()))
results.append(False)
if IsDeadlineExceededError(e): raise
finally:
resolve_timer.Stop()
_cronlog.info('%s took %s: %s to initialize and %s to resolve' %
(title,
init_timer.With(resolve_timer).FormatElapsed(),
init_timer.FormatElapsed(),
resolve_timer.FormatElapsed()))
return Future(delegate=Gettable(resolve))
targets = (CreateDataSources(server_instance).values() +
[server_instance.content_providers])
title = 'initializing %s parallel Cron targets' % len(targets)
_cronlog.info(title)
timer = Timer()
try:
cron_futures = [run_cron_for_future(target) for target in targets]
finally:
_cronlog.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
# Rendering the public templates will also pull in all of the private
# templates.
results.append(request_files_in_dir(PUBLIC_TEMPLATES))
# Rendering the public templates will have pulled in the .js and
# manifest.json files (for listing examples on the API reference pages),
# but there are still images, CSS, etc.
results.append(request_files_in_dir(STATIC_DOCS, prefix='static'))
# Samples are too expensive to run on the dev server, where there is no
# parallel fetch.
if not IsDevServer():
# Fetch each individual sample file.
results.append(request_files_in_dir(EXAMPLES,
prefix='extensions/examples'))
# Fetch the zip file of each example (contains all the individual
# files).
example_zips = []
for root, _, files in trunk_fs.Walk(EXAMPLES):
example_zips.extend(
root + '.zip' for name in files if name == 'manifest.json')
results.append(_RequestEachItem(
'example zips',
example_zips,
lambda path: render('extensions/examples/' + path)))
# Resolve the hand-written Cron method futures.
title = 'resolving %s parallel Cron targets' % len(targets)
_cronlog.info(title)
timer = Timer()
try:
for future in cron_futures:
future.Get()
finally:
_cronlog.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
except:
results.append(False)
# This should never actually happen (each cron step does its own
# conservative error checking), so re-raise no matter what it is.
_cronlog.error('uncaught error: %s' % traceback.format_exc())
raise
finally:
success = all(results)
_cronlog.info('finished (%s)', 'success' if success else 'FAILED')
return (Response.Ok('Success') if success else
Response.InternalError('Failure'))
def _GetSafeServerInstance(self):
'''Returns a ServerInstance with a host file system at a safe revision,
meaning the last revision that the current running version of the server
existed.
'''
delegate = self._delegate
# IMPORTANT: Get a ServerInstance pinned to the most recent revision, not
# HEAD. These cron jobs take a while and run very frequently such that
# there is usually one running at any given time, and eventually a file
# that we're dealing with will change underneath it, putting the server in
# an undefined state.
server_instance_near_head = self._CreateServerInstance(
self._GetMostRecentRevision())
app_yaml_handler = AppYamlHelper(
server_instance_near_head.object_store_creator,
server_instance_near_head.host_file_system_provider)
if app_yaml_handler.IsUpToDate(delegate.GetAppVersion()):
return server_instance_near_head
# The version in app.yaml is greater than the currently running app's.
# The safe version is the one before it changed.
safe_revision = app_yaml_handler.GetFirstRevisionGreaterThan(
delegate.GetAppVersion()) - 1
_cronlog.info('app version %s is out of date, safe is %s',
delegate.GetAppVersion(), safe_revision)
return self._CreateServerInstance(safe_revision)
def _GetMostRecentRevision(self):
'''Gets the revision of the most recent patch submitted to the host file
system. This is similar to HEAD but it's a concrete revision so won't
change as the cron runs.
'''
head_fs = (
self._CreateServerInstance(None).host_file_system_provider.GetTrunk())
return head_fs.Stat('').version
def _CreateServerInstance(self, revision):
'''Creates a ServerInstance pinned to |revision|, or HEAD if None.
NOTE: If passed None it's likely that during the cron run patches will be
submitted at HEAD, which may change data underneath the cron run.
'''
object_store_creator = ObjectStoreCreator(start_empty=True)
branch_utility = self._delegate.CreateBranchUtility(object_store_creator)
host_file_system_provider = self._delegate.CreateHostFileSystemProvider(
object_store_creator, max_trunk_revision=revision)
github_file_system_provider = self._delegate.CreateGithubFileSystemProvider(
object_store_creator)
return ServerInstance(object_store_creator,
CompiledFileSystem.Factory(object_store_creator),
branch_utility,
host_file_system_provider,
github_file_system_provider)
|
[
"[email protected]"
] | |
4456273aa82c8258872064b3a763b75cc4e52ad6
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2494/60617/259151.py
|
a17472a016cee01b922df70d039e6105a2e794df
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 910 |
py
|
def reversePair():
arr=eval(input())
print(merge(arr, 0, len(arr)-1))
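# merge() recursively counts pairs i < j with arr[i] > 2 * arr[j]:
# cross-half pairs via the two-pointer scan, in-half pairs via recursion;
# sort() then merges the two already-sorted halves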
def merge(arr, l, r):
count=0
if l>=r:
return count
mid=(l+r)//2
count+=merge(arr, l, mid)
count+=merge(arr, mid+1, r)
j=mid+1
for i in range(l, mid+1):
while j<=r and arr[i]>arr[j]*2:
j+=1
count+=j-(mid+1)
sort(arr, l, r)
return count
def sort(arr, l, r):
start=l
temp=[]
if l>=r:
return
mid=(l+r)//2
j=mid+1
while start<=mid and j<=r:
if arr[start]<=arr[j]:
temp.append(arr[start])
start+=1
else:
temp.append(arr[j])
j+=1
while start<=mid:
temp.append(arr[start])
start+=1
while j<=r:
temp.append(arr[j])
j+=1
for i in range(l, r+1):
arr[i]=temp[i-l]
return
if __name__=='__main__':
reversePair()
|
[
"[email protected]"
] | |
9d45f8122dec0c08974e10ec061fc337c8e6c17d
|
901b7d6e062d1cef5b48743c54953edae39a83ff
|
/backend/UserPost/migrations/0009_userpost_privacy.py
|
b60633a0256dfe938307b03eb8f0eea0aefb0fed
|
[] |
no_license
|
KaziMotiour/U-Profile-Social-media
|
e1104e266ab22cac70c60ecd1f0fb2d33eb26961
|
81dedeb4e16e4045ac84bf261bd512e429e6186f
|
refs/heads/master
| 2023-06-09T00:28:24.390944 | 2021-06-29T17:00:13 | 2021-06-29T17:00:13 | 324,720,540 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 505 |
py
|
# Generated by Django 3.1.5 on 2021-03-08 14:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('UserPost', '0008_auto_20210119_0307'),
]
operations = [
migrations.AddField(
model_name='userpost',
name='privacy',
field=models.CharField(blank=True, choices=[('public', 'public'), ('freind', 'freind'), ('onlyme', 'onlyme')], default='freind', max_length=20, null=True),
),
]
|
[
"[email protected]"
] | |
1c1cbb6fb019eda07db42749db4ea05edbf16e23
|
af3ec207381de315f4cb6dddba727d16d42d6c57
|
/dialogue-engine/test/programytest/storage/stores/sql/dao/test_map.py
|
d6ab20b3339a3459a181367867d45e52a42215eb
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mcf-yuichi/cotoba-agent-oss
|
02a5554fe81ce21517f33229101013b6487f5404
|
ce60833915f484c4cbdc54b4b8222d64be4b6c0d
|
refs/heads/master
| 2023-01-12T20:07:34.364188 | 2020-11-11T00:55:16 | 2020-11-11T00:55:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,597 |
py
|
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programy.storage.stores.sql.dao.map import Map
class MapTests(unittest.TestCase):
def test_init(self):
lookup1 = Map(name='map', key="key", value="value")
self.assertIsNotNone(lookup1)
self.assertEqual("<Map(id='n/a', name='map', key='key', value='value')>", str(lookup1))
lookup2 = Map(id=1, name='map', key="key", value="value")
self.assertIsNotNone(lookup2)
self.assertEqual("<Map(id='1', name='map', key='key', value='value')>", str(lookup2))
|
[
"[email protected]"
] | |
0cad79d129e122280773519919929db546c04b35
|
d6a43cbb975c0d5dd2465d6f09c43767d35c121a
|
/tests/data/full.atom.py
|
9296e0f4f1015c1a3f5fe7650a6966818e2ae740
|
[
"BSD-3-Clause"
] |
permissive
|
lemon24/reader
|
a76f5fd3f8dbf9d86e3627bbf9a60732414721cb
|
5e1682c9bfa36d341c03ab804adfb95cfc53f26e
|
refs/heads/master
| 2023-08-17T00:38:03.405077 | 2023-08-16T21:11:25 | 2023-08-16T21:11:25 | 115,272,183 | 349 | 24 |
BSD-3-Clause
| 2022-06-20T19:37:32 | 2017-12-24T15:36:43 |
Python
|
UTF-8
|
Python
| false | false | 2,327 |
py
|
import datetime
from reader import Content
from reader import Enclosure
from reader._types import EntryData
from reader._types import FeedData
feed = FeedData(
url=f'{url_base}full.atom',
updated=datetime.datetime(2003, 12, 13, 18, 30, 2),
title='Example Feed',
link='http://example.org/',
author='John Doe',
subtitle='all your examples are belong to us',
version='atom10',
)
entries = [
EntryData(
feed_url=feed.url,
id='urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a',
updated=datetime.datetime(2003, 12, 13, 18, 30, 2),
title='Atom-Powered Robots Run Amok',
link='http://example.org/2003/12/13/atom03',
author='John Doe',
published=datetime.datetime(2003, 12, 13, 17, 17, 51),
summary='Some text.',
content=(
# the text/plain type comes from feedparser
Content(value='content', type='text/plain'),
Content(value='content with type', type='text/whatever'),
Content(value='content with lang', type='text/plain', language='en'),
),
enclosures=(
# the text/html type comes from feedparser
Enclosure(href='http://example.org/enclosure', type='text/html'),
Enclosure(
href='http://example.org/enclosure-with-type', type='text/whatever'
),
Enclosure(
href='http://example.org/enclosure-with-length',
type='text/html',
length=1000,
),
Enclosure(
href='http://example.org/enclosure-with-bad-length', type='text/html'
),
)
+ (
# feedparser resolves empty href to the base,
# but only for Atom, and only if the base has a scheme(?);
# document this (somewhat pointless) behavior
(Enclosure(href=feed.url, type='text/html'),)
if feed.url.startswith('http')
else ()
),
),
EntryData(
feed_url=feed.url,
id='urn:uuid:00000000-cfb8-4ebb-aaaa-00000000000',
updated=datetime.datetime(2003, 12, 13, 0, 0, 0),
title='Atom-Powered Robots Run Amok Again',
# link comes from feedparser
link='urn:uuid:00000000-cfb8-4ebb-aaaa-00000000000',
),
]
|
[
"[email protected]"
] | |
a8b98c7f070d0e848c24f545f4bbe15d3ad0aeea
|
892dd32ee0be7135cd33c875b06dcc66307dcc99
|
/automation/MPTS/sample/TSMOnly.py
|
e8f67c541b758ced10e2ead3362d47a562c65066
|
[] |
no_license
|
cloudbytestorage/devops
|
6d21ed0afd752bdde8cefa448d4433b435493ffa
|
b18193b08ba3d6538277ba48253c29d6a96b0b4a
|
refs/heads/master
| 2020-05-29T08:48:34.489204 | 2018-01-03T09:28:53 | 2018-01-03T09:28:53 | 68,889,307 | 4 | 8 | null | 2017-11-30T08:11:39 | 2016-09-22T05:53:44 |
Python
|
UTF-8
|
Python
| false | false | 3,749 |
py
|
import json
import requests
import time
#NoofAccounts=_MyValue_
#NoofTSMs=_MyValue_
#NoofNFSVolumes=_MyValue_
#NoofISCSIVolumes=_MyValue_
#### Function(s) Declaration Begins
def sendrequest(url, querystring):
    # print url + querystring
    response = requests.get(
        url + querystring, verify=False
    )
    return response
def filesave(loglocation,permission,content):
f=open(loglocation,permission)
f.write(content.text)
f.close()
return;
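# poll the async-job API every 2 seconds until the job leaves status 0 (still running)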
def queryAsyncJobResult(jobid):
querycommand = 'command=queryAsyncJobResult&jobId=%s' %(jobid)
check_createTSM = sendrequest(stdurl, querycommand)
data = json.loads(check_createTSM.text)
status = data["queryasyncjobresultresponse"]["jobstatus"]
filesave("logs/queryAsyncJobResult.txt","w",check_createTSM)
if status == 0 :
print "Processing ..."
time.sleep(2);
queryAsyncJobResult(jobid);
else :
#print "status : "
return ;
#### Function(s) Declaration Ends
config = {}
with open('config.txt') as cfg:
config = json.load(cfg)
stdurl = 'https://%s/client/api?apikey=%s&response=%s&' %(config['host'], config['apikey'], config['response'])
#querycommand = 'command=%s' %(config['command'])
######## To Make A TSM Begins here
print "TSM Creation Begins"
for x in range(1, int(config['Number_of_TSMs'])+1):
#for x in range (1, NoofTSMs+1):
    ### Stages 1 to 8 follow; prior to those, two commands just list existing pools and accounts.
#querycommand = 'command=createAccount&name=%s&description=%s' %(config['tsmName%d' %(x)], config['tsmDescription%d' %(x)])
querycommand = 'command=listHAPool'
resp_listHAPool = sendrequest(stdurl,querycommand)
filesave("logs/CurrentHAPoolList.txt","w",resp_listHAPool)
data = json.loads(resp_listHAPool.text)
hapools = data["listHAPoolResponse"]["hapool"]
for hapool in hapools:
if hapool['name'] == "%s" %(config['tsmPoolName%d' %(x)]):
pool_id = hapool['id']
break
#print "Poolid =" ,pool_id
querycommand = 'command=listAccount'
resp_listAccount = sendrequest(stdurl, querycommand)
filesave("logs/CurrentAccountList.txt", "w", resp_listAccount)
data = json.loads(resp_listAccount.text)
accounts = data["listAccountResponse"]["account"]
for account in accounts:
if account['name'] == "%s" %(config['tsmAccountName%d' %(x)]):
account_id = account['id']
break
#print "Accountid =", account_id
#Stage1 Command addTSM
    querycommand = 'command=createTsm&accountid=%s&poolid=%s&name=%s&ipaddress=%s&subnet=%s&router=%s&dnsname=%s&dnsserver=%s&tntinterface=%s&gracecontrol=%s&graceallowed=%s&blocksize=%s&latency=%s&iopscontrol=%s&totaliops=%s&tpcontrol=%s&totalthroughput=%s&backuptpcontrol=%s&totalbackupthroughput=%s&quotasize=%s' %(account_id, pool_id, config['tsmName%d' %(x)], config['tsmIPAddress%d' %(x)], config['tsmSubnet%d' %(x)], config['tsmRouter%d' %(x)], config['tsmDNSName%d' %(x)], config['tsmDNSServer%d' %(x)], config['tsmTntInterface%d' %(x)], config['tsmGraceControl%d' %(x)], config['tsmGraceAllowed%d' %(x)], config['tsmBlocksize%d' %(x)], config['tsmLatency%d' %(x)], config['tsmIopsControl%d' %(x)], config['tsmTotalIops%d' %(x)], config['tsmTpControl%d' %(x)], config['tsmTotalThroughput%d' %(x)], config['tsmBackupTpcontrol%d' %(x)], config['tsmTotalBackupThroughput%d' %(x)], config['tsmQuotasize%d' %(x)])
resp_addTsm = sendrequest(stdurl, querycommand)
filesave("logs/AddTsm.txt", "w", resp_addTsm)
data = json.loads(resp_addTsm.text)
job_id = data["addTsmResponse"]["jobid"]
queryAsyncJobResult(job_id);
print "\nTSM %d Created\n" %(x);
print "TSM Creation Done"
##### TSM Creation ends here
|
[
"[email protected]"
] | |
1e98faea29ed35a5e871fd179b7c6cfae81638ac
|
61187748235cc5fd13f85d7227ec9c35b260729b
|
/djangocms_blog/south_migrations/0016_auto__add_field_blogconfigtranslation_object_name.py
|
22c47d916326d4ee5388ac0a63699c53f272884a
|
[] |
no_license
|
cluster-master/djangocms-blog
|
e5c71446f93a1fc54c1cacdc69d2dc5dfea36868
|
52f46f797641a2d9d2498a6cdb4739c008eb5ea9
|
refs/heads/develop
| 2021-01-14T14:02:34.526321 | 2016-01-04T12:49:47 | 2016-01-04T12:49:47 | 47,971,627 | 0 | 0 | null | 2015-12-14T11:39:54 | 2015-12-14T11:39:54 | null |
UTF-8
|
Python
| false | false | 19,616 |
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from djangocms_blog.settings import get_setting
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'BlogConfigTranslation.object_name'
db.add_column('djangocms_blog_blogconfig_translation', 'object_name',
self.gf('django.db.models.fields.CharField')(default=get_setting('DEFAULT_OBJECT_NAME'), max_length=234),
keep_default=False)
def backwards(self, orm):
# Deleting field 'BlogConfigTranslation.object_name'
db.delete_column('djangocms_blog_blogconfig_translation', 'object_name')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'object_name': 'Permission', 'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'blank': 'True', 'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '30'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['auth.Group']", 'related_name': "'user_set'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '30'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['auth.Permission']", 'related_name': "'user_set'", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['cms.CMSPlugin']", 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['cms.Placeholder']"}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'cmsplugin_filer_image.thumbnailoption': {
'Meta': {'object_name': 'ThumbnailOption', 'ordering': "('width', 'height')"},
'crop': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'height': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'upscale': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'width': ('django.db.models.fields.IntegerField', [], {})
},
'contenttypes.contenttype': {
'Meta': {'db_table': "'django_content_type'", 'object_name': 'ContentType', 'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'djangocms_blog.authorentriesplugin': {
'Meta': {'object_name': 'AuthorEntriesPlugin'},
'app_config': ('aldryn_apphooks_config.fields.AppHookConfigField', [], {'null': 'True', 'to': "orm['djangocms_blog.BlogConfig']", 'blank': 'True'}),
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['auth.User']"}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'primary_key': 'True', 'to': "orm['cms.CMSPlugin']", 'unique': 'True'}),
'latest_posts': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
'djangocms_blog.blogcategory': {
'Meta': {'object_name': 'BlogCategory'},
'app_config': ('aldryn_apphooks_config.fields.AppHookConfigField', [], {'null': 'True', 'to': "orm['djangocms_blog.BlogConfig']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['djangocms_blog.BlogCategory']", 'blank': 'True'})
},
'djangocms_blog.blogcategorytranslation': {
'Meta': {'db_table': "'djangocms_blog_blogcategory_translation'", 'object_name': 'BlogCategoryTranslation', 'unique_together': "[('language_code', 'slug'), ('language_code', 'master')]"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['djangocms_blog.BlogCategory']", 'related_name': "'translations'"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'blank': 'True', 'max_length': '50'})
},
'djangocms_blog.blogconfig': {
'Meta': {'object_name': 'BlogConfig'},
'app_data': ('app_data.fields.AppDataField', [], {'default': "'{}'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'namespace': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '100'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'djangocms_blog.blogconfigtranslation': {
'Meta': {'db_table': "'djangocms_blog_blogconfig_translation'", 'object_name': 'BlogConfigTranslation', 'unique_together': "[('language_code', 'master')]"},
'app_title': ('django.db.models.fields.CharField', [], {'max_length': '234'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['djangocms_blog.BlogConfig']", 'related_name': "'translations'"}),
'object_name': ('django.db.models.fields.CharField', [], {'default': "'Post'", 'max_length': '234'})
},
'djangocms_blog.genericblogplugin': {
'Meta': {'object_name': 'GenericBlogPlugin'},
'app_config': ('aldryn_apphooks_config.fields.AppHookConfigField', [], {'null': 'True', 'to': "orm['djangocms_blog.BlogConfig']", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'primary_key': 'True', 'to': "orm['cms.CMSPlugin']", 'unique': 'True'})
},
'djangocms_blog.latestpostsplugin': {
'Meta': {'object_name': 'LatestPostsPlugin'},
'app_config': ('aldryn_apphooks_config.fields.AppHookConfigField', [], {'null': 'True', 'to': "orm['djangocms_blog.BlogConfig']", 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['djangocms_blog.BlogCategory']", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'primary_key': 'True', 'to': "orm['cms.CMSPlugin']", 'unique': 'True'}),
'latest_posts': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
'djangocms_blog.post': {
'Meta': {'object_name': 'Post', 'ordering': "('-date_published', '-date_created')"},
'app_config': ('aldryn_apphooks_config.fields.AppHookConfigField', [], {'null': 'True', 'to': "orm['djangocms_blog.BlogConfig']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['auth.User']", 'related_name': "'djangocms_blog_post_author'", 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['djangocms_blog.BlogCategory']", 'related_name': "'blog_posts'"}),
'content': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['cms.Placeholder']", 'related_name': "'post_content'"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'date_published': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_published_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_image': ('django.db.models.fields.related.ForeignKey', [], {'on_delete': 'models.SET_NULL', 'null': 'True', 'to': "orm['filer.Image']", 'related_name': "'djangocms_blog_post_image'", 'blank': 'True'}),
'main_image_full': ('django.db.models.fields.related.ForeignKey', [], {'on_delete': 'models.SET_NULL', 'null': 'True', 'to': "orm['cmsplugin_filer_image.ThumbnailOption']", 'related_name': "'djangocms_blog_post_full'", 'blank': 'True'}),
'main_image_thumbnail': ('django.db.models.fields.related.ForeignKey', [], {'on_delete': 'models.SET_NULL', 'null': 'True', 'to': "orm['cmsplugin_filer_image.ThumbnailOption']", 'related_name': "'djangocms_blog_post_thumbnail'", 'blank': 'True'}),
'publish': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'blank': 'True'})
},
'djangocms_blog.posttranslation': {
'Meta': {'db_table': "'djangocms_blog_post_translation'", 'object_name': 'PostTranslation', 'unique_together': "[('language_code', 'slug'), ('language_code', 'master')]"},
'abstract': ('djangocms_text_ckeditor.fields.HTMLField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['djangocms_blog.Post']", 'related_name': "'translations'"}),
'meta_description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'meta_title': ('django.db.models.fields.CharField', [], {'default': "''", 'blank': 'True', 'max_length': '255'}),
'post_text': ('djangocms_text_ckeditor.fields.HTMLField', [], {'default': "''", 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'blank': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'null': 'True', 'blank': 'True', 'max_length': '255'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['filer.Folder']", 'related_name': "'all_files'", 'blank': 'True'}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'blank': 'True', 'max_length': '255'}),
'original_filename': ('django.db.models.fields.CharField', [], {'null': 'True', 'blank': 'True', 'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['auth.User']", 'related_name': "'owned_files'", 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['contenttypes.ContentType']", 'related_name': "'polymorphic_filer.file_set+'"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'blank': 'True', 'max_length': '40'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'object_name': 'Folder', 'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['auth.User']", 'related_name': "'filer_owned_folders'", 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['filer.Folder']", 'related_name': "'children'", 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image'},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'null': 'True', 'blank': 'True', 'max_length': '255'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'null': 'True', 'blank': 'True', 'max_length': '255'}),
'default_caption': ('django.db.models.fields.CharField', [], {'null': 'True', 'blank': 'True', 'max_length': '255'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'primary_key': 'True', 'to': "orm['filer.File']", 'unique': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'null': 'True', 'blank': 'True', 'max_length': '64'})
},
'sites.site': {
'Meta': {'db_table': "'django_site'", 'object_name': 'Site', 'ordering': "('domain',)"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['djangocms_blog']
|
[
"[email protected]"
] | |
8021b7fcb0d5d9321c202c7efa6aa736d098d313
|
2f9c2bb2c8d32368f90ef798c08848cec4ea2ebd
|
/tests/unit/flow/test_asyncflow.py
|
5cf46aaf6c1d714e2dbd80eeda95f8804342fc62
|
[
"Apache-2.0"
] |
permissive
|
automation555/jina
|
9e0aafd9d894bd5995f091ea0f8566a9ed0f781d
|
337526c00265190fc45235b80df10c0a75b51c09
|
refs/heads/master
| 2023-06-03T04:33:18.460871 | 2021-06-17T08:51:21 | 2021-06-17T08:51:21 | 377,765,051 | 0 | 0 |
Apache-2.0
| 2021-06-17T08:55:30 | 2021-06-17T08:50:48 |
Python
|
UTF-8
|
Python
| false | false | 5,500 |
py
|
import asyncio
import time
import numpy as np
import pytest
from jina import Document, Flow
from jina.flow.asyncio import AsyncFlow
from jina.logging.profile import TimeContext
from jina.types.document.generators import from_ndarray
from jina.types.request import Response
from tests import validate_callback
num_docs = 5
def validate(req):
assert len(req.docs) == num_docs
assert req.docs[0].blob.ndim == 1
# TODO(Deepankar): with `restful: True` a few of the asyncio tests are flaky.
# Though they run fine locally, they fail with `RuntimeError - Event loop closed` in CI (disabling for now)
def documents(start_index, end_index):
for i in range(start_index, end_index):
with Document() as doc:
doc.text = 'this is text'
doc.tags['id'] = 'id in tags'
doc.tags['inner_dict'] = {'id': 'id in inner_dict'}
with Document() as chunk:
chunk.text = 'text in chunk'
chunk.tags['id'] = 'id in chunk tags'
doc.chunks.append(chunk)
yield doc
@pytest.mark.asyncio
@pytest.mark.parametrize('restful', [False])
@pytest.mark.parametrize('flow_cls', [Flow, AsyncFlow])
async def test_run_async_flow(restful, mocker, flow_cls):
r_val = mocker.Mock()
with flow_cls(restful=restful, asyncio=True).add() as f:
async for r in f.index(
from_ndarray(np.random.random([num_docs, 4])), on_done=r_val
):
assert isinstance(r, Response)
validate_callback(r_val, validate)
async def async_input_function():
for _ in range(num_docs):
yield Document(content=np.random.random([4]))
await asyncio.sleep(0.1)
async def async_input_function2():
for _ in range(num_docs):
yield Document(content=np.random.random([4]))
await asyncio.sleep(0.1)
@pytest.mark.asyncio
@pytest.mark.parametrize('restful', [False])
@pytest.mark.parametrize(
'inputs',
[
async_input_function,
async_input_function(),
async_input_function2(),
async_input_function2,
],
)
async def test_run_async_flow_async_input(restful, inputs, mocker):
r_val = mocker.Mock()
with AsyncFlow(asyncio=True).add() as f:
async for r in f.index(inputs, on_done=r_val):
assert isinstance(r, Response)
validate_callback(r_val, validate)
async def run_async_flow_5s(restful):
    # WaitDriver pauses for 5s, making the total roundtrip ~5s
from jina import Executor, requests
class Wait5s(Executor):
@requests
def foo(self, **kwargs):
print('im called!')
time.sleep(5)
with Flow(restful=restful, asyncio=True).add(uses=Wait5s) as f:
async for r in f.index(
from_ndarray(np.random.random([num_docs, 4])),
on_done=validate,
):
assert isinstance(r, Response)
async def sleep_print():
# total roundtrip takes ~5s
print('heavylifting other io-bound jobs, e.g. download, upload, file io')
await asyncio.sleep(5)
print('heavylifting done after 5s')
async def concurrent_main(restful):
# about 5s; but some dispatch cost, can't be just 5s, usually at <7s
await asyncio.gather(run_async_flow_5s(restful), sleep_print())
async def sequential_main(restful):
# about 10s; with some dispatch cost , usually at <12s
await run_async_flow_5s(restful)
await sleep_print()
@pytest.mark.asyncio
@pytest.mark.parametrize('restful', [False])
async def test_run_async_flow_other_task_sequential(restful):
with TimeContext('sequential await') as t:
await sequential_main(restful)
assert t.duration >= 10
@pytest.mark.asyncio
@pytest.mark.parametrize('restful', [False])
async def test_run_async_flow_other_task_concurrent(restful):
with TimeContext('concurrent await') as t:
await concurrent_main(restful)
# some dispatch cost, can't be just 5s, usually at 7~8s, but must <10s
assert t.duration < 10
@pytest.mark.asyncio
@pytest.mark.parametrize('return_results', [False])
@pytest.mark.parametrize('restful', [False])
@pytest.mark.parametrize('flow_cls', [Flow, AsyncFlow])
async def test_return_results_async_flow(return_results, restful, flow_cls):
with flow_cls(
restful=restful, asyncio=True, return_results=return_results
).add() as f:
async for r in f.index(from_ndarray(np.random.random([10, 2]))):
assert isinstance(r, Response)
@pytest.mark.asyncio
@pytest.mark.parametrize('return_results', [False, True])
@pytest.mark.parametrize('restful', [False])
@pytest.mark.parametrize('flow_api', ['delete', 'index', 'update', 'search'])
@pytest.mark.parametrize('flow_cls', [Flow, AsyncFlow])
async def test_return_results_async_flow_crud(
return_results, restful, flow_api, flow_cls
):
with flow_cls(
restful=restful, asyncio=True, return_results=return_results
).add() as f:
async for r in getattr(f, flow_api)(documents(0, 10)):
assert isinstance(r, Response)
@pytest.mark.asyncio
@pytest.mark.parametrize('flow_cls', [Flow, AsyncFlow])
async def test_async_flow_empty_data(flow_cls):
from jina import Executor, requests
class MyExec(Executor):
@requests
def foo(self, parameters, **kwargs):
assert parameters['hello'] == 'world'
with flow_cls(asyncio=True).add(uses=MyExec) as f:
async for r in f.post('/hello', parameters={'hello': 'world'}):
assert isinstance(r, Response)
|
[
"[email protected]"
] | |
cd9ba9cd628293fc4ec5050ca228e80392fb1503
|
e2992452a3c52f4cbbc64e1686128ad464b71d16
|
/libMe/util/EncodeUtil.py
|
f03ef111ddbb46dd976e0044504b8005c05cc0da
|
[] |
no_license
|
MaGuiSen/studyScrapy
|
6b84605a15027ffc24501d690666f419ebb379fd
|
03604bafe19e55db12677a4af388c8a9198ca572
|
refs/heads/master
| 2021-01-17T04:30:23.858217 | 2017-08-30T01:50:08 | 2017-08-30T01:50:08 | 95,433,695 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,279 |
py
|
# -*- coding: utf-8 -*-
def getCoding(strInput):
"""
    Detect the encoding of the input string: 'unicode', 'utf8', or 'gbk'.
"""
if isinstance(strInput, unicode):
return "unicode"
    try:
        strInput.decode("utf8")
        return 'utf8'
    except UnicodeDecodeError:
        pass
    try:
        strInput.decode("gbk")
        return 'gbk'
    except UnicodeDecodeError:
        pass
def toUnicode(strInput):
"""
    Convert the input string to unicode.
:return:
"""
strCodingFmt = getCoding(strInput)
if strCodingFmt == "utf8":
return strInput.decode('utf8')
elif strCodingFmt == "unicode":
return strInput
elif strCodingFmt == "gbk":
return strInput.decode("gbk")
def tran2UTF8(strInput):
"""
    Convert the input string to UTF-8.
"""
strCodingFmt = getCoding(strInput)
if strCodingFmt == "utf8":
return strInput
elif strCodingFmt == "unicode":
return strInput.encode("utf8")
elif strCodingFmt == "gbk":
return strInput.decode("gbk").encode("utf8")
def tran2GBK(strInput):
"""
    Convert the input string to GBK.
"""
strCodingFmt = getCoding(strInput)
if strCodingFmt == "gbk":
return strInput
elif strCodingFmt == "unicode":
return strInput.encode("gbk")
elif strCodingFmt == "utf8":
return strInput.decode("utf8").encode("gbk")
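# usage sketch (Python 2 only: relies on the `unicode` type and str.decode):
#   getCoding('abc')            # -> 'utf8'
#   tran2UTF8(u'\u4e2d\u6587')  # -> '\xe4\xb8\xad\xe6\x96\x87' (UTF-8 bytes)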
|
[
"[email protected]"
] | |
a47b02fdaab7172d06df7b342db19356ab519101
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/4_set/ๆปๅจๆฟๆขๆดๆฐ/Distinct Coin Sums.py
|
a98649e5d5c1c416fb9cc5913814f3c670f65be5
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null |
UTF-8
|
Python
| false | false | 919 |
py
|
class Solution:
def solve(self, coins, quantities):
"""Return the number of distinct coin sum values you can get by using non-empty group of these coins."""
dp = set([0])
for index, coin in enumerate(coins):
ndp = set()
for pre in dp:
for count in range(quantities[index] + 1):
ndp.add(pre + coin * count)
dp = ndp
return len(dp) - 1
def solve2(self, coins, quantities):
dp = 1
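        # bit i of dp set <=> sum i is reachable; bit 0 (the empty selection) starts set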
for coin, count in zip(coins, quantities):
for _ in range(count):
                # equivalent to a set-union operation: dp | {s + coin for s in dp}
dp |= dp << coin
return bin(dp).count('1') - 1
print(Solution().solve([4, 2, 1], [1, 2, 1]))
# We can have the following distinct coin sums
# [1]
# [2]
# [1, 2]
# [4]
# [1, 4]
# [2, 4]
# [1, 2, 4]
# [2, 2, 4]
# [1, 2, 2, 4]
|
[
"[email protected]"
] | |
92fa1609950bf1451a3c35e5f8b6d7ea0e503658
|
67117705720a3e3d81253ba48c1826d36737b126
|
/Wk10_STRANDS/evaluate_random.py
|
5fd8aa91b959b382d35421fe4925a13425d89ebb
|
[] |
no_license
|
pyliut/Rokos2021
|
41f0f96bc396b6e8a5e268e31a38a4a4b288c370
|
70753ab29afc45766eb502f91b65cc455e6055e1
|
refs/heads/main
| 2023-08-13T17:29:30.013829 | 2021-09-26T19:01:35 | 2021-09-26T19:01:35 | 382,092,802 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,266 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 1 15:15:57 2021
@author: pyliu
"""
import pandas as pd
import numpy as np
import scipy as sp
import time
import random
from random_prior import *
from evaluate_prior import *
def evaluate_random(edge,df_test, df_train, filename_train = "tsc_map.yaml",
metric = "difference", cutoff=100, max_obs = 10,
plot_graph = False, verbose = True,
random_state = None):
"""
Wrapper for random_prior.py & evaluate_prior.py
    max_obs is a scalar int.
"""
mean_test, var_test, prior, t_op_prior, edge_prior = random_prior(df_train,
filename = filename_train,metric = metric,
cutoff=cutoff, max_obs = max_obs,
plot_graph = plot_graph,
random_state = random_state)
ks_random, n_random = evaluate_prior(edge, df_test,
mean_test, var_test, prior, t_op_prior,
metric = metric, verbose = verbose,
random_state = random_state)
return ks_random, n_random
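# Illustrative call sketch (reader's addition; the edge tuple and dataframes are
# hypothetical, and random_prior/evaluate_prior must be importable to run this):
# ks, n = evaluate_random(("WayPoint1", "WayPoint2"), df_test, df_train,
#                         filename_train="tsc_map.yaml", metric="difference",
#                         max_obs=10, random_state=42)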
|
[
"[email protected]"
] | |
cce02dbf8c91ec4ea667196ff84e70b817f7c157
|
b08870f8fe7b3cf1bbab3c52a7bacbb36ee1dcc6
|
/verp/stock/doctype/quick_stock_balance/quick_stock_balance.py
|
34d21c079b2527cb9d590283dc3ba5f86886df3f
|
[] |
no_license
|
vsadminpk18/verpfinalversion
|
7148a64fe6134e2a6371470aceb1b57cc4b5a559
|
93d164b370ad9ca0dd5cda0053082dc3abbd20da
|
refs/heads/master
| 2023-07-13T04:11:59.211046 | 2021-08-27T06:26:48 | 2021-08-27T06:26:48 | 400,410,611 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,120 |
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from verp.stock.utils import get_stock_balance, get_stock_value_on
class QuickStockBalance(Document):
pass
@frappe.whitelist()
def get_stock_item_details(warehouse, date, item=None, barcode=None):
out = {}
if barcode:
out["item"] = frappe.db.get_value(
"Item Barcode", filters={"barcode": barcode}, fieldname=["parent"])
if not out["item"]:
frappe.throw(
_("Invalid Barcode. There is no Item attached to this barcode."))
else:
out["item"] = item
barcodes = frappe.db.get_values("Item Barcode", filters={"parent": out["item"]},
fieldname=["barcode"])
out["barcodes"] = [x[0] for x in barcodes]
out["qty"] = get_stock_balance(out["item"], warehouse, date)
out["value"] = get_stock_value_on(warehouse, date, out["item"])
out["image"] = frappe.db.get_value("Item",
filters={"name": out["item"]}, fieldname=["image"])
return out
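# Illustrative response shape (reader's sketch; warehouse/item names are
# hypothetical and a running Frappe site is required):
# get_stock_item_details("Stores - C", "2021-01-01", item="ITEM-0001")
# -> {"item": "ITEM-0001", "barcodes": ["1234567890"], "qty": 12.0,
#     "value": 480.0, "image": None}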
|
[
"[email protected]"
] | |
6f7e66a72531e3e9d15c5337c50890545e6c8b34
|
ffc1cc3bb7b68335b115122fdc7924fc4e31d528
|
/pro42.py
|
c6c957f50c694f62e26d9d6bbc341b486cd588f5
|
[] |
no_license
|
Rihanashariff/swathi24
|
dba1dd3c3d2ff583ae431b432e0ef262bfeb3ac3
|
2b0d21f2febdd2a563e8f0affeebd5ca7a5821b8
|
refs/heads/master
| 2020-07-02T05:28:32.199982 | 2019-06-29T08:22:10 | 2019-06-29T08:22:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 164 |
py
|
# split array
n,k=map(int,input().split())
l=list(map(int,input().split()))
if k==1:
print(min(l))
elif k==2:
print(max(l[0],l[n-1]))
else:
print(max(l))
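# Reader's note (hedged): this matches the classic "split into k contiguous parts
# and maximize the maximum of the per-part minimums" problem. k == 1: the single
# part is the whole array, so the answer is min(l). k == 2: each part contains an
# endpoint, so no split beats making one endpoint its own part, i.e.
# max(l[0], l[n-1]). k >= 3: the global maximum can be isolated in a part of its
# own, giving max(l).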
|
[
"[email protected]"
] | |
eb754b8caa1a4935595aabf735b2d83766c294f8
|
c5f58af61e3577ded52acda210f4f664651b598c
|
/template/mmdetection/configs/legacy_1.x/mask_rcnn_r50_fpn_1x_coco_v1.py
|
b089c0d4ad86f3f7240624bb3b70c23518049957
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hojihun5516/object-detection-level2-cv-02
|
0a4ee5cea9a77ef5d43fb61a4b37fe3a87cb0eac
|
bc8a08286935b31b8e7e597c4b1ca2cbbaeb9109
|
refs/heads/master
| 2023-08-31T09:50:59.150971 | 2021-10-16T15:00:19 | 2021-10-16T15:00:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,113 |
py
|
_base_ = [
"../_base_/models/mask_rcnn_r50_fpn.py",
"../_base_/datasets/coco_instance.py",
"../_base_/schedules/schedule_1x.py",
"../_base_/default_runtime.py",
]
model = dict(
rpn_head=dict(
anchor_generator=dict(type="LegacyAnchorGenerator", center_offset=0.5),
bbox_coder=dict(type="LegacyDeltaXYWHBBoxCoder"),
loss_bbox=dict(type="SmoothL1Loss", beta=1.0 / 9.0, loss_weight=1.0),
),
roi_head=dict(
bbox_roi_extractor=dict(
type="SingleRoIExtractor", roi_layer=dict(type="RoIAlign", output_size=7, sampling_ratio=2, aligned=False)
),
mask_roi_extractor=dict(
type="SingleRoIExtractor", roi_layer=dict(type="RoIAlign", output_size=14, sampling_ratio=2, aligned=False)
),
bbox_head=dict(
bbox_coder=dict(type="LegacyDeltaXYWHBBoxCoder"),
loss_bbox=dict(type="SmoothL1Loss", beta=1.0, loss_weight=1.0),
),
),
# model training and testing settings
train_cfg=dict(rpn_proposal=dict(max_per_img=2000), rcnn=dict(assigner=dict(match_low_quality=True))),
)
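# Illustrative usage sketch (reader's addition; assumes a standard mmdetection
# checkout where this file lives under configs/legacy_1.x/):
#   python tools/train.py configs/legacy_1.x/mask_rcnn_r50_fpn_1x_coco_v1.py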
|
[
"[email protected]"
] | |
dc5c63b9b7018214aec3114408ab7a45adf6bb20
|
e5cb4f8e6a350b511080e28a4acb4f5fd264f5f9
|
/emission/core/wrapper/confirmedtrip.py
|
3d6357e93f31c0b398c5fbf82d183913e3bd25db
|
[
"BSD-3-Clause"
] |
permissive
|
jf87/e-mission-server
|
cde78e92badc640f307e979bd044da814c31de02
|
2e5fb715b6b9ec88c2c938de08a659d11fac34a6
|
refs/heads/master
| 2021-08-09T00:08:58.838672 | 2021-06-21T08:31:22 | 2021-06-21T08:31:22 | 163,435,995 | 0 | 0 |
BSD-3-Clause
| 2020-08-09T13:32:08 | 2018-12-28T17:50:22 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,095 |
py
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import emission.core.wrapper.trip as ecwt
import emission.core.wrapper.wrapperbase as ecwb
class Confirmedtrip(ecwt.Trip):
props = ecwt.Trip.props
props.update({"raw_trip": ecwb.WrapperBase.Access.WORM,
"cleaned_trip": ecwb.WrapperBase.Access.WORM,
# the confirmed section that is the "primary"
# https://github.com/e-mission/e-mission-docs/issues/476#issuecomment-738120752
"primary_section": ecwb.WrapperBase.Access.WORM,
"inferred_primary_mode": ecwb.WrapperBase.Access.WORM,
# the user input will have all `manual/*` entries
# let's make that be somewhat flexible instead of hardcoding into the data model
"user_input": ecwb.WrapperBase.Access.WORM
})
def _populateDependencies(self):
super(Confirmedtrip, self)._populateDependencies()
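# Reader's gloss (hedged): Access.WORM appears to be the wrapper layer's
# write-once/read-many marker, so these fields are set when the confirmed trip is
# created and treated as immutable afterwards; `user_input` stays schemaless to
# absorb new `manual/*` entry types without a data-model change.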
|
[
"[email protected]"
] | |
5e8021a95005100a27855482ebdecd37028530ca
|
15fbf63eedc5a19836ff198bd2b80117e356955a
|
/stickerfinder/telegram/callback_handlers/report.py
|
738bd113d52daf995eb74a28b3ab42892717e2cd
|
[
"MIT"
] |
permissive
|
drmhdh/sticker-finder
|
cf2656160bee45d1b51ddda130a766f48fecfe8a
|
688656a4b67c1e5057c0d1dc5b21201f0466b7ab
|
refs/heads/master
| 2022-01-26T14:39:45.366537 | 2019-05-02T20:32:11 | 2019-05-02T20:32:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,608 |
py
|
"""Module for handling sticker set voting task buttons."""
from stickerfinder.models import Task
from stickerfinder.helper.maintenance import check_maintenance_chat
from stickerfinder.helper.callback import CallbackResult
from stickerfinder.helper.telegram import call_tg_func
from stickerfinder.helper.keyboard import get_report_keyboard
def handle_report_ban(session, action, query, payload, chat, tg_chat):
"""Handle the ban button of voting tasks in maintenance channels."""
task = session.query(Task).get(payload)
if CallbackResult(action).name == 'ban':
task.sticker_set.banned = True
        call_tg_func(query, 'answer', ['Set banned'])
else:
task.sticker_set.banned = False
        call_tg_func(query, 'answer', ['Set unbanned'])
session.commit()
keyboard = get_report_keyboard(task)
call_tg_func(query.message, 'edit_reply_markup', [], {'reply_markup': keyboard})
def handle_report_nsfw(session, action, query, payload, chat, tg_chat):
"""Handle the nsfw button of voting tasks in maintenance channels."""
task = session.query(Task).get(payload)
if CallbackResult(action).name == 'ban':
task.sticker_set.nsfw = True
        call_tg_func(query, 'answer', ['Set tagged as nsfw'])
else:
task.sticker_set.nsfw = False
        call_tg_func(query, 'answer', ['Set no longer tagged as nsfw'])
session.commit()
keyboard = get_report_keyboard(task)
call_tg_func(query.message, 'edit_reply_markup', [], {'reply_markup': keyboard})
def handle_report_furry(session, action, query, payload, chat, tg_chat):
"""Handle the furry button of voting tasks in maintenance channels."""
task = session.query(Task).get(payload)
if CallbackResult(action).name == 'ban':
task.sticker_set.furry = True
call_tg_func(query, 'answer', ['Set tagged as furry'])
else:
task.sticker_set.furry = False
        call_tg_func(query, 'answer', ['Set no longer tagged as furry'])
session.commit()
keyboard = get_report_keyboard(task)
call_tg_func(query.message, 'edit_reply_markup', [], {'reply_markup': keyboard})
def handle_report_next(session, action, query, payload, chat, tg_chat):
"""Handle the nextbutton of voting tasks in maintenance channels."""
task = session.query(Task).get(payload)
if not task.reviewed:
task.reviewed = True
check_maintenance_chat(session, tg_chat, chat)
try:
keyboard = get_report_keyboard(task)
call_tg_func(query.message, 'edit_reply_markup', [], {'reply_markup': keyboard})
except: # noqa
return
|
[
"[email protected]"
] | |
1928029f64bfc831e014cbfda0d1844428cc4025
|
524591f2c4f760bc01c12fea3061833847a4ff9a
|
/arm/opt/ros/kinetic/lib/python2.7/dist-packages/shape_msgs/msg/_SolidPrimitive.py
|
cd7fab91d3d5ac29f8339b1ff910e358710e246d
|
[
"BSD-3-Clause"
] |
permissive
|
Roboy/roboy_plexus
|
6f78d45c52055d97159fd4d0ca8e0f32f1fbd07e
|
1f3039edd24c059459563cb81d194326fe824905
|
refs/heads/roboy3
| 2023-03-10T15:01:34.703853 | 2021-08-16T13:42:54 | 2021-08-16T13:42:54 | 101,666,005 | 2 | 4 |
BSD-3-Clause
| 2022-10-22T13:43:45 | 2017-08-28T16:53:52 |
C++
|
UTF-8
|
Python
| false | false | 5,767 |
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from shape_msgs/SolidPrimitive.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class SolidPrimitive(genpy.Message):
_md5sum = "d8f8cbc74c5ff283fca29569ccefb45d"
_type = "shape_msgs/SolidPrimitive"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# Define box, sphere, cylinder, cone
# All shapes are defined to have their bounding boxes centered around 0,0,0.
uint8 BOX=1
uint8 SPHERE=2
uint8 CYLINDER=3
uint8 CONE=4
# The type of the shape
uint8 type
# The dimensions of the shape
float64[] dimensions
# The meaning of the shape dimensions: each constant defines the index in the 'dimensions' array
# For the BOX type, the X, Y, and Z dimensions are the length of the corresponding
# sides of the box.
uint8 BOX_X=0
uint8 BOX_Y=1
uint8 BOX_Z=2
# For the SPHERE type, only one component is used, and it gives the radius of
# the sphere.
uint8 SPHERE_RADIUS=0
# For the CYLINDER and CONE types, the center line is oriented along
# the Z axis. Therefore the CYLINDER_HEIGHT (CONE_HEIGHT) component
# of dimensions gives the height of the cylinder (cone). The
# CYLINDER_RADIUS (CONE_RADIUS) component of dimensions gives the
# radius of the base of the cylinder (cone). Cone and cylinder
# primitives are defined to be circular. The tip of the cone is
# pointing up, along +Z axis.
uint8 CYLINDER_HEIGHT=0
uint8 CYLINDER_RADIUS=1
uint8 CONE_HEIGHT=0
uint8 CONE_RADIUS=1
"""
# Pseudo-constants
BOX = 1
SPHERE = 2
CYLINDER = 3
CONE = 4
BOX_X = 0
BOX_Y = 1
BOX_Z = 2
SPHERE_RADIUS = 0
CYLINDER_HEIGHT = 0
CYLINDER_RADIUS = 1
CONE_HEIGHT = 0
CONE_RADIUS = 1
__slots__ = ['type','dimensions']
_slot_types = ['uint8','float64[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
type,dimensions
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(SolidPrimitive, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.type is None:
self.type = 0
if self.dimensions is None:
self.dimensions = []
else:
self.type = 0
self.dimensions = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_get_struct_B().pack(self.type))
length = len(self.dimensions)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(struct.pack(pattern, *self.dimensions))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 1
(self.type,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
self.dimensions = struct.unpack(pattern, str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_get_struct_B().pack(self.type))
length = len(self.dimensions)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(self.dimensions.tostring())
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 1
(self.type,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
self.dimensions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
|
[
"[email protected]"
] | |
752b3698e3219d1ca10929b4519afa6a755b629c
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_210/139.py
|
f73e8657befbc6ae924e000ab345415d1206ec8b
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 699 |
py
|
from math import pi
f = open('ans.txt', 'w')
c = int(input())
for i in range(1,c+1):
c, j = map(int, input().split())
if (c == 1 and j == 0) or (j == 1 and c == 0):
a, b = map(int, input().split())
f.write(f"Case #{i}: 2\n")
elif c == 2 or j == 2:
a, b = map(int, input().split())
a2, b2 = map(int, input().split())
if (0 < b2 +1440 - a <= 720) or (0 < b+1440-a2 <= 720)or (0 < b2-a <= 720) or (0 < b-a2 <= 720):
f.write(f"Case #{i}: 2\n")
else:
f.write(f"Case #{i}: 4\n")
else:
a, b = map(int, input().split())
a2, b2 = map(int, input().split())
f.write(f"Case #{i}: 2\n")
f.close()
|
[
"[email protected]"
] | |
7b651e837a4110a20976f6d98064b699397c31ba
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/tree-big-8344.py
|
644eb649e65a97fd424f5ddf28558341f9fda473
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 23,292 |
py
|
# Binary-search trees
class TreeNode(object):
value:int = 0
left:"TreeNode" = None
right:"TreeNode" = None
def insert(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode(x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode(x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode2(object):
value:int = 0
value2:int = 0
left:"TreeNode2" = None
left2:"TreeNode2" = None
right:"TreeNode2" = None
right2:"TreeNode2" = None
def insert(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode3(object):
value:int = 0
value2:int = 0
value3:int = 0
left:"TreeNode3" = None
left2:"TreeNode3" = None
left3:"TreeNode3" = None
right:"TreeNode3" = None
right2:"TreeNode3" = None
right3:"TreeNode3" = None
def insert(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode4(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
left:"TreeNode4" = None
left2:"TreeNode4" = None
left3:"TreeNode4" = None
left4:"TreeNode4" = None
right:"TreeNode4" = None
right2:"TreeNode4" = None
right3:"TreeNode4" = None
right4:"TreeNode4" = None
def insert(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode5(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
value5:int = 0
left:"TreeNode5" = None
left2:"TreeNode5" = None
left3:"TreeNode5" = None
left4:"TreeNode5" = None
left5:"TreeNode5" = None
right:"TreeNode5" = None
right2:"TreeNode5" = None
right3:"TreeNode5" = None
right4:"TreeNode5" = None
right5:"TreeNode5" = None
def insert(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class Tree(object):
root:TreeNode = None
size:int = 0
def insert(self:"Tree", x:int) -> object:
if self.root is None:
self.root = makeNode(x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree2(object):
root:TreeNode2 = None
root2:TreeNode2 = None
size:int = 0
size2:int = 0
def insert(self:"Tree2", x:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree2", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree2", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree2", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree3(object):
root:TreeNode3 = None
root2:TreeNode3 = None
root3:TreeNode3 = None
size:int = 0
size2:int = 0
size3:int = 0
def insert(self:"Tree3", x:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree3", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree3", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree3", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree4(object):
root:TreeNode4 = None
root2:TreeNode4 = None
root3:TreeNode4 = None
root4:TreeNode4 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
def insert(self:"Tree4", x:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree4", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree4", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree4", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree5(object):
root:TreeNode5 = None
root2:TreeNode5 = None
root3:TreeNode5 = None
root4:TreeNode5 = None
root5:TreeNode5 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
size5:int = 0
def insert(self:"Tree5", x:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree5", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree5", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree5", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
b:TreeNode = None
b = TreeNode()
b.value = x
return b
def makeNode2(x: int, x2: int) -> TreeNode2:
b:TreeNode2 = None
b2:TreeNode2 = None
b = TreeNode2()
b.value = x
return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
b:TreeNode3 = None
b2:TreeNode3 = None
b3:TreeNode3 = None
b = TreeNode3()
b.value = x
return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
b:TreeNode4 = None
b2:TreeNode4 = None
b3:TreeNode4 = None
b4:TreeNode4 = None
b = TreeNode4()
b.value = x
return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
b:TreeNode5 = None
b2:TreeNode5 = None
b3:TreeNode5 = None
b4:TreeNode5 = None
b5:TreeNode5 = None
b = TreeNode5()
b.value = x
return b
# Input parameters
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
t = Tree()
while i < n:
t.insert(k)
k = (k * 37813) % 37831
if i % c != 0:
t.insert(i)
    i = i + 1  # the source had an unexpanded "$INT" template placeholder here; 1 assumed so the loop terminates
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
if t.contains(i):
print(i)
|
[
"[email protected]"
] | |
28759365923d8f64d0f27f0cc86e033caa637517
|
ebc7607785e8bcd6825df9e8daccd38adc26ba7b
|
/python/baekjoon/2.algorithm/implementation/백준_청소년_상어.py
|
ec642c5b7e07bf253b3c70c6b6aca33a2a3575c5
|
[] |
no_license
|
galid1/Algorithm
|
18d1b72b0d5225f99b193e8892d8b513a853d53a
|
5bd69e73332f4dd61656ccdecd59c40a2fedb4b2
|
refs/heads/master
| 2022-02-12T07:38:14.032073 | 2022-02-05T08:34:46 | 2022-02-05T08:34:46 | 179,923,655 | 3 | 0 | null | 2019-06-14T07:18:14 | 2019-04-07T05:49:06 |
Python
|
UTF-8
|
Python
| false | false | 2,421 |
py
|
import sys, copy
def get_fish_idx(x, y, fish_list):
for idx, fish in enumerate(fish_list):
if fish[2][0] == x and fish[2][1] == y:
return idx
# ํด๋น ์ขํ์ ๋ฌผ๊ณ ๊ธฐ๊ฐ ์๋ ๊ฒฝ์ฐ
return -1
def is_valid(x, y):
return 0 <= x < 4 and 0 <= y < 4
def move_fishes(sx, sy, fish_list):
global ds
for idx, fish in enumerate(fish_list):
cur_d = fish[1]
cx, cy = fish[2]
for _ in range(8):
nx, ny = cx+ds[cur_d][0], cy + ds[cur_d][1]
# ์ด๋ ๊ฐ๋ฅ
if (nx != sx or ny != sy) and is_valid(nx, ny):
will_fish_idx = get_fish_idx(nx, ny, fish_list)
# ์ด๋ํ๋ ค๋ ์นธ์ ๋ฌผ๊ณ ๊ธฐ ์กด์ฌ
if will_fish_idx != -1:
fish_list[will_fish_idx][2] = [cx, cy]
fish_list[idx][1] = cur_d
fish_list[idx][2] = [nx, ny]
break
cur_d = (cur_d + 1) % 8
def solve(sx, sy, sd, eat_fish_sum, fish_list):
global ds, ans
move_fishes(sx, sy, fish_list)
cx, cy = sx, sy
while True:
nsx, nsy = cx+ds[sd][0], cy+ds[sd][1]
# ๋์ด์ ์ด๋ ๋ถ๊ฐ
if not is_valid(nsx, nsy) or not fish_list:
ans = max(ans, eat_fish_sum)
return
will_eat_fish_idx = get_fish_idx(nsx, nsy, fish_list)
# ์ด๋ํ๋ ค๋ ์นธ์ ๋ฌผ๊ณ ๊ธฐ ์กด์ฌ
if will_eat_fish_idx != -1:
will_eat_fish_num = fish_list[will_eat_fish_idx][0]
will_eat_fish_dir = fish_list[will_eat_fish_idx][1]
n_fish_list = copy.deepcopy(fish_list)
del n_fish_list[will_eat_fish_idx]
solve(nsx, nsy, will_eat_fish_dir, eat_fish_sum + will_eat_fish_num, n_fish_list)
cx, cy = nsx, nsy
# ์
๋ ฅ
ds = [[-1, 0], [-1, -1], [0, -1], [1, -1], [1, 0], [1, 1], [0, 1], [-1, 1]]
fishes = []
for i in range(4):
infos = list(map(int, sys.stdin.readline().strip().split(" ")))
for j in range(0, 8, 2):
# ๋ฒํธ, ๋ฐฉํฅ ์์น
fishes.append([infos[j], infos[j + 1] - 1, [i, j // 2]])
fishes.sort(key=lambda fish: fish[0])
zero_zero_fish_idx = get_fish_idx(0, 0, fishes)
eat_fish_sum = fishes[zero_zero_fish_idx][0]
sd = fishes[zero_zero_fish_idx][1]
del fishes[zero_zero_fish_idx]
ans = eat_fish_sum
solve(0, 0, sd, eat_fish_sum, fishes)
print(ans)
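# Illustrative input sketch (reader's addition): four lines, each holding eight
# integers -- (fish number, direction) pairs for the four cells of that row of
# the 4x4 board. A hypothetical first line could look like:
#   7 6 2 3 15 6 9 8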
|
[
"[email protected]"
] | |
a247dd35bfeefdfd7695bf5a39e69db77ff084f0
|
fc61c9d899ef9c8ddadac0875ab738df28be272e
|
/python/python-online-lec/ch4/python_advanced/ch04-01.py
|
cee48df2e2ff9a08a9daa072f373c86f7ad177cc
|
[
"MIT"
] |
permissive
|
hbyyy/TIL
|
4908b91d880df268f5f7b8a43703f673183f2e8b
|
e89ae2913a8a38eb7f480a9ec2324c3ac11e309e
|
refs/heads/master
| 2022-12-21T04:34:13.896078 | 2021-05-24T13:19:57 | 2021-05-24T13:19:57 | 219,916,943 | 0 | 0 |
MIT
| 2022-12-12T05:15:04 | 2019-11-06T05:12:55 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 215 |
py
|
# first class function
print(set(dir(iter([1, 2, 3, 4, 5]))) - set(dir(range(10))))
def factorial(n: int) -> int:
if n == 1:
return n
return n * factorial(n-1)
print([*map(factorial, range(1, 6))])
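# Reader's note (CPython 3.x): the set difference above surfaces iterator-only
# attributes -- typically {'__next__', '__length_hint__', '__setstate__'} -- that
# a range object lacks. Also note factorial() assumes n >= 1; factorial(0) would
# recurse past the base case until a RecursionError.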
|
[
"[email protected]"
] | |
d0b6e06ab67e1a7dec9b3228f774cbce4c4df7db
|
e59e711e81536f027b10f7033a698ab9e39d489c
|
/Geometry/CaloEventSetup/python/HFNoseTopology_cfi.py
|
031fe99595ff33182b0da4158489904226849541
|
[
"Apache-2.0"
] |
permissive
|
AndreaBellora/cmssw
|
73e5f668dfd188bfedcb532b2bd364c486561b03
|
26cf1918ad4133f8178e303a68d97fc58fbeac9f
|
refs/heads/master
| 2023-07-20T11:44:57.079220 | 2018-07-31T12:08:32 | 2018-07-31T12:08:32 | 143,017,412 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 347 |
py
|
import FWCore.ParameterSet.Config as cms
#
# This cfi should be included to build the HGCal Topologies
#
HFNoseTopologyBuilder = cms.ESProducer("HGCalTopologyBuilder",
Name = cms.string("HFNoseSensitive"),
Type = cms.int32(6)
)
|
[
"[email protected]"
] | |
8cadcdab49c891621863861846c7f5fde5ecd503
|
bebba3fb1dfc13a2220f06997c4bc8da42ef8e87
|
/smashlib/ipy3x/nbconvert/exporters/exporter.py
|
d26742223d298ae99c2575eefc7c9c1304833070
|
[
"MIT"
] |
permissive
|
mattvonrocketstein/smash
|
b48b93c3419637f615c7ac3386b04ae756e1fadc
|
98acdc27ab72ca80d9a7f63a54c0d52f126a8009
|
refs/heads/master
| 2021-01-18T23:23:59.340206 | 2016-07-14T01:28:17 | 2016-07-14T01:28:17 | 2,813,958 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,329 |
py
|
"""This module defines a base Exporter class. For Jinja template-based export,
see templateexporter.py.
"""
from __future__ import print_function, absolute_import
import io
import os
import copy
import collections
import datetime
from IPython.config.configurable import LoggingConfigurable
from IPython.config import Config
from IPython import nbformat
from IPython.utils.traitlets import MetaHasTraits, Unicode, List
from IPython.utils.importstring import import_item
from IPython.utils import text, py3compat
class ResourcesDict(collections.defaultdict):
def __missing__(self, key):
return ''
class Exporter(LoggingConfigurable):
"""
Class containing methods that sequentially run a list of preprocessors on a
NotebookNode object and then return the modified NotebookNode object and
accompanying resources dict.
"""
file_extension = Unicode(
'.txt', config=True,
help="Extension of the file that should be written to disk"
)
# MIME type of the result file, for HTTP response headers.
# This is *not* a traitlet, because we want to be able to access it from
# the class, not just on instances.
output_mimetype = ''
# Configurability, allows the user to easily add filters and preprocessors.
preprocessors = List(config=True,
help="""List of preprocessors, by name or namespace, to enable.""")
_preprocessors = List()
default_preprocessors = List(['IPython.nbconvert.preprocessors.coalesce_streams',
'IPython.nbconvert.preprocessors.SVG2PDFPreprocessor',
'IPython.nbconvert.preprocessors.ExtractOutputPreprocessor',
'IPython.nbconvert.preprocessors.CSSHTMLHeaderPreprocessor',
'IPython.nbconvert.preprocessors.RevealHelpPreprocessor',
'IPython.nbconvert.preprocessors.LatexPreprocessor',
'IPython.nbconvert.preprocessors.ClearOutputPreprocessor',
'IPython.nbconvert.preprocessors.ExecutePreprocessor',
'IPython.nbconvert.preprocessors.HighlightMagicsPreprocessor'],
config=True,
help="""List of preprocessors available by default, by name, namespace,
instance, or type.""")
def __init__(self, config=None, **kw):
"""
Public constructor
Parameters
----------
config : config
User configuration instance.
"""
with_default_config = self.default_config
if config:
with_default_config.merge(config)
super(Exporter, self).__init__(config=with_default_config, **kw)
self._init_preprocessors()
@property
def default_config(self):
return Config()
def from_notebook_node(self, nb, resources=None, **kw):
"""
Convert a notebook from a notebook node instance.
Parameters
----------
nb : :class:`~IPython.nbformat.NotebookNode`
Notebook node (dict-like with attr-access)
resources : dict
Additional resources that can be accessed read/write by
preprocessors and filters.
**kw
Ignored (?)
"""
nb_copy = copy.deepcopy(nb)
resources = self._init_resources(resources)
if 'language' in nb['metadata']:
resources['language'] = nb['metadata']['language'].lower()
# Preprocess
nb_copy, resources = self._preprocess(nb_copy, resources)
return nb_copy, resources
def from_filename(self, filename, resources=None, **kw):
"""
Convert a notebook from a notebook file.
Parameters
----------
filename : str
Full filename of the notebook file to open and convert.
"""
# Pull the metadata from the filesystem.
if resources is None:
resources = ResourcesDict()
if not 'metadata' in resources or resources['metadata'] == '':
resources['metadata'] = ResourcesDict()
basename = os.path.basename(filename)
notebook_name = basename[:basename.rfind('.')]
resources['metadata']['name'] = notebook_name
modified_date = datetime.datetime.fromtimestamp(
os.path.getmtime(filename))
resources['metadata'][
'modified_date'] = modified_date.strftime(text.date_format)
with io.open(filename, encoding='utf-8') as f:
return self.from_notebook_node(nbformat.read(f, as_version=4), resources=resources, **kw)
def from_file(self, file_stream, resources=None, **kw):
"""
Convert a notebook from a notebook file.
Parameters
----------
file_stream : file-like object
Notebook file-like object to convert.
"""
return self.from_notebook_node(nbformat.read(file_stream, as_version=4), resources=resources, **kw)
def register_preprocessor(self, preprocessor, enabled=False):
"""
Register a preprocessor.
Preprocessors are classes that act upon the notebook before it is
passed into the Jinja templating engine. preprocessors are also
capable of passing additional information to the Jinja
templating engine.
Parameters
----------
preprocessor : preprocessor
"""
if preprocessor is None:
raise TypeError('preprocessor')
isclass = isinstance(preprocessor, type)
constructed = not isclass
# Handle preprocessor's registration based on it's type
if constructed and isinstance(preprocessor, py3compat.string_types):
# Preprocessor is a string, import the namespace and recursively call
# this register_preprocessor method
preprocessor_cls = import_item(preprocessor)
return self.register_preprocessor(preprocessor_cls, enabled)
if constructed and hasattr(preprocessor, '__call__'):
# Preprocessor is a function, no need to construct it.
# Register and return the preprocessor.
if enabled:
preprocessor.enabled = True
self._preprocessors.append(preprocessor)
return preprocessor
elif isclass and isinstance(preprocessor, MetaHasTraits):
# Preprocessor is configurable. Make sure to pass in new default for
# the enabled flag if one was specified.
self.register_preprocessor(preprocessor(parent=self), enabled)
elif isclass:
# Preprocessor is not configurable, construct it
self.register_preprocessor(preprocessor(), enabled)
else:
# Preprocessor is an instance of something without a __call__
# attribute.
raise TypeError('preprocessor')
def _init_preprocessors(self):
"""
Register all of the preprocessors needed for this exporter, disabled
unless specified explicitly.
"""
self._preprocessors = []
        # Load default preprocessors (not necessarily enabled by default).
for preprocessor in self.default_preprocessors:
self.register_preprocessor(preprocessor)
# Load user-specified preprocessors. Enable by default.
for preprocessor in self.preprocessors:
self.register_preprocessor(preprocessor, enabled=True)
def _init_resources(self, resources):
# Make sure the resources dict is of ResourcesDict type.
if resources is None:
resources = ResourcesDict()
if not isinstance(resources, ResourcesDict):
new_resources = ResourcesDict()
new_resources.update(resources)
resources = new_resources
# Make sure the metadata extension exists in resources
if 'metadata' in resources:
if not isinstance(resources['metadata'], ResourcesDict):
resources['metadata'] = ResourcesDict(resources['metadata'])
else:
resources['metadata'] = ResourcesDict()
if not resources['metadata']['name']:
resources['metadata']['name'] = 'Notebook'
# Set the output extension
resources['output_extension'] = self.file_extension
return resources
def _preprocess(self, nb, resources):
"""
Preprocess the notebook before passing it into the Jinja engine.
        To preprocess the notebook is to apply all of the registered preprocessors, in order.
Parameters
----------
nb : notebook node
notebook that is being exported.
resources : a dict of additional resources that
can be accessed read/write by preprocessors
"""
# Do a copy.deepcopy first,
# we are never safe enough with what the preprocessors could do.
nbc = copy.deepcopy(nb)
resc = copy.deepcopy(resources)
# Run each preprocessor on the notebook. Carry the output along
# to each preprocessor
for preprocessor in self._preprocessors:
nbc, resc = preprocessor(nbc, resc)
return nbc, resc
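# Illustrative usage sketch (reader's addition; "example.ipynb" is a hypothetical
# on-disk notebook):
# exporter = Exporter()
# nb, resources = exporter.from_filename("example.ipynb")
# print(resources['metadata']['name'])   # -> 'example'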
|
[
"matthewvonrocketstein@gmail-dot-com"
] |
matthewvonrocketstein@gmail-dot-com
|
a802683d34b658d3d0748f9a476e3ecdc62a1c39
|
5864e86954a221d52d4fa83a607c71bacf201c5a
|
/carbon/common/lib/markdown/extensions/def_list.py
|
4f0de89912416b74ca84996e379a5edc82dfedb1
|
[] |
no_license
|
connoryang/1v1dec
|
e9a2303a01e5a26bf14159112b112be81a6560fd
|
404f2cebf13b311e754d45206008918881496370
|
refs/heads/master
| 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,372 |
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\carbon\common\lib\markdown\extensions\def_list.py
import re
import markdown
from markdown.util import etree
class DefListProcessor(markdown.blockprocessors.BlockProcessor):
RE = re.compile('(^|\\n)[ ]{0,3}:[ ]{1,3}(.*?)(\\n|$)')
NO_INDENT_RE = re.compile('^[ ]{0,3}[^ :]')
def test(self, parent, block):
return bool(self.RE.search(block))
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
terms = [ l.strip() for l in block[:m.start()].split('\n') if l.strip() ]
block = block[m.end():]
no_indent = self.NO_INDENT_RE.match(block)
if no_indent:
d, theRest = block, None
else:
d, theRest = self.detab(block)
if d:
d = '%s\n%s' % (m.group(2), d)
else:
d = m.group(2)
sibling = self.lastChild(parent)
if not terms and sibling.tag == 'p':
state = 'looselist'
terms = sibling.text.split('\n')
parent.remove(sibling)
sibling = self.lastChild(parent)
else:
state = 'list'
if sibling and sibling.tag == 'dl':
dl = sibling
if len(dl) and dl[-1].tag == 'dd' and len(dl[-1]):
state = 'looselist'
else:
dl = etree.SubElement(parent, 'dl')
for term in terms:
dt = etree.SubElement(dl, 'dt')
dt.text = term
self.parser.state.set(state)
dd = etree.SubElement(dl, 'dd')
self.parser.parseBlocks(dd, [d])
self.parser.state.reset()
if theRest:
blocks.insert(0, theRest)
class DefListIndentProcessor(markdown.blockprocessors.ListIndentProcessor):
ITEM_TYPES = ['dd']
LIST_TYPES = ['dl']
def create_item(self, parent, block):
dd = markdown.etree.SubElement(parent, 'dd')
self.parser.parseBlocks(dd, [block])
class DefListExtension(markdown.Extension):
def extendMarkdown(self, md, md_globals):
md.parser.blockprocessors.add('defindent', DefListIndentProcessor(md.parser), '>indent')
md.parser.blockprocessors.add('deflist', DefListProcessor(md.parser), '>ulist')
def makeExtension(configs = {}):
return DefListExtension(configs=configs)
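# Illustrative usage sketch (reader's addition; recent Python-Markdown releases
# accept extension instances in `extensions`):
# import markdown
# html = markdown.markdown("Apple\n: A red fruit.", extensions=[makeExtension()])
# # -> '<dl>\n<dt>Apple</dt>\n<dd>A red fruit.</dd>\n</dl>'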
|
[
"[email protected]"
] | |
7c87c227337384c92c3b5bf13a083449e40b2b7e
|
a982d8f9fd99c4af077dcafc1092a0f3779d9b39
|
/web_pjt/web_pjt/articles/views.py
|
1390a35c247ee3cc9ccd0f2dabc9c68159866dd9
|
[] |
no_license
|
do-park/DDDJ
|
cbe4586a583618b616eaab19014ba16e69b38f06
|
22b53bb4822d7e0b61f5a8226bbadb9b9cb20ea0
|
refs/heads/master
| 2023-01-12T14:30:16.303107 | 2020-11-07T16:09:11 | 2020-11-07T16:09:11 | 273,131,598 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,077 |
py
|
from django.shortcuts import render, redirect, get_object_or_404
from .models import Article, Comment
from movies.models import Movie
from .forms import ArticleForm, CommentForm
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from datetime import timedelta
import datetime
from django.utils import timezone
def index(request):
articles = Article.objects.order_by('-pk')
    check_now = timezone.now()  # current time; posts written within the last 6 hours get a "new" badge
    check_delta = timezone.now() - timedelta(hours=6)  # passed to index.html and handled in the template
    paginator = Paginator(articles, 15)  # change this number to adjust how many posts fit on one page
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
context = {
'articles': articles,
'page_obj': page_obj,
'check_delta' : check_delta,
'check_now' : check_now,
}
return render(request, 'articles/index.html', context)
@login_required
def create(request):
if request.method == 'POST':
form = ArticleForm(request.POST)
if form.is_valid():
article = form.save(commit=False)
article.user = request.user
article.save()
return redirect('articles:detail', article.pk)
else:
form = ArticleForm()
context = {
'form': form,
}
return render(request, 'articles/form.html', context)
@login_required
def detail(request, article_pk):
article = get_object_or_404(Article, pk=article_pk)
movie = get_object_or_404(Movie, pk=article.movie_title.pk)
form = CommentForm()
context = {
'article': article,
'movie': movie,
'form': form,
}
return render(request, 'articles/detail.html', context)
@login_required
def update(request, article_pk):
article = get_object_or_404(Article, pk=article_pk)
if request.user == article.user:
if request.method == "POST":
form = ArticleForm(request.POST, instance=article)
if form.is_valid():
updated = form.save()
return redirect('articles:detail', updated.pk)
else:
form = ArticleForm(instance=article)
context = {
'form': form,
}
return render(request, 'articles/form.html', context)
else:
return redirect('articles:detail', article_pk)
@login_required
def delete(request, article_pk):
article = get_object_or_404(Article, pk=article_pk)
if request.user == article.user:
article.delete()
return redirect('articles:index')
else:
return redirect('articles:detail', article_pk)
@login_required
def comment_create(request, article_pk):
article = get_object_or_404(Article, pk=article_pk)
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.article = article
comment.user = request.user
comment.save()
return redirect('articles:detail', article.pk)
@login_required
def comment_delete(request, article_pk, comment_pk):
comment = get_object_or_404(Comment, pk=comment_pk)
if request.user == comment.user:
comment.delete()
return redirect('articles:detail', article_pk)
@login_required
def comment_update(request, article_pk, comment_pk):
comment = get_object_or_404(Comment, pk=comment_pk)
if request.user == comment.user:
form = CommentForm(request.POST, instance=comment)
if request.method == 'POST':
if form.is_valid():
form.save()
return redirect('articles:detail', article_pk)
else:
form = CommentForm(instance=comment)
context = {
'form': form,
}
return render(request, 'articles/form.html', context)
else:
return redirect('articles:detail', article_pk)
@login_required
def search(request):
    kwd = request.COOKIES['kwd']  # pull the keyword stored by articles/index.html out of the cookie
    articles = Article.objects.filter(title__contains=kwd).order_by('-pk')  # posts whose title contains the keyword, newest first
paginator = Paginator(articles, 15)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
context = {
'kwd': kwd,
'articles': articles,
'page_obj': page_obj,
}
return render(request, 'articles/index.html', context)
@login_required
def best(request):
articles = Article.objects.filter(rank=10).order_by('-pk')
paginator = Paginator(articles, 15)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
context = {
'articles': articles,
'page_obj': page_obj,
}
return render(request, 'articles/index.html', context)
@login_required
def worst(request):
articles = Article.objects.filter(rank=0).order_by('-pk')
paginator = Paginator(articles, 15)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
context = {
'articles': articles,
'page_obj': page_obj,
}
return render(request, 'articles/index.html', context)
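# Illustrative URLconf sketch (reader's addition; route names are inferred from
# the redirect() calls above, while the path patterns themselves are assumptions):
# app_name = 'articles'
# urlpatterns = [
#     path('', views.index, name='index'),
#     path('create/', views.create, name='create'),
#     path('<int:article_pk>/', views.detail, name='detail'),
#     path('search/', views.search, name='search'),
# ]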
|
[
"[email protected]"
] | |
e61c31f37c9964c7ffd39869a8d2ecb2b8a7ced8
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_alibis.py
|
d0171b2a3add283c5bec1d670d6db8781a0e1ca8
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 231 |
py
|
from xai.brain.wordbase.nouns._alibi import _ALIBI
#calss header
class _ALIBIS(_ALIBI, ):
def __init__(self,):
_ALIBI.__init__(self)
self.name = "ALIBIS"
self.specie = 'nouns'
self.basic = "alibi"
self.jsondata = {}
|
[
"[email protected]"
] | |
38be2c172ab1d3154432b59199e2a909e3901223
|
3c982f9e570f8185770adbfb0540e22f6d0f3c28
|
/mp_calc/virtenv/lib/python3.8/site-packages/debugpy/_vendored/pydevd/_pydev_bundle/pydev_console_utils.py
|
06baf7e8706b5da9a647febad28a88751277f730
|
[
"MIT"
] |
permissive
|
azkmee/fip_powerx_mini_projects
|
48bcf4adca952e70dcb61603eb5ac60522dafebc
|
fbf3385f11893160770896236ee012927cb092d4
|
refs/heads/master
| 2023-07-11T03:01:43.433303 | 2021-08-04T13:42:31 | 2021-08-04T13:42:31 | 392,509,672 | 0 | 0 |
MIT
| 2021-08-04T13:42:32 | 2021-08-04T01:37:54 |
Python
|
UTF-8
|
Python
| false | false | 23,983 |
py
|
import os
import sys
import traceback
from _pydev_bundle.pydev_imports import xmlrpclib, _queue, Exec
from _pydev_bundle._pydev_calltip_util import get_description
from _pydevd_bundle import pydevd_vars
from _pydevd_bundle import pydevd_xml
from _pydevd_bundle.pydevd_constants import (IS_JYTHON, dict_iter_items, NEXT_VALUE_SEPARATOR, get_global_debugger,
silence_warnings_decorator)
from contextlib import contextmanager
from _pydev_bundle import pydev_log
from _pydevd_bundle.pydevd_utils import interrupt_main_thread
try:
import cStringIO as StringIO # may not always be available @UnusedImport
except:
try:
import StringIO # @Reimport
except:
import io as StringIO
# =======================================================================================================================
# BaseStdIn
# =======================================================================================================================
class BaseStdIn:
def __init__(self, original_stdin=sys.stdin, *args, **kwargs):
try:
self.encoding = sys.stdin.encoding
except:
# Not sure if it's available in all Python versions...
pass
self.original_stdin = original_stdin
try:
self.errors = sys.stdin.errors # Who knew? sys streams have an errors attribute!
except:
# Not sure if it's available in all Python versions...
pass
def readline(self, *args, **kwargs):
# sys.stderr.write('Cannot readline out of the console evaluation\n') -- don't show anything
        # This could happen if the user had called input('enter number'); upon entering it, that message would appear,
# which is not something we want.
return '\n'
def write(self, *args, **kwargs):
pass # not available StdIn (but it can be expected to be in the stream interface)
def flush(self, *args, **kwargs):
pass # not available StdIn (but it can be expected to be in the stream interface)
def read(self, *args, **kwargs):
# in the interactive interpreter, a read and a readline are the same.
return self.readline()
def close(self, *args, **kwargs):
pass # expected in StdIn
def __iter__(self):
# BaseStdIn would not be considered as Iterable in Python 3 without explicit `__iter__` implementation
return self.original_stdin.__iter__()
def __getattr__(self, item):
# it's called if the attribute wasn't found
if hasattr(self.original_stdin, item):
return getattr(self.original_stdin, item)
raise AttributeError("%s has no attribute %s" % (self.original_stdin, item))
# =======================================================================================================================
# StdIn
# =======================================================================================================================
class StdIn(BaseStdIn):
'''
Object to be added to stdin (to emulate it as non-blocking while the next line arrives)
'''
def __init__(self, interpreter, host, client_port, original_stdin=sys.stdin):
BaseStdIn.__init__(self, original_stdin)
self.interpreter = interpreter
self.client_port = client_port
self.host = host
def readline(self, *args, **kwargs):
# Ok, callback into the client to get the new input
try:
server = xmlrpclib.Server('http://%s:%s' % (self.host, self.client_port))
requested_input = server.RequestInput()
if not requested_input:
return '\n' # Yes, a readline must return something (otherwise we can get an EOFError on the input() call).
else:
# readline should end with '\n' (not doing so makes IPython 5 remove the last *valid* character).
requested_input += '\n'
return requested_input
except KeyboardInterrupt:
raise # Let KeyboardInterrupt go through -- #PyDev-816: Interrupting infinite loop in the Interactive Console
except:
return '\n'
def close(self, *args, **kwargs):
pass # expected in StdIn
#=======================================================================================================================
# DebugConsoleStdIn
#=======================================================================================================================
class DebugConsoleStdIn(BaseStdIn):
'''
Object to be added to stdin (to emulate it as non-blocking while the next line arrives)
'''
def __init__(self, py_db, original_stdin):
'''
:param py_db:
If None, get_global_debugger() is used.
'''
BaseStdIn.__init__(self, original_stdin)
self._py_db = py_db
self._in_notification = 0
def __send_input_requested_message(self, is_started):
try:
py_db = self._py_db
if py_db is None:
py_db = get_global_debugger()
if py_db is None:
return
cmd = py_db.cmd_factory.make_input_requested_message(is_started)
py_db.writer.add_command(cmd)
except Exception:
pydev_log.exception()
@contextmanager
def notify_input_requested(self):
self._in_notification += 1
if self._in_notification == 1:
self.__send_input_requested_message(True)
try:
yield
finally:
self._in_notification -= 1
if self._in_notification == 0:
self.__send_input_requested_message(False)
def readline(self, *args, **kwargs):
with self.notify_input_requested():
return self.original_stdin.readline(*args, **kwargs)
def read(self, *args, **kwargs):
with self.notify_input_requested():
return self.original_stdin.read(*args, **kwargs)
class CodeFragment:
def __init__(self, text, is_single_line=True):
self.text = text
self.is_single_line = is_single_line
def append(self, code_fragment):
self.text = self.text + "\n" + code_fragment.text
if not code_fragment.is_single_line:
self.is_single_line = False
# =======================================================================================================================
# BaseInterpreterInterface
# =======================================================================================================================
class BaseInterpreterInterface:
def __init__(self, mainThread, connect_status_queue=None):
self.mainThread = mainThread
self.interruptable = False
self.exec_queue = _queue.Queue(0)
self.buffer = None
self.banner_shown = False
self.connect_status_queue = connect_status_queue
self.mpl_modules_for_patching = {}
self._input_error_printed = False  # read in add_exec(); initialize to avoid an AttributeError
self.init_mpl_modules_for_patching()
def build_banner(self):
return 'print({0})\n'.format(repr(self.get_greeting_msg()))
def get_greeting_msg(self):
return 'PyDev console: starting.\n'
def init_mpl_modules_for_patching(self):
from pydev_ipython.matplotlibtools import activate_matplotlib, activate_pylab, activate_pyplot
self.mpl_modules_for_patching = {
"matplotlib": lambda: activate_matplotlib(self.enableGui),
"matplotlib.pyplot": activate_pyplot,
"pylab": activate_pylab
}
def need_more_for_code(self, source):
# PyDev-502: PyDev 3.9 F2 doesn't support backslash continuations
# Strangely even the IPython console is_complete said it was complete
# even with a continuation char at the end.
if source.endswith('\\'):
return True
if hasattr(self.interpreter, 'is_complete'):
return not self.interpreter.is_complete(source)
try:
# At this point, it should always be single.
# If we don't do this, things as:
#
# for i in range(10): print(i)
#
# (in a single line) don't work.
# Note that it won't give an error and code will be None (so, it'll
# use execMultipleLines in the next call in this case).
symbol = 'single'
code = self.interpreter.compile(source, '<input>', symbol)
except (OverflowError, SyntaxError, ValueError):
# Case 1
return False
if code is None:
# Case 2
return True
# Case 3
return False
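# A hedged illustration of the three cases above, via the stdlib codeop
# module (which backs code.InteractiveInterpreter.compile and behaves
# like the compile() call here):
#   codeop.compile_command('x = 1')              -> code object (complete, Case 3)
#   codeop.compile_command('for i in range(3):') -> None (needs more input, Case 2)
#   codeop.compile_command('x = )')              -> raises SyntaxError (Case 1;
#                                                   execute anyway so the error is reported)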
def need_more(self, code_fragment):
if self.buffer is None:
self.buffer = code_fragment
else:
self.buffer.append(code_fragment)
return self.need_more_for_code(self.buffer.text)
def create_std_in(self, debugger=None, original_std_in=None):
if debugger is None:
return StdIn(self, self.host, self.client_port, original_stdin=original_std_in)
else:
return DebugConsoleStdIn(py_db=debugger, original_stdin=original_std_in)
def add_exec(self, code_fragment, debugger=None):
# In case sys.excepthook called, use original excepthook #PyDev-877: Debug console freezes with Python 3.5+
# (showtraceback does it on python 3.5 onwards)
sys.excepthook = sys.__excepthook__
try:
original_in = sys.stdin
try:
help = None
if 'pydoc' in sys.modules:
pydoc = sys.modules['pydoc'] # Don't import it if it still is not there.
if hasattr(pydoc, 'help'):
# You never know how the API will be changed, so let's code defensively here
help = pydoc.help
if not hasattr(help, 'input'):
help = None
except:
# Just ignore any error here
pass
more = False
try:
sys.stdin = self.create_std_in(debugger, original_in)
try:
if help is not None:
# This will enable the help() function to work.
try:
try:
help.input = sys.stdin
except AttributeError:
help._input = sys.stdin
except:
help = None
if not self._input_error_printed:
self._input_error_printed = True
sys.stderr.write('\nError when trying to update pydoc.help.input\n')
sys.stderr.write('(help() may not work -- please report this as a bug in the pydev bugtracker).\n\n')
traceback.print_exc()
try:
self.start_exec()
if hasattr(self, 'debugger'):
self.debugger.enable_tracing()
more = self.do_add_exec(code_fragment)
if hasattr(self, 'debugger'):
self.debugger.disable_tracing()
self.finish_exec(more)
finally:
if help is not None:
try:
try:
help.input = original_in
except AttributeError:
help._input = original_in
except:
pass
finally:
sys.stdin = original_in
except SystemExit:
raise
except:
traceback.print_exc()
finally:
sys.__excepthook__ = sys.excepthook
return more
def do_add_exec(self, codeFragment):
'''
Subclasses should override.
@return: more (True if more input is needed to complete the statement and False if the statement is complete).
'''
raise NotImplementedError()
def get_namespace(self):
'''
Subclasses should override.
@return: dict with namespace.
'''
raise NotImplementedError()
def __resolve_reference__(self, text):
"""
:type text: str
"""
obj = None
if '.' not in text:
try:
obj = self.get_namespace()[text]
except KeyError:
pass
if obj is None:
try:
obj = self.get_namespace()['__builtins__'][text]
except:
pass
if obj is None:
try:
obj = getattr(self.get_namespace()['__builtins__'], text, None)
except:
pass
else:
try:
last_dot = text.rindex('.')
parent_context = text[0:last_dot]
res = pydevd_vars.eval_in_context(parent_context, self.get_namespace(), self.get_namespace())
obj = getattr(res, text[last_dot + 1:])
except:
pass
return obj
def getDescription(self, text):
try:
obj = self.__resolve_reference__(text)
if obj is None:
return ''
return get_description(obj)
except:
return ''
def do_exec_code(self, code, is_single_line):
try:
code_fragment = CodeFragment(code, is_single_line)
more = self.need_more(code_fragment)
if not more:
code_fragment = self.buffer
self.buffer = None
self.exec_queue.put(code_fragment)
return more
except:
traceback.print_exc()
return False
def execLine(self, line):
return self.do_exec_code(line, True)
def execMultipleLines(self, lines):
if IS_JYTHON:
more = False
for line in lines.split('\n'):
more = self.do_exec_code(line, True)
return more
else:
return self.do_exec_code(lines, False)
def interrupt(self):
self.buffer = None # Also clear the buffer when it's interrupted.
try:
if self.interruptable:
# Fix for #PyDev-500: Console interrupt can't interrupt on sleep
interrupt_main_thread(self.mainThread)
self.finish_exec(False)
return True
except:
traceback.print_exc()
return False
def close(self):
sys.exit(0)
def start_exec(self):
self.interruptable = True
def get_server(self):
if getattr(self, 'host', None) is not None:
return xmlrpclib.Server('http://%s:%s' % (self.host, self.client_port))
else:
return None
server = property(get_server)
def ShowConsole(self):
server = self.get_server()
if server is not None:
server.ShowConsole()
def finish_exec(self, more):
self.interruptable = False
server = self.get_server()
if server is not None:
return server.NotifyFinished(more)
else:
return True
def getFrame(self):
xml = StringIO.StringIO()
hidden_ns = self.get_ipython_hidden_vars_dict()
xml.write("<xml>")
xml.write(pydevd_xml.frame_vars_to_xml(self.get_namespace(), hidden_ns))
xml.write("</xml>")
return xml.getvalue()
@silence_warnings_decorator
def getVariable(self, attributes):
xml = StringIO.StringIO()
xml.write("<xml>")
val_dict = pydevd_vars.resolve_compound_var_object_fields(self.get_namespace(), attributes)
if val_dict is None:
val_dict = {}
for k, val in dict_iter_items(val_dict):
evaluate_full_value = pydevd_xml.should_evaluate_full_value(val)
xml.write(pydevd_vars.var_to_xml(val, k, evaluate_full_value=evaluate_full_value))
xml.write("</xml>")
return xml.getvalue()
def getArray(self, attr, roffset, coffset, rows, cols, format):
name = attr.split("\t")[-1]
array = pydevd_vars.eval_in_context(name, self.get_namespace(), self.get_namespace())
return pydevd_vars.table_like_struct_to_xml(array, name, roffset, coffset, rows, cols, format)
def evaluate(self, expression):
xml = StringIO.StringIO()
xml.write("<xml>")
result = pydevd_vars.eval_in_context(expression, self.get_namespace(), self.get_namespace())
xml.write(pydevd_vars.var_to_xml(result, expression))
xml.write("</xml>")
return xml.getvalue()
@silence_warnings_decorator
def loadFullValue(self, seq, scope_attrs):
"""
Evaluate full value for async Console variables in a separate thread and send results to IDE side
:param seq: id of command
:param scope_attrs: a sequence of variables with their attributes separated by NEXT_VALUE_SEPARATOR
(i.e.: obj\tattr1\tattr2NEXT_VALUE_SEPARATORobj2\tattr1\tattr2)
:return:
"""
frame_variables = self.get_namespace()
var_objects = []
vars = scope_attrs.split(NEXT_VALUE_SEPARATOR)
for var_attrs in vars:
if '\t' in var_attrs:
name, attrs = var_attrs.split('\t', 1)
else:
name = var_attrs
attrs = None
if name in frame_variables:
var_object = pydevd_vars.resolve_var_object(frame_variables[name], attrs)
var_objects.append((var_object, name))
else:
var_object = pydevd_vars.eval_in_context(name, frame_variables, frame_variables)
var_objects.append((var_object, name))
from _pydevd_bundle.pydevd_comm import GetValueAsyncThreadConsole
py_db = getattr(self, 'debugger', None)
if py_db is None:
py_db = get_global_debugger()
if py_db is None:
from pydevd import PyDB
py_db = PyDB()
t = GetValueAsyncThreadConsole(py_db, self.get_server(), seq, var_objects)
t.start()
def changeVariable(self, attr, value):
def do_change_variable():
Exec('%s=%s' % (attr, value), self.get_namespace(), self.get_namespace())
# Important: it has to be really enabled in the main thread, so, schedule
# it to run in the main thread.
self.exec_queue.put(do_change_variable)
def connectToDebugger(self, debuggerPort, debugger_options=None):
'''
Used to show console with variables connection.
Mainly, monkey-patches things in the debugger structure so that the debugger protocol works.
'''
if debugger_options is None:
debugger_options = {}
env_key = "PYDEVD_EXTRA_ENVS"
if env_key in debugger_options:
for (env_name, value) in dict_iter_items(debugger_options[env_key]):
existing_value = os.environ.get(env_name, None)
if existing_value:
os.environ[env_name] = "%s%c%s" % (existing_value, os.path.pathsep, value)
else:
os.environ[env_name] = value
if env_name == "PYTHONPATH":
sys.path.append(value)
del debugger_options[env_key]
def do_connect_to_debugger():
try:
# Try to import the packages needed to attach the debugger
import pydevd
from _pydev_imps._pydev_saved_modules import threading
except:
# This happens on Jython embedded in host eclipse
traceback.print_exc()
sys.stderr.write('pydevd is not available, cannot connect\n')
from _pydevd_bundle.pydevd_constants import set_thread_id
from _pydev_bundle import pydev_localhost
set_thread_id(threading.currentThread(), "console_main")
VIRTUAL_FRAME_ID = "1" # matches PyStackFrameConsole.java
VIRTUAL_CONSOLE_ID = "console_main" # matches PyThreadConsole.java
f = FakeFrame()
f.f_back = None
f.f_globals = {} # As globals=locals here, let's simply let it empty (and save a bit of network traffic).
f.f_locals = self.get_namespace()
self.debugger = pydevd.PyDB()
self.debugger.add_fake_frame(thread_id=VIRTUAL_CONSOLE_ID, frame_id=VIRTUAL_FRAME_ID, frame=f)
try:
pydevd.apply_debugger_options(debugger_options)
self.debugger.connect(pydev_localhost.get_localhost(), debuggerPort)
self.debugger.prepare_to_run()
self.debugger.disable_tracing()
except:
traceback.print_exc()
sys.stderr.write('Failed to connect to target debugger.\n')
# Register to process commands when idle
self.debugrunning = False
try:
import pydevconsole
pydevconsole.set_debug_hook(self.debugger.process_internal_commands)
except:
traceback.print_exc()
sys.stderr.write('Version of Python does not support debuggable Interactive Console.\n')
# Important: it has to be really enabled in the main thread, so, schedule
# it to run in the main thread.
self.exec_queue.put(do_connect_to_debugger)
return ('connect complete',)
def handshake(self):
if self.connect_status_queue is not None:
self.connect_status_queue.put(True)
return "PyCharm"
def get_connect_status_queue(self):
return self.connect_status_queue
def hello(self, input_str):
# Don't care what the input string is
return ("Hello eclipse",)
def enableGui(self, guiname):
''' Enable the GUI specified in guiname (see inputhook for list).
As with IPython, enabling multiple GUIs isn't an error, but
only the last one's main loop runs and it may not work
'''
def do_enable_gui():
from _pydev_bundle.pydev_versioncheck import versionok_for_gui
if versionok_for_gui():
try:
from pydev_ipython.inputhook import enable_gui
enable_gui(guiname)
except:
sys.stderr.write("Failed to enable GUI event loop integration for '%s'\n" % guiname)
traceback.print_exc()
elif guiname not in ['none', '', None]:
# Only print a warning if the guiname was going to do something
sys.stderr.write("PyDev console: Python version does not support GUI event loop integration for '%s'\n" % guiname)
# Return value does not matter, so return back what was sent
return guiname
# Important: it has to be really enabled in the main thread, so, schedule
# it to run in the main thread.
self.exec_queue.put(do_enable_gui)
def get_ipython_hidden_vars_dict(self):
return None
# =======================================================================================================================
# FakeFrame
# =======================================================================================================================
class FakeFrame:
'''
Used to show console with variables connection.
A class to be used as a mock of a frame.
'''
|
[
"[email protected]"
] | |
e6fac2153aac284d9530a47ff933793ad517f344
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/keyvault/v20180214/secret.py
|
982008275fc51e43c717bc589bec037402e201f5
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 |
Apache-2.0
| 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null |
UTF-8
|
Python
| false | false | 10,674 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['SecretArgs', 'Secret']
@pulumi.input_type
class SecretArgs:
def __init__(__self__, *,
properties: pulumi.Input['SecretPropertiesArgs'],
resource_group_name: pulumi.Input[str],
vault_name: pulumi.Input[str],
secret_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Secret resource.
:param pulumi.Input['SecretPropertiesArgs'] properties: Properties of the secret
:param pulumi.Input[str] resource_group_name: The name of the Resource Group to which the vault belongs.
:param pulumi.Input[str] vault_name: Name of the vault
:param pulumi.Input[str] secret_name: Name of the secret
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags that will be assigned to the secret.
"""
pulumi.set(__self__, "properties", properties)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "vault_name", vault_name)
if secret_name is not None:
pulumi.set(__self__, "secret_name", secret_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def properties(self) -> pulumi.Input['SecretPropertiesArgs']:
"""
Properties of the secret
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: pulumi.Input['SecretPropertiesArgs']):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the Resource Group to which the vault belongs.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="vaultName")
def vault_name(self) -> pulumi.Input[str]:
"""
Name of the vault
"""
return pulumi.get(self, "vault_name")
@vault_name.setter
def vault_name(self, value: pulumi.Input[str]):
pulumi.set(self, "vault_name", value)
@property
@pulumi.getter(name="secretName")
def secret_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the secret
"""
return pulumi.get(self, "secret_name")
@secret_name.setter
def secret_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secret_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The tags that will be assigned to the secret.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class Secret(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
properties: Optional[pulumi.Input[pulumi.InputType['SecretPropertiesArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
secret_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vault_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Resource information with extended details.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['SecretPropertiesArgs']] properties: Properties of the secret
:param pulumi.Input[str] resource_group_name: The name of the Resource Group to which the vault belongs.
:param pulumi.Input[str] secret_name: Name of the secret
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags that will be assigned to the secret.
:param pulumi.Input[str] vault_name: Name of the vault
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SecretArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Resource information with extended details.
:param str resource_name: The name of the resource.
:param SecretArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SecretArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
properties: Optional[pulumi.Input[pulumi.InputType['SecretPropertiesArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
secret_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vault_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SecretArgs.__new__(SecretArgs)
if properties is None and not opts.urn:
raise TypeError("Missing required property 'properties'")
__props__.__dict__["properties"] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["secret_name"] = secret_name
__props__.__dict__["tags"] = tags
if vault_name is None and not opts.urn:
raise TypeError("Missing required property 'vault_name'")
__props__.__dict__["vault_name"] = vault_name
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:keyvault/v20180214:Secret"), pulumi.Alias(type_="azure-native:keyvault:Secret"), pulumi.Alias(type_="azure-nextgen:keyvault:Secret"), pulumi.Alias(type_="azure-native:keyvault/v20161001:Secret"), pulumi.Alias(type_="azure-nextgen:keyvault/v20161001:Secret"), pulumi.Alias(type_="azure-native:keyvault/v20180214preview:Secret"), pulumi.Alias(type_="azure-nextgen:keyvault/v20180214preview:Secret"), pulumi.Alias(type_="azure-native:keyvault/v20190901:Secret"), pulumi.Alias(type_="azure-nextgen:keyvault/v20190901:Secret"), pulumi.Alias(type_="azure-native:keyvault/v20200401preview:Secret"), pulumi.Alias(type_="azure-nextgen:keyvault/v20200401preview:Secret"), pulumi.Alias(type_="azure-native:keyvault/v20210401preview:Secret"), pulumi.Alias(type_="azure-nextgen:keyvault/v20210401preview:Secret")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Secret, __self__).__init__(
'azure-native:keyvault/v20180214:Secret',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Secret':
"""
Get an existing Secret resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = SecretArgs.__new__(SecretArgs)
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["properties"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return Secret(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Azure location of the key vault resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the key vault resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output['outputs.SecretPropertiesResponse']:
"""
Properties of the secret
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Mapping[str, str]]:
"""
Tags assigned to the key vault resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type of the key vault resource.
"""
return pulumi.get(self, "type")
|
[
"[email protected]"
] | |
5f7d39150e878a66ec23e062dd7d70b9bcd804e6
|
2af1e6357f51d0d08b1a991e2bd922b7bdc8c0b6
|
/baekjoon/not_accepted/1167 ํธ๋ฆฌ์ ์ง๋ฆ_floyd-warshall_2.py
|
40e4929db54865cabac4b0a5397d10b91363bfe1
|
[] |
no_license
|
grasshopperTrainer/coding_practice
|
530e9912b10952c866d35d69f12c99b96959a22d
|
d1e5e6d6fa3f71f1a0105940fff1785068aec8b0
|
refs/heads/master
| 2023-06-01T13:30:15.362657 | 2021-06-08T08:40:15 | 2021-06-08T08:40:15 | 267,359,225 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 795 |
py
|
# reducing memory usage
# time out
from sys import stdin
def solution(N, routes):
MAX = 10_000
tree = {}
for r in routes:
depart = r[0]
it = iter(r[1:])
for n in it:
tree[(depart, n)] = next(it)
for m in range(1, N+1):
for i in range(1, N+1):
for j in range(1, N+1):
if i != j and (i, m) in tree and (m, j) in tree:
t = tree[(i, m)] + tree[(m, j)]
if tree.setdefault((i, j), MAX) > t:
tree[(i, j)] = t
return max(tree.values())
N, routes = 0, []
for i, row in enumerate(stdin.readlines()):
if i == 0:
N = int(row)
else:
routes.append([int(c) for c in row.strip().split(' ')[:-1]])
print(solution(N, routes))
|
[
"[email protected]"
] | |
6a03f62b4905e3c7d1cdbf69926f721ac85d51c6
|
3344e1489e1c8616181314a6524ff593f642c2c9
|
/dl/chapter2/or_func.py
|
6a2fcb88fe740c410fecab9cb1a821c2f0694d73
|
[] |
no_license
|
21nkyu/dl
|
2f0fb8405f7e10bd02e31efa9334921b8df97f97
|
cdcbd3a2bedaa4542d7dbacbf027396fc70ce3f4
|
refs/heads/main
| 2023-08-31T16:38:36.615165 | 2021-11-03T15:20:22 | 2021-11-03T15:20:22 | 422,246,633 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 181 |
py
|
import numpy as np
def or_func(x1, x2):
x = np.array([x1, x2])
w = np.array([0.5, 0.5])
b = -0.2
tmp = np.sum(x*w)+b
if tmp <= 0:
return 0
return 1
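# A minimal sanity check of the OR gate above (hypothetical usage, not part
# of the original module): every input pair except (0, 0) should yield 1.
if __name__ == '__main__':
    for x1, x2 in [(0, 0), (0, 1), (1, 0), (1, 1)]:
        print((x1, x2), '->', or_func(x1, x2))  # expected: 0, 1, 1, 1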
|
[
"[email protected]"
] | |
eb9292b595bef60e166068873d143b42ad54a2d8
|
e86dbbe3f0650b4d1f4039211e4702859b6b5bfa
|
/pyscf/delta_scf/scf_s.py
|
aff04e8034bf02661bdd32a2210175d2181c7620
|
[] |
no_license
|
sapatha2/cuo
|
07a568b021b69e6448763d232b5f63857f9e2932
|
006b190ae29de0af24c0fd905186ccda0c9ade94
|
refs/heads/master
| 2020-03-24T02:01:50.269432 | 2019-06-14T17:52:53 | 2019-06-14T17:52:53 | 142,359,967 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,932 |
py
|
#PySCF input file for CuO calculations
import json
from pyscf import gto,scf,mcscf, fci,lo,ci,cc
from pyscf.scf import ROHF,ROKS,UHF,UKS, addons
import numpy as np
import pandas as pd
df=json.load(open("trail.json"))
charge=0
#Additional states
S=[1,1,1,1,1]
symm_dict=[
{'A1':(5,5),'E1x':(3,3),'E1y':(3,2),'E2x':(1,1),'E2y':(1,1)},
{'A1':(6,5),'E1x':(3,3),'E1y':(2,2),'E2x':(1,1),'E2y':(1,1)}, #(pi -> s)
{'A1':(6,6),'E1x':(3,2),'E1y':(2,2),'E2x':(1,1),'E2y':(1,1)}, #(2pi -> 2s)
{'A1':(5,4),'E1x':(3,3),'E1y':(3,3),'E2x':(1,1),'E2y':(1,1)}, #(z -> pi)
{'A1':(5,5),'E1x':(3,3),'E1y':(3,3),'E2x':(1,1),'E2y':(1,0)}, #(dd -> pi)
]
datacsv={}
for nm in['run','method','basis','pseudopotential','bond-length','S','E','conv']:
datacsv[nm]=[]
for run in range(len(S)):
for r in [1.725]:
for method in ['B3LYP']:
for basis in ['vdz','vtz']:
for el in ['Cu']:
if(S[run]>0):
molname=el+'O'
mol=gto.Mole()
mol.ecp={}
mol.basis={}
for e in [el,'O']:
mol.ecp[e]=gto.basis.parse_ecp(df[e]['ecp'])
mol.basis[e]=gto.basis.parse(df[e][basis])
mol.charge=charge
mol.spin=S[run]
mol.build(atom="%s 0. 0. 0.; O 0. 0. %g"%(el,r),verbose=4,symmetry=True)
if basis=='vdz':
#These are the orbitals for which we want to read-in an initial DM guess
TM_3s_orbitals = []
TM_4s_orbitals = []
TM_3p_orbitals = []
TM_3d_orbitals = []
O_2s_orbitals = []
O_2p_orbitals = []
aos=mol.ao_labels()
print('')
print('AO labels')
print(aos)
print('')
for i,x in enumerate(aos):
#Find the TM 3s labels
if (('3s' in x) and (el in x)):
TM_3s_orbitals.append(i)
#Find the TM 4s labels
if (('4s' in x) and (el in x)):
TM_4s_orbitals.append(i)
#Find the TM 3p labels
if (('3p' in x) and (el in x)):
TM_3p_orbitals.append(i)
#Find the TM 3d labels
if (('3d' in x) and (el in x)):
TM_3d_orbitals.append(i)
#Find the O 2s labels
if (('2s' in x) and ('O' in x)):
O_2s_orbitals.append(i)
#Find the O 2p labels
if (('2p' in x) and ('O' in x)):
O_2p_orbitals.append(i)
#There should be 5 3d TM orbitals. Let's check this!
assert len(TM_3d_orbitals)==5
##############################################################################################
if("U" in method):
if("HF" in method):
m=UHF(mol)
else:
m=UKS(mol)
m.xc=method[1:]
else:
if(method=="ROHF"):
m=ROHF(mol)
else:
m=ROKS(mol)
m.xc=method
##############################################################################################
dm=np.zeros(m.init_guess_by_minao().shape)
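# As used below, dm is indexed as [spin, ao, ao]: dm[0] holds the
# alpha-spin block and dm[1] the beta-spin block of the initial guess.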
#The 3s is always doubly-occupied for the TM atom
for s in TM_3s_orbitals:
for spin in [0,1]:
dm[spin,s,s]=1
#The 4s is always at least singly-occupied for the TM atom
for s in TM_4s_orbitals:
dm[0,s,s]=1
#Control the 4s double-occupancy
if (el=='Cr'):
for s in TM_4s_orbitals:
print('We are singly filling this 4s-orbital: ' + str(aos[s]))
dm[1,s,s]=0
#Always doubly-occupy the 3p orbitals for the TM atom
for p in TM_3p_orbitals:
for s in [0,1]:
dm[s,p,p]=1
#Control the 3d occupancy for CrO...
if (el=='Cr'):
for i,d in enumerate(TM_3d_orbitals):
#These are the 3d orbitals we want to fill to get the correct symmetry
if ( ('xy' in aos[d]) or ('yz' in aos[d]) or ('z^2' in aos[d]) or ('x2-y2' in aos[d]) ):
print('We are singly filling this d-orbital: ' + str(aos[d]))
dm[0,d,d]=1
m.chkfile=el+basis+"_r"+str(r)+"_s"+str(S[run])+"_"+method+"_"+str(run)+".chk"
m.irrep_nelec = symm_dict[run]
m.max_cycle=100
m = addons.remove_linear_dep_(m)
m.conv_tol=1e-6
#Only need an initial guess for CrO and CuO...
if (el=='Cr' or el=='Cu'):
total_energy=m.kernel(dm)
else:
total_energy=m.kernel()
#Compute the Mulliken orbital occupancies...
m.analyze()
assert(np.sum(m.mo_occ)==25)
#Once we get past the vdz basis, just read-in the existing chk file...
else:
##############################################################################################
if("U" in method):
if("HF" in method):
m=UHF(mol)
else:
m=UKS(mol)
m.xc=method[1:]
else:
if(method=="ROHF"):
m=ROHF(mol)
else:
m=ROKS(mol)
m.xc=method
##############################################################################################
dm=m.from_chk(el+'vdz'+"_r"+str(r)+"_s"+str(S[run])+"_"+method+"_"+str(run)+".chk")
m.chkfile=el+basis+"_r"+str(r)+"_s"+str(S[run])+"_"+method+"_"+str(run)+".chk"
m.irrep_nelec = symm_dict[run]
m.max_cycle=100
m = addons.remove_linear_dep_(m)
m.conv_tol=1e-6
total_energy=m.kernel(dm)
m.analyze()
assert(np.sum(m.mo_occ)==25)
datacsv['run'].append(run)
datacsv['bond-length'].append(r)
datacsv['S'].append(S[run])
datacsv['method'].append(method)
datacsv['basis'].append(basis)
datacsv['pseudopotential'].append('trail')
datacsv['E'].append(total_energy)
datacsv['conv'].append(m.converged)
pd.DataFrame(datacsv).to_csv("cuo.csv",index=False)
|
[
"[email protected]"
] | |
c4cdc5f902e454c3555367530422548c79313419
|
319d66c48f51e3d98e9df953d406a6f545b72363
|
/Python/TwoStrings.py
|
93cdab9452eab2fbcc476f7b088f5e8746fc5d76
|
[
"Apache-2.0"
] |
permissive
|
WinrichSy/HackerRank-Solutions
|
291bc7a32dc4d9569d7028d6d665e86869fbf952
|
ed928de50cbbbdf0aee471630f6c04f9a0f69a1f
|
refs/heads/master
| 2022-07-18T15:43:48.865714 | 2020-05-16T00:21:56 | 2020-05-16T00:21:56 | 255,453,554 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 612 |
py
|
#Two Strings
#https://www.hackerrank.com/challenges/two-strings/problem
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the twoStrings function below.
def twoStrings(s1, s2):
# Two strings share a common substring iff they share at least one character.
return 'YES' if set(s1) & set(s2) else 'NO'
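# Hedged examples (added, not in the original solution):
#   twoStrings('hello', 'world') -> 'YES'  (they share 'o' and 'l')
#   twoStrings('hi', 'jkl')      -> 'NO'   (no character in common)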
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input())
for q_itr in range(q):
s1 = input()
s2 = input()
result = twoStrings(s1, s2)
fptr.write(result + '\n')
fptr.close()
|
[
"[email protected]"
] | |
970a27ad5a6f180d6f567b10a24309540a2f364f
|
45de3aa97525713e3a452c18dcabe61ac9cf0877
|
/src/secondaires/familier/types/barre_attache.py
|
697fc617228ad33fb007e25b34c48b074ec5d852
|
[
"BSD-3-Clause"
] |
permissive
|
stormi/tsunami
|
95a6da188eadea3620c70f7028f32806ee2ec0d1
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
refs/heads/master
| 2020-12-26T04:27:13.578652 | 2015-11-17T21:32:38 | 2015-11-17T21:32:38 | 25,606,146 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,565 |
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le type barre d'attache."""
from primaires.interpreteur.editeur.entier import Entier
from primaires.objet.types.base import BaseType
class BarreAttache(BaseType):
"""Type d'objet: barre d'attache."""
nom_type = "barre d'attache"
def __init__(self, cle=""):
"""Constructeur de l'objet"""
BaseType.__init__(self, cle)
self.nb_places = 1
self.etendre_editeur("b", "nombre de places", Entier,
self, "nb_places")
def travailler_enveloppes(self, enveloppes):
"""Travail sur les enveloppes"""
nb = enveloppes["b"]
nb.apercu = "{objet.nb_places}"
nb.prompt = "Nombre de places disponibles sur cette barre " \
"d'attache : "
nb.aide_courte = \
"Entrez le |ent|nombre de places|ff| disponibles sur cette " \
"barre d'attache\nou |cmd|/|ff| pour revenir ร la fenรชtre " \
"parente.\n\nNombre de places actuel : {objet.nb_places}"
|
[
"[email protected]"
] | |
7be2abeab2df7f1d5bc04459eee30d4129b87489
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/instances/nistData/atomic/integer/Schema+Instance/NISTXML-SV-IV-atomic-integer-fractionDigits-1-1.py
|
8d454b0ee6aa32933dfcdcb7ea28358723d58032
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 |
MIT
| 2023-07-25T14:19:04 | 2020-02-10T21:59:47 |
Python
|
UTF-8
|
Python
| false | false | 298 |
py
|
from output.models.nist_data.atomic.integer.schema_instance.nistschema_sv_iv_atomic_integer_fraction_digits_1_xsd.nistschema_sv_iv_atomic_integer_fraction_digits_1 import NistschemaSvIvAtomicIntegerFractionDigits1
obj = NistschemaSvIvAtomicIntegerFractionDigits1(
value=-999999999999999999
)
|
[
"[email protected]"
] | |
28c4653058cf8e268d6f9df167b0f7f1436718d3
|
5f9ebed60f6f2568b7c4a34505ff9e36b77968f3
|
/figure_paper_xpol_composites.py
|
7ef77c6a90cb4f682d27bf7c45b258a63c37460c
|
[] |
no_license
|
rvalenzuelar/xpol_vis
|
3497b990694f033c711b7a4e2f0c199d46567efd
|
181fd831afaafa7e7018a2425b0ee9b2a820f649
|
refs/heads/master
| 2020-04-05T14:09:37.028300 | 2018-04-26T20:20:32 | 2018-04-26T20:20:32 | 50,193,902 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,449 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 22 11:50:10 2016
@author: raul
"""
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import xpol_tta_analysis as xta
import numpy as np
import mpl_toolkits.axisartist as AA
import matplotlib as mpl
from matplotlib.gridspec import GridSpecFromSubplotSpec as gssp
from rvtools import add_colorbar
mpl.rcParams['font.size'] = 15
'''
use:
%run -i figure_paper_xpol_composites.py
if instances do not exist in iPython namespace
then create them
'''
# params = dict(wdir_surf=130,wdir_wprof=170,
# rain_czd=0.25,nhours=2)
params = dict(wdir_thres=150,
rain_czd=0.25,
nhours=2
)
try:
xall
except NameError:
''' 02Feb04 (12) is excluded for RHIs only internally in
the process method
'''
xall = xta.process(case=[8, 9, 10, 11, 12, 13, 14], params=params)
scale = 1.2
fig = plt.figure(figsize=(7.5*scale, 11*scale))
gs0 = gridspec.GridSpec(1, 2,
wspace=0.01)
height_ratios = [2.5, 1, 2.5, 1]
gs00 = gssp(4, 1,
subplot_spec=gs0[0],
height_ratios=height_ratios,
hspace=0)
gs01 = gssp(4, 1,
subplot_spec=gs0[1],
height_ratios=height_ratios,
hspace=0)
ax0 = plt.subplot(gs00[0],gid='(a)')
ax1 = plt.subplot(gs01[0],gid='(b)')
ax2 = plt.subplot(gs00[1],gid='(c)')
ax3 = plt.subplot(gs01[1],gid='(d)')
ax4 = plt.subplot(gs00[2],gid='(e)')
ax5 = plt.subplot(gs01[2],gid='(f)')
ax6 = plt.subplot(gs00[3],gid='(g)')
ax7 = plt.subplot(gs01[3],gid='(h)')
axes = [ax0, ax1, ax2, ax3, ax4, ax5, ax6, ax7]
cvalues1 = range(-30,34,4)
cvalues2 = range(0,32,2)
ax0.text(0.5, 1.05, 'TTA',transform=ax0.transAxes,
fontsize=15,weight='bold')
ax1.text(0.5, 1.05, 'NO-TTA',transform=ax1.transAxes,
ha='center',fontsize=15,weight='bold')
xall.plot(ax=ax0, name='contourf',mode='ppi',target='vr',
cbar=dict(loc='right',invisible=True),
terrain=True,bmap=True,qc=True,
cvalues=cvalues1)
xall.plot(ax=ax1, name='contourf',mode='ppi',target='vr',
cbar=dict(loc='right',label='[m/s]'),
cvalues=cvalues1,
terrain=True,bmap=True,qc=True,
tta=False)
xall.plot(ax=ax2, name='contourf',mode='rhi',target='vr',
cbar=dict(loc='right',invisible=True),
cvalues=cvalues2,
qc=True,
xticklabs=False)
xall.plot(ax=ax3, name='contourf',mode='rhi',target='vr',
cbar=dict(loc='right',label='[m/s]',labelpad=13),
cvalues=cvalues2,
xticklabs=False,
yticklabs=False,
qc=True,
tta=False)
xall.plot(ax=ax4, name='contourf',mode='ppi',target='z',
cbar=dict(loc='right',invisible=True),
terrain=True,bmap=True,qc=True,
sector=range(135,180),
cvalues=cvalues1)
hdtm = xall.plot(ax=ax5, name='contourf',mode='ppi',target='z',
cbar=dict(loc='right',label='[%]'),
cvalues=cvalues1,
terrain=True,bmap=True,qc=True,
sector=range(135,180),
tta=False)
xall.plot(ax=ax6, name='contourf',mode='rhi',target='z',
cbar=dict(loc='right',invisible=True),
qc=True,
cvalues=cvalues2)
xall.plot(ax=ax7, name='contourf',mode='rhi',target='z',
cbar=dict(loc='right',invisible=True),
cvalues=cvalues2,
yticklabs=False,
qc=True,
tta=False)
''' add axis id '''
for ax in axes:
gid = ax.get_gid()
if gid in ['(a)','(b)','(e)','(f)']:
ax.text(0.9,0.93,gid,size=14,
weight='bold',
transform=ax.transAxes,
color='w')
else:
ax.text(0.9,0.82,gid,size=14,
weight='bold',
transform=ax.transAxes)
if gid in ['(c)','(d)']:
ax.set_xlabel('')
''' make floating axis colorbar for terrain '''
# [left, bott, wid, hgt]
axaa = AA.Axes(fig,[-0.38,0.74,0.5,0.1])
axaa.tick_params(labelsize=25)
add_colorbar(axaa,hdtm,label='',
ticks=range(0,1001,1000),
ticklabels=['0','1.0'])
fig.add_axes(axaa)
axaa.remove() # leave only colorbar
ax0.text(-0.15, 0.93,'[km]',transform=ax0.transAxes)
''' add PPI arrows '''
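# arrow_end (below) takes a start coordinate, a radius and an azimuth in
# degrees measured clockwise from north, and returns the arrow-tip point;
# e.g. r=30 at az=90 moves the tip 30 units in +x.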
def arrow_end(st_co,r,az):
en_co=[st_co[0],st_co[1]]
en_co[0]+=r*np.sin(np.radians(az))
en_co[1]+=r*np.cos(np.radians(az))
return (en_co[0],en_co[1])
tta_arrows={'arrow1':{'c0':(140,115),'az':300},
'arrow2':{'c0':(120,98),'az':325},
'arrow3':{'c0':(90,93),'az':340},
'arrow4':{'c0':(60,98),'az':345},
'arrow5':{'c0':(35,105),'az':350},
'arrow6':{'c0':(15,115),'az':355},
}
ntta_arrows={'arrow1':{'c0':(130,115),'az':335},
'arrow2':{'c0':(105,112),'az':350},
'arrow3':{'c0':(80,115),'az':0},
'arrow4':{'c0':(55,123),'az':5},
'arrow5':{'c0':(35,130),'az':5},
'arrow6':{'c0':(15,140),'az':10},
}
# scale = 4.1 # use for output figure
# #scale = 1.0 # use for iPython figure
# length = 30
# arrows=[tta_arrows,ntta_arrows]
# axes = [axes[0],axes[1]]
# for ax,arrow in zip(axes,arrows):
# for _,arr in arrow.iteritems():
# c0 = tuple(v*scale for v in arr['c0'])
# az = arr['az']
# ax.annotate("",
# xy = arrow_end(c0,length*scale,az),
# xytext = c0,
# xycoords = 'axes pixels',
# textcoords = 'axes pixels',
# zorder = 1,
# arrowprops = dict(shrinkA=6,
# shrinkB=6,
# fc='w',
# ec='k',
# lw=1))
#
# ''' single arrows '''
# c0 = tuple(v*scale for v in (145,34))
# axes[0].annotate("",
# xy = arrow_end(c0,length*scale,355),
# xytext = c0,
# xycoords = 'axes pixels',
# textcoords = 'axes pixels',
# zorder = 1,
# arrowprops = dict(shrinkA=6,
# shrinkB=6,
# fc='w',
# ec='k',
# lw=1))
# c0 = tuple(v*scale for v in (140,67))
# axes[1].annotate("",
# xy = arrow_end(c0,length*scale,0),
# xytext = c0,
# xycoords = 'axes pixels',
# textcoords = 'axes pixels',
# zorder = 1,
# arrowprops = dict(shrinkA=6,
# shrinkB=6,
# fc='w',
# ec='k',
# lw=1))
#
# ''' add RHI arrow '''
# ax2.annotate("",
# xy = (150*scale, 25*scale),
# xytext = (25*scale,3*scale),
# xycoords='axes pixels',
# textcoords='axes pixels',
# arrowprops=dict(shrinkA=5,
# shrinkB=5,
# fc="w", ec="k",
# connectionstyle="arc3,rad=-0.1"))
plt.show()
# fname='/home/raul/Desktop/fig_xpol_composite.png'
# plt.savefig(fname, dpi=300, format='png',papertype='letter',
# bbox_inches='tight')
|
[
"[email protected]"
] | |
14ae6c673eef938af1474c912a457a9e27db17f9
|
2cb3447b55b3a298ba744f2fe67aaae16938c66d
|
/xmldirector/plonecore/demo/xmldocument.py
|
36c34ddcd099b1a6dd349501b5b31e08ddfeddd3
|
[] |
no_license
|
tecumsehmaverick/xmldirector.plonecore
|
e2bc8fe0c065f73bd27438184cf4552c2df668e8
|
30f28878d6de1ffd1baf2fa0e6d903a7da204c7b
|
refs/heads/master
| 2020-12-28T20:30:29.720830 | 2015-01-23T17:01:34 | 2015-01-23T17:01:34 | 29,760,631 | 0 | 1 | null | 2015-01-24T01:01:02 | 2015-01-24T01:01:02 | null |
UTF-8
|
Python
| false | false | 1,337 |
py
|
# -*- coding: utf-8 -*-
################################################################
# xmldirector.plonecore
# (C) 2014, Andreas Jung, www.zopyx.com, Tuebingen, Germany
################################################################
"""
A sample Dexterity content-type implementation using
all XML field types.
"""
from zope.interface import implements
from plone.dexterity.content import Item
from plone.supermodel import model
from xmldirector.plonecore.i18n import MessageFactory as _
from xmldirector.plonecore.dx import dexterity_base
from xmldirector.plonecore.dx.xml_binary import XMLBinary
from xmldirector.plonecore.dx.xml_image import XMLImage
from xmldirector.plonecore.dx.xml_field import XMLText
from xmldirector.plonecore.dx.xpath_field import XMLXPath
class IXMLDocument(model.Schema):
xml_content = XMLText(
title=_(u'XML Content'),
required=False
)
xml_xpath = XMLXPath(
title=_(u'XML XPath expression'),
description=_(u'Format: field=<fieldname>,xpath=<xpath expression>'),
required=False
)
xml_binary = XMLBinary(
title=_(u'XML Binary'),
required=False
)
xml_image = XMLImage(
title=_(u'XML Image'),
required=False
)
class XMLDocument(Item, dexterity_base.Mixin):
implements(IXMLDocument)
|
[
"[email protected]"
] | |
8dc8c99c234c8352a74622536467e1147f3e3197
|
474c7eab287cb3ebd3788a1cac72a6dffa9941bc
|
/Leetcode/ana.py
|
5747471217ef057955d52e3a9d2c7d38e609badf
|
[] |
no_license
|
shenlant314/Reptile
|
bf85b7d8e19defa65a6a30140732bf37222e98da
|
42f5ea8681f8c477de9db109e9a0d5dba2dfccae
|
refs/heads/master
| 2023-04-17T06:07:52.455713 | 2021-04-19T23:39:02 | 2021-04-19T23:39:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,558 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 4 20:54:36 2020
@author: Lee
"""
from pymongo import MongoClient
import pandas as pd
from pyecharts import options as opts
from pyecharts.charts import Geo,Line,WordCloud,Pie,Parallel,PictorialBar,Bar,Polar
from pyecharts.globals import ChartType, SymbolType
pd.set_option('display.max_rows', 20)
pd.set_option('display.max_columns', None)
db=MongoClient().leetcode
data=pd.DataFrame(list(db['2'].find()))
data = data.drop(columns='_id')
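# acRate arrives as a percentage string such as '45.6%'; strip the trailing
# '%' and cast to float so the column can be sorted numerically.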
data['acRate']=data['acRate'].apply(lambda x:float(x[:-1]))
# Problems with the most likes
like=data.sort_values(by="likes",ascending=False)
print(like)
bar1=(
Bar()
.add_xaxis(list(like['title'])[:10])
.add_yaxis("", list(like['likes'])[:10], category_gap="50%")
.set_global_opts(title_opts=opts.TitleOpts(title="Leetcode็น่ตๆๅค็ๅ้้ข"),
xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(rotate=-15,font_weight='bold')))
)
bar1.render('leetcode_top10_most_liked.html')
# Highest acceptance rate
print(data.sort_values(by="acRate",ascending=False))
# Most submissions
print(data.sort_values(by="totalSubmissionRaw",ascending=False))
# Break down by difficulty
data_diff=data.groupby('difficulty').mean()
print(data_diff)
data_diff=data.groupby('difficulty').count()
print(data_diff)
data_diff=data.groupby('difficulty')
for a,b in data_diff:
print(b.sort_values(by="likes",ascending=False))
print(b.sort_values(by="acRate",ascending=False))
print(b.sort_values(by="totalSubmissionRaw",ascending=False))
|
[
"[email protected]"
] | |
e95e5d6df88dae462e0f7709393296bff7e29fa5
|
9d131148c2189f73e63b76c7a097cd4cd64ee18f
|
/analyze.py
|
1e3e75212c4eaa4f94c68423f94ae41c15889a9d
|
[] |
no_license
|
osmmaz/Twitter_Analytics
|
2e6f51b5f6be58d4dd0d6474c9a1b309b85cb099
|
829a27a038a5ff8270378cace4bfaa4b6a778bb9
|
refs/heads/master
| 2020-04-15T00:43:18.497042 | 2017-08-26T14:55:46 | 2017-08-26T14:55:46 | 164,250,306 | 1 | 0 | null | 2019-01-05T20:51:56 | 2019-01-05T20:51:56 | null |
UTF-8
|
Python
| false | false | 5,441 |
py
|
'''
Author: Adil Moujahid
Description: Script for analyzing tweets to compare the popularity of 3 programming languages: Python, Javascript and ruby
Reference: http://adilmoujahid.com/posts/2014/07/twitter-analytics/
'''
import json
import pandas as pd
import matplotlib.pyplot as plt
import re
def word_in_text(word, text):
word = word.lower()
text = text.lower()
match = re.search(word, text)
if match:
return True
return False
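# Note: re.search treats `word` as a regex and matches substrings, so
# word_in_text('python', 'pythonic tweets') is True as well.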
def extract_link(text):
regex = r'https?://[^\s<>"]+|www\.[^\s<>"]+'
match = re.search(regex, text)
if match:
return match.group()
return ''
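# Example: extract_link('see https://example.com/x now') returns
# 'https://example.com/x'; when nothing matches it returns ''.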
def main():
# Reading Tweets
print('Reading Tweets\n')
tweets_data_path = 'output.txt'
tweets_data = []
tweets_file = open(tweets_data_path, "r")
for line in tweets_file:
try:
tweet = json.loads(line)
tweets_data.append(tweet)
except:
continue
# Structuring Tweets
print('Structuring Tweets\n')
tweets = pd.DataFrame()
tweets['text'] = [tweet.get('text', '') for tweet in tweets_data]
tweets['lang'] = [tweet.get('lang', '') for tweet in tweets_data]
tweets['country'] = [tweet['place']['country'] if tweet.get('place') else None for tweet in tweets_data]
# Analyzing Tweets by Language
print('Analyzing tweets by language\n')
tweets_by_lang = tweets['lang'].value_counts()
fig, ax = plt.subplots()
ax.tick_params(axis='x', labelsize=15)
ax.tick_params(axis='y', labelsize=10)
ax.set_xlabel('Languages', fontsize=15)
ax.set_ylabel('Number of tweets', fontsize=15)
ax.set_title('Top 5 languages', fontsize=15, fontweight='bold')
tweets_by_lang[:5].plot(ax=ax, kind='bar', color='red')
plt.savefig('tweet_by_lang', format='png')
# Analyzing Tweets by Country
print('Analyzing tweets by country\n')
tweets_by_country = tweets['country'].value_counts()
fig, ax = plt.subplots()
ax.tick_params(axis='x', labelsize=15)
ax.tick_params(axis='y', labelsize=10)
ax.set_xlabel('Countries', fontsize=15)
ax.set_ylabel('Number of tweets', fontsize=15)
ax.set_title('Top 5 countries', fontsize=15, fontweight='bold')
tweets_by_country[:5].plot(ax=ax, kind='bar', color='blue')
plt.savefig('tweet_by_country', format='png')
# Adding programming languages columns to the tweets DataFrame
print('Adding programming languages tags to the data\n')
tweets['python'] = tweets['text'].apply(lambda tweet: word_in_text('python', tweet))
tweets['javascript'] = tweets['text'].apply(lambda tweet: word_in_text('javascript', tweet))
tweets['ruby'] = tweets['text'].apply(lambda tweet: word_in_text('ruby', tweet))
# Analyzing Tweets by programming language: First attempt
print('Analyzing tweets by programming language: First attempt\n')
prg_langs = ['python', 'javascript', 'ruby']
tweets_by_prg_lang = [tweets['python'].value_counts()[True], tweets['javascript'].value_counts()[True],
tweets['ruby'].value_counts()[True]]
x_pos = list(range(len(prg_langs)))
width = 0.8
fig, ax = plt.subplots()
plt.bar(x_pos, tweets_by_prg_lang, width, alpha=1, color='g')
ax.set_ylabel('Number of tweets', fontsize=15)
ax.set_title('Ranking: python vs. javascript vs. ruby (Raw data)', fontsize=10, fontweight='bold')
ax.set_xticks([p + 0.4 * width for p in x_pos])
ax.set_xticklabels(prg_langs)
plt.grid()
plt.savefig('tweet_by_prg_language_1', format='png')
# Targeting relevant tweets
print('Targeting relevant tweets\n')
tweets['programming'] = tweets['text'].apply(lambda tweet: word_in_text('programming', tweet))
tweets['tutorial'] = tweets['text'].apply(lambda tweet: word_in_text('tutorial', tweet))
tweets['relevant'] = tweets['text'].apply(
lambda tweet: word_in_text('programming', tweet) or word_in_text('tutorial', tweet))
# Analyzing Tweets by programming language: Second attempt
print('Analyzing tweets by programming language: Second attempt\n')
def get_value_counts(tweets, language):
try:
return tweets[tweets['relevant'] == True][language].value_counts()[True]
except KeyError:
return 0
tweets_by_prg_lang = [get_value_counts(tweets, 'python'),
get_value_counts(tweets, 'javascript'),
get_value_counts(tweets, 'ruby')]
x_pos = list(range(len(prg_langs)))
width = 0.8
fig, ax = plt.subplots()
plt.bar(x_pos, tweets_by_prg_lang, width, alpha=1, color='g')
ax.set_ylabel('Number of tweets', fontsize=15)
ax.set_title('Ranking: python vs. javascript vs. ruby (Relevant data)', fontsize=10, fontweight='bold')
ax.set_xticks([p + 0.4 * width for p in x_pos])
ax.set_xticklabels(prg_langs)
plt.grid()
plt.savefig('tweet_by_prg_language_2', format='png')
# Extracting Links
tweets['link'] = tweets['text'].apply(lambda tweet: extract_link(tweet))
tweets_relevant = tweets[tweets['relevant'] == True]
tweets_relevant_with_link = tweets_relevant[tweets_relevant['link'] != '']
print('\nBelow are some Python links that we extracted\n')
print(tweets_relevant_with_link[tweets_relevant_with_link['python'] == True]['link'].head())
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
32f0ebd68a4cf905217afbd34ce240f7c6c03b8e
|
5a42ce780721294d113335712d45c62a88725109
|
/project/graphdata/module/yiyiyuan/model/yi_loan_flows.py
|
df4e31090b6b58f8024d0997f2403261bc3c5356
|
[] |
no_license
|
P79N6A/project_code
|
d2a933d53deb0b4e0bcba97834de009e7bb78ad0
|
1b0e863ff3977471f5a94ef7d990796a9e9669c4
|
refs/heads/master
| 2020-04-16T02:06:57.317540 | 2019-01-11T07:02:05 | 2019-01-11T07:02:05 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 767 |
py
|
# -*- coding: utf-8 -*-
# Note: the local Aliyun copy of the database is used here
# sqlacodegen mysql://root:123!@#@127.0.0.1/xhh_test --outfile yyy.py --flask
from lib.application import db
from .base_model import BaseModel
class YiUserLoanFlow(db.Model, BaseModel):
__bind_key__ = 'own_yiyiyuan'
__tablename__ = 'yi_user_loan_flows'
id = db.Column(db.Integer, primary_key=True)
loan_id = db.Column(db.Integer, nullable=False)
admin_id = db.Column(db.Integer, nullable=False)
loan_status = db.Column(db.Integer)
relative = db.Column(db.String(1024))
reason = db.Column(db.String(1024))
create_time = db.Column(db.DateTime)
admin_name = db.Column(db.String(64))
type = db.Column(db.Integer, server_default=db.FetchedValue())
|
[
"[email protected]"
] | |
b706a298261f243da96e72be47215ae11ce4a86f
|
4a9dada02c749e9e5277fe1e35357d7b2b28ad5c
|
/้กพๅคฉๅช2018010980/ๆไฝ็ณป็ปๅฎ้ช/ไฝไธ2.py
|
780d1e608f2afcdbef5a6f093500ede8eb25ba41
|
[] |
no_license
|
wanghan79/2020_Option_System
|
631cc80f52829390a128a86677de527472470348
|
f37b870614edf7d85320da197d932df2f25a5720
|
refs/heads/master
| 2021-01-09T13:10:05.630685 | 2020-07-10T03:30:39 | 2020-07-10T03:30:39 | 242,312,271 | 13 | 9 | null | 2020-07-04T16:13:11 | 2020-02-22T09:12:56 |
Python
|
UTF-8
|
Python
| false | false | 308 |
py
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
"""
Author: Ty.Gu
Purpose: os.system
Created: 24/6/2020
"""
# Assignment 2: invoke the Windows command line from Python
# (hint: use the built-in os.system).
import os
os.system('cd D:\\QQ & mkdir test' )
a = os.system('python ไฝไธ1.py')
print(a)
|
[
"[email protected]"
] | |
111386fe4bfdf0ed187f70cc4815af53d87d2cf4
|
0274c84e6bf546a325ba2a981426a9cad33cdcfc
|
/pycoin/ecdsa/Curve.py
|
c022efb03779d2035b240e74c8c42f2d5b4d640b
|
[
"MIT"
] |
permissive
|
jaschadub/pycoin
|
2cc646461415d68b69ae5a0c4496c6a6b37740e2
|
1e8d0d9fe20ce0347b97847bb529cd1bd84c7442
|
refs/heads/master
| 2021-07-05T05:31:47.734966 | 2019-03-16T18:33:47 | 2019-03-16T18:33:47 | 140,237,273 | 0 | 0 |
MIT
| 2019-03-16T18:41:54 | 2018-07-09T05:51:01 |
Python
|
UTF-8
|
Python
| false | false | 5,076 |
py
|
# Adapted from code written in 2005 by Peter Pearson and placed in the public domain.
from .Point import Point
def _leftmost_bit(x):
# this is closer to constant time than bit-twiddling hacks like those in
# https://graphics.stanford.edu/~seander/bithacks.html
assert x > 0
result = 1
while result <= x:
result <<= 1
return result >> 1
class Curve(object):
"""
This class implements an `Elliptic curve <https://en.wikipedia.org/wiki/Elliptic_curve>`_ intended
for use in `Elliptic curve cryptography <https://en.wikipedia.org/wiki/Elliptic-curve_cryptography>`_
An elliptic curve ``EC<p, a, b>`` for a (usually large) prime p and integers a and b is a
`group <https://en.wikipedia.org/wiki/Group_(mathematics)>`_. The members of the group are
(x, y) points (where x and y are integers over the field of integers modulo p) that satisfy the relation
``y**2 = x**3 + a*x + b (mod p)``. There is a group operation ``+`` and an extra point known
as the "point at infinity" thrown in to act as the identity for the group.
The group operation is a marvelous thing but unfortunately this margin is too narrow to contain
a description of it, so please refer to the links above for more information.
:param p: a prime
:param a: an integer coefficient
:param b: an integer constant
:param order: (optional) the order of the group made up by the points on the
curve. Any point on the curve times the order is the identity for this
group (the point at infinity). Although this is optional, it's required
for some operations.
"""
def __init__(self, p, a, b, order=None):
"""
"""
self._p = p
self._a = a
self._b = b
self._order = order
self._infinity = Point(None, None, self)
def p(self):
"""
:returns: the prime modulus of the curve.
"""
return self._p
def order(self):
"""
:returns: the order of the curve.
"""
return self._order
def infinity(self):
""":returns: the "point at infinity" (also known as 0, or the identity)."""
return self._infinity
def contains_point(self, x, y):
"""
:param x: x coordinate of a point
:param y: y coordinate of a point
:returns: True if the point (x, y) is on the curve, False otherwise
"""
if x is None and y is None:
return True
return (y * y - (x * x * x + self._a * x + self._b)) % self._p == 0
def add(self, p0, p1):
"""
:param p0: a point
:param p1: a point
:returns: the sum of the two points
"""
p = self._p
infinity = self._infinity
if p0 == infinity:
return p1
if p1 == infinity:
return p0
x0, y0 = p0
x1, y1 = p1
if (x0 - x1) % p == 0:
if (y0 + y1) % p == 0:
return infinity
else:
slope = ((3 * x0 * x0 + self._a) * self.inverse_mod(2 * y0, p)) % p
else:
slope = ((y1 - y0) * self.inverse_mod(x1 - x0, p)) % p
x3 = (slope * slope - x0 - x1) % p
y3 = (slope * (x0 - x3) - y0) % p
return self.Point(x3, y3)
def multiply(self, p, e):
"""
multiply a point by an integer.
:param p: a point
:param e: an integer
:returns: the result, equivalent to adding p to itself e times
"""
if self._order:
e %= self._order
if p == self._infinity or e == 0:
return self._infinity
e3 = 3 * e
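# Scanning the bits of e and 3*e together recodes e in signed digits:
# where the bits agree nothing is added, where only the 3*e bit is set
# we add p, and where only the e bit is set we subtract p. This needs
# fewer group additions than plain double-and-add.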
i = _leftmost_bit(e3) >> 1
result = p
while i > 1:
result += result
if (e3 & i):
v = [result, result+p]
else:
v = [result-p, result]
result = v[0 if (e & i) else 1]
i >>= 1
return result
def inverse_mod(self, a, m):
"""
:param a: an integer
:param m: another integer
:returns: the value ``b`` such that ``a * b == 1 (mod m)``
"""
if a < 0 or m <= a:
a = a % m
# From Ferguson and Schneier, roughly:
c, d = a, m
uc, vc, ud, vd = 1, 0, 0, 1
while c != 0:
q, c, d = divmod(d, c) + (c,)
uc, vc, ud, vd = ud - q*uc, vd - q*vc, uc, vc
# At this point, d is the GCD, and ud*a+vd*m = d.
        # If d == 1, this means that ud is an inverse of a (mod m).
assert d == 1
if ud > 0:
return ud
else:
return ud + m
def Point(self, x, y):
"""
:returns: a :class:`Point <.Point>` object with coordinates ``(x, y)``
"""
return Point(x, y, self)
def __repr__(self):
return '{}({!r},{!r},{!r})'.format(self.__class__.__name__, self._p, self._a, self._b)
def __str__(self):
return 'y^2 = x^3 + {}*x + {} (mod {})'.format(self._a, self._b, self._p)
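# --- A minimal usage sketch (not part of the original module), using the classic
# --- textbook curve y^2 = x^3 + x + 1 over F_23. Assumes the companion Point
# --- class supports tuple-style unpacking and +/-, as the methods above expect.
if __name__ == '__main__':
    curve = Curve(23, 1, 1)
    assert curve.contains_point(3, 10) and curve.contains_point(9, 7)
    p, q = curve.Point(3, 10), curve.Point(9, 7)
    print(curve.add(p, q))           # (17, 20) on this curve
    print(curve.multiply(p, 2))      # (7, 12), i.e. point doubling
    print(curve.inverse_mod(7, 23))  # 10, since 7 * 10 == 70 == 1 (mod 23)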
|
[
"[email protected]"
] | |
67e7af372c2ef8efcd69e1cb3b3609ff36a04e9b
|
ed491b7539193c30ffefcc52af276f77fc98f979
|
/train_LSTM.py
|
bf356af65321d25c10d22c36291f2a455380ec77
|
[
"Apache-2.0"
] |
permissive
|
FredHuang16/cnn-lstm-bilstm-deepcnn-clstm-in-pytorch
|
fb5dcb345379deadc632a43f73a53c436a02ea42
|
8ef5b1321cf79063ee4b146c7502a31815e9f33b
|
refs/heads/master
| 2021-01-16T00:46:33.781766 | 2017-08-10T06:51:21 | 2017-08-10T06:51:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,640 |
py
|
import os
import sys
import torch
import torch.autograd as autograd
import torch.nn.functional as F
import torch.nn.utils as utils
import random
import shutil
random.seed(336)
torch.manual_seed(1234)
def train(train_iter, dev_iter, test_iter, model, args):
if args.cuda:
model.cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
# optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)
steps = 0
model_count = 0
model.train()
for epoch in range(1, args.epochs+1):
print("## ็ฌฌ{} ่ฝฎ่ฟญไปฃ๏ผๅ
ฑ่ฎก่ฟญไปฃ {} ๆฌก ๏ผ##".format(epoch, args.epochs))
        # the shuffle attribute of train_iter was already initialized to True by data.Iterator.splits()
for batch in train_iter:
feature, target = batch.text, batch.label.data.sub_(1)
            target = autograd.Variable(target)
if args.cuda:
feature, target = feature.cuda(), target.cuda()
optimizer.zero_grad()
# model.zero_grad()
model.hidden = model.init_hidden(args.lstm_num_layers, args.batch_size)
if feature.size(1) != args.batch_size:
model.hidden = model.init_hidden(args.lstm_num_layers, feature.size(1))
logit = model(feature)
# target values >=0 <=C - 1 (C = args.class_num)
loss = F.cross_entropy(logit, target)
loss.backward()
            # prevent exploding gradients
            # utils.clip_grad_norm(model.parameters(), args.max_norm)
            # The commented line above lets the model overfit quickly, while the line
            # below slows overfitting down: the larger max_norm is, the faster the
            # model overfits.
utils.clip_grad_norm(model.parameters(), max_norm=1e-4)
optimizer.step()
steps += 1
if steps % args.log_interval == 0:
train_size = len(train_iter.dataset)
corrects = (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()
accuracy = float(corrects)/batch.batch_size * 100.0
sys.stdout.write(
'\rBatch[{}/{}] - loss: {:.6f} acc: {:.4f}%({}/{})'.format(steps,
train_size,
loss.data[0],
accuracy,
corrects,
batch.batch_size))
if steps % args.test_interval == 0:
eval(dev_iter, model, args)
if steps % args.save_interval == 0:
if not os.path.isdir(args.save_dir): os.makedirs(args.save_dir)
save_prefix = os.path.join(args.save_dir, 'snapshot')
save_path = '{}_steps{}.pt'.format(save_prefix, steps)
torch.save(model.state_dict(), save_path)
test_eval(test_iter, model, save_path, args)
model_count += 1
print("model_count \n", model_count)
return model_count
def eval(data_iter, model, args):
model.eval()
corrects, avg_loss = 0, 0
for batch in data_iter:
feature, target = batch.text, batch.label.data.sub_(1)
target = autograd.Variable(target)
if args.cuda:
feature, target = feature.cuda(), target.cuda()
model.hidden = model.init_hidden(args.lstm_num_layers, batch.batch_size)
logit = model(feature)
loss = F.cross_entropy(logit, target, size_average=False)
avg_loss += loss.data[0]
corrects += (torch.max(logit, 1)
[1].view(target.size()).data == target.data).sum()
size = len(data_iter.dataset)
# avg_loss = loss.data[0]/size
avg_loss = float(avg_loss)/size
accuracy = float(corrects)/size * 100.0
model.train()
print('\nEvaluation - loss: {:.6f} acc: {:.4f}%({}/{}) \n'.format(avg_loss,
accuracy,
corrects,
size))
def test_eval(data_iter, model, save_path, args):
model.eval()
corrects, avg_loss = 0, 0
for batch in data_iter:
feature, target = batch.text, batch.label.data.sub_(1)
target = autograd.Variable(target)
if args.cuda:
feature, target = feature.cuda(), target.cuda()
model.hidden = model.init_hidden(args.lstm_num_layers, batch.batch_size)
logit = model(feature)
loss = F.cross_entropy(logit, target, size_average=False)
avg_loss += loss.data[0]
corrects += (torch.max(logit, 1)
[1].view(target.size()).data == target.data).sum()
size = len(data_iter.dataset)
# avg_loss = loss.data[0]/size
avg_loss = float(avg_loss) / size
accuracy = float(corrects)/size * 100.0
model.train()
print('\nEvaluation - loss: {:.6f} acc: {:.4f}%({}/{}) \n'.format(avg_loss,
accuracy,
corrects,
size))
# test result
if os.path.exists("./Test_Result.txt"):
file = open("./Test_Result.txt", "a")
else:
file = open("./Test_Result.txt", "w")
file.write("model " + save_path + "\n")
file.write("Evaluation - loss: {:.6f} acc: {:.4f}%({}/{}) \n".format(avg_loss,
accuracy,
corrects,
size))
file.write("\n")
file.close()
shutil.copy("./Test_Result.txt", "./snapshot/" + args.mulu + "/Test_Result.txt")
def predict(text, model, text_field, label_field):
assert isinstance(text, str)
model.eval()
text = text_field.tokenize(text)
text = text_field.preprocess(text)
text = [[text_field.vocab.stoi[x] for x in text]]
x = text_field.tensor_type(text)
x = autograd.Variable(x, volatile=True)
print(x)
output = model(x)
_, predicted = torch.max(output, 1)
    return label_field.vocab.itos[predicted.data[0][0]+1]
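# Minimal usage sketch (assumes torchtext Field objects built elsewhere in this repo):
# sentiment = predict("this movie is great", model, text_field, label_field)
# print(sentiment)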
|
[
"[email protected]"
] | |
da2e3cb7bc9bcca1f4ca59c793fb86ef425b338d
|
0b2833b1b129d72ff805e3d775df00f79f421bfb
|
/parse_noise.py
|
53bbdcb73f13007c68694e2313de58134e3b8308
|
[] |
no_license
|
xssChauhan/torino-noise
|
95f2d8b8c9937c3135221f53da990716c010be38
|
10d58bdea11dc0a716e22ea325b901a4b6b59191
|
refs/heads/master
| 2023-05-31T16:52:34.217809 | 2021-06-27T19:56:43 | 2021-06-27T19:56:43 | 380,284,216 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 628 |
py
|
import pandas as pd
import numpy as np
from datetime import datetime
from datetime import time
def read_noise_csv(file):
df = pd.read_csv(file, delimiter=";", skiprows=list(range(8)))
return df
def combine_date_hour(row):
date = row["Data"]
hour = row["Ora"]
parsed_date = datetime.strptime(
date, "%d-%m-%Y"
)
parsed_hour = datetime.strptime(
hour, "%H:%M"
).time()
combined_date = datetime.combine(
parsed_date, parsed_hour
)
return combined_date
def convert_float(num):
    if isinstance(num, str):
        return float(num.replace(",", "."))
    return num
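# Minimal usage sketch (the file name and the 'Valore' column are assumptions;
# 'Data'/'Ora' are the column names used above):
# df = read_noise_csv("torino_noise.csv")
# df["timestamp"] = df.apply(combine_date_hour, axis=1)
# df["value"] = df["Valore"].apply(convert_float)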
|
[
"[email protected]"
] | |
90734113d3bb75eb87aa511a4584a9e3234f7918
|
625d113273cf9bd6474e8ac12cd8afe2e4f8d50b
|
/Disposable Teleports.py
|
9147fab6c0a205e64c33d3d93de55a5efabb3f8e
|
[] |
no_license
|
hrvach/CheckiO
|
9e4b04cf93c8f4da7bd61b55a3cf9bdcaea0836f
|
d25bbdb3b33441f45f4b587fd2a651c5c086484a
|
refs/heads/master
| 2020-03-25T02:25:57.273381 | 2018-08-02T12:13:28 | 2018-08-02T12:13:28 | 143,289,238 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 429 |
py
|
def checkio(teleports_string):
teleports, paths = {frozenset(i) for i in teleports_string.split(',')}, []
def walk(station, links, path):
if len(set(path)) == 8 and station == '1':
paths.append(path+station)
for link in filter(lambda x: station in x, links):
walk(next(iter(link-{station})), links-{link}, path+station)
walk('1', teleports, '')
return min(paths, key=len)
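# Sample run, using the ring network from the CheckiO task statement:
# checkio("12,23,34,45,56,67,78,81")  # -> a shortest valid route, e.g. "123456781"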
|
[
"[email protected]"
] | |
60fe823701b35b1c4581a56117b8f31fd2dd832e
|
7d6bc1812e5ab4f55c53ae168a6f2317b35e037c
|
/l10n_cl_hr_payroll/model/hr_bonus.py
|
2485352eca5af3bda8f1f15699506ec45a91e6c5
|
[] |
no_license
|
suningwz/ODOO13-3
|
f172c2e16d2d3310f7c0cf88ff5b0fb5f1e1d15c
|
527732e22807be0d6a692ab808b74794cb027914
|
refs/heads/master
| 2022-12-15T10:19:51.328186 | 2020-09-17T22:16:14 | 2020-09-17T22:16:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,491 |
py
|
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
RO_STATES = {'draft': [('readonly', False)]}
class HrBonusSheet(models.Model):
_name = 'hr.bonus.sheet'
_description = 'Hoja de Bonos RRHH'
_inherit = ['mail.thread']
_order = 'date_issue desc, name desc, id desc'
name = fields.Char('Ref', copy=False, default='Nuevo')
date_issue = fields.Date('Fecha solicitud', default=fields.Date.today, copy=False, required=True, readonly=True, states=RO_STATES)
state = fields.Selection([
('draft', 'Borrador'),
('pending', 'Por aprobar'),
('approve', 'Aprobado'),
('cancel', 'Cancelado'),
        ('done', 'En nómina')
], 'Estado', default='draft', copy=False, tracking=True)
    description = fields.Text('Descripción', compute='_compute_description')
line_ids = fields.One2many('hr.bonus.line', 'sheet_id', 'Bonos', readonly=True, states=RO_STATES)
currency_id = fields.Many2one('res.currency', default=lambda self: self.env.company.currency_id)
bonus_count = fields.Integer('Cant. bonos', compute='_compute_total')
amount_total = fields.Monetary('Monto Total', compute='_compute_total')
@api.depends('line_ids')
def _compute_description(self):
for record in self:
record.description = ', '.join(record.mapped('line_ids.balance_id.desc'))
@api.depends('line_ids')
def _compute_total(self):
for record in self:
record.amount_total = sum(record.line_ids.filtered(lambda l: l.balance_id.um == '$').mapped('amount'))
record.bonus_count = len(record.line_ids)
def back_draft(self):
for record in self:
if record.state in ['approve', 'done', 'cancel'] and not self.env.user.has_group('hr.group_hr_manager'):
raise ValidationError(_('No tiene permisos para devolver a borrador el documento %s.') % record.name)
self.write({'state': 'draft'})
def confirm(self):
self.write({'state': 'pending'})
def approve(self):
self.write({'state': 'approve'})
def cancel(self):
self.write({'state': 'cancel'})
@api.model
def create(self, vals):
vals['name'] = self.env['ir.sequence'].next_by_code('hr.bonus.sheet')
return super(HrBonusSheet, self).create(vals)
def unlink(self):
for record in self:
if record.state != 'draft':
raise ValidationError(_('%s debe estar en estado Borrador para poder borrarlo.') % record.name)
return super(HrBonusSheet, self).unlink()
def create_haberesydesc(self):
employees = {}
for line in self.mapped('line_ids'):
            # The following cases must be handled:
            # 1.- The line to insert has no end date and neither does the existing entry: it always matches
            # 2.- The line has an end date and the entry does not: the line's end date must be after the entry's start date
            # 3.- The line has no end date but the entry does: the line's start date must be before the entry's end date
            # 4.- Both the line and the entry have end dates: the exact date ranges are compared
            if line.date_to:
                hyd_ids = line.employee_id.balance_ids.filtered(lambda hd: hd.balance_id == line.balance_id and line.date_to >= hd.fecha_desde and (not hd.fecha_hasta or line.date_from <= hd.fecha_hasta))
            else:
                hyd_ids = line.employee_id.balance_ids.filtered(lambda hd: hd.balance_id == line.balance_id and (not hd.fecha_hasta or line.date_from < hd.fecha_hasta))
if hyd_ids:
hyd_ids.monto = line.amount
vals = False
else:
vals = (0, 0, {
'balance_id': line.balance_id.id,
'date_from': line.date_from,
'date_to': line.date_to,
'amount': line.amount
})
if vals:
if line.employee_id in employees:
employees[line.employee_id].append(vals)
else:
employees[line.employee_id] = [vals]
for employee, values in employees.items():
employee.balance_ids = values
self.write({'state': 'done'})
class HrBonusLine(models.Model):
_name = 'hr.bonus.line'
    _description = 'Línea de bonos RRHH'
_rec_name = 'balance_id'
balance_id = fields.Many2one('hr.balance', 'Haber/Descuento', required=True, ondelete='cascade')
employee_id = fields.Many2one('hr.employee', 'Empleado', required=True, ondelete='cascade')
date_from = fields.Date('Fecha desde', required=True, default=fields.Date.today)
date_to = fields.Date('Fecha hasta')
amount = fields.Float('Monto', required=True)
sheet_id = fields.Many2one('hr.bonus.sheet', 'Hoja de bonos', ondelete='cascade')
um = fields.Selection([
('$', '$'),
('u', 'u'),
('%', '%')], 'UM', related='balance_id.um', readonly=True)
@api.constrains('date_from', 'date_to')
def _check_dates(self):
for record in self:
if record.date_to and record.date_from > record.date_to:
raise ValidationError(_('Fecha desde no puede ser mayor a Fecha hasta.'))
@api.constrains('amount')
def _check_amount(self):
for record in self:
if record.amount <= 0:
raise ValidationError(_('Monto debe ser mayor a 0.'))
|
[
"[email protected]"
] | |
7fda55dbef1e0a4b55a7c58ca917e5e021f7952d
|
5d28c38dfdd185875ba0edaf77281e684c81da0c
|
/dev/update_ml_package_versions.py
|
4d3487199c6883792b944628ee0461737a34b53a
|
[
"Apache-2.0"
] |
permissive
|
imrehg/mlflow
|
3a68acc1730b3ee6326c1366760d6ddc7e66099c
|
5ddfe9a1b48e065540094d83125040d3273c48fa
|
refs/heads/master
| 2022-09-24T05:39:02.767657 | 2022-09-20T00:14:07 | 2022-09-20T00:14:07 | 244,945,486 | 1 | 0 |
Apache-2.0
| 2020-03-04T16:11:54 | 2020-03-04T16:11:53 | null |
UTF-8
|
Python
| false | false | 3,569 |
py
|
"""
A script to update the maximum package versions in 'mlflow/ml-package-versions.yml'.
# Prerequisites:
$ pip install packaging pyyaml
# How to run (make sure you're in the repository root):
$ python dev/update_ml_package_versions.py
"""
import argparse
import json
from packaging.version import Version
import re
import sys
import urllib.request
import yaml
def read_file(path):
with open(path) as f:
return f.read()
def save_file(src, path):
with open(path, "w") as f:
f.write(src)
def get_package_versions(package_name):
url = "https://pypi.python.org/pypi/{}/json".format(package_name)
with urllib.request.urlopen(url) as res:
data = json.load(res)
def is_dev_or_pre_release(version_str):
v = Version(version_str)
return v.is_devrelease or v.is_prerelease
return [
version
for version, dist_files in data["releases"].items()
if len(dist_files) > 0 and not is_dev_or_pre_release(version)
]
def get_latest_version(candidates):
return sorted(candidates, key=Version, reverse=True)[0]
def update_max_version(src, key, new_max_version, category):
"""
Examples
========
>>> src = '''
... sklearn:
... ...
... models:
... minimum: "0.0.0"
... maximum: "0.0.0"
... xgboost:
... ...
... autologging:
... minimum: "1.1.1"
... maximum: "1.1.1"
... '''.strip()
>>> new_src = update_max_version(src, "sklearn", "0.1.0", "models")
>>> new_src = update_max_version(new_src, "xgboost", "1.2.1", "autologging")
>>> print(new_src)
sklearn:
...
models:
minimum: "0.0.0"
maximum: "0.1.0"
xgboost:
...
autologging:
minimum: "1.1.1"
maximum: "1.2.1"
"""
pattern = r"({key}:.+?{category}:.+?maximum: )\".+?\"".format(
key=re.escape(key), category=category
)
# Matches the following pattern:
#
# <key>:
# ...
# <category>:
# ...
# maximum: "1.2.3"
return re.sub(pattern, r'\g<1>"{}"'.format(new_max_version), src, flags=re.DOTALL)
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument(
"-p",
"--path",
help="Path to the ML package versions yaml (default: mlflow/ml-package-versions.yml)",
default="mlflow/ml-package-versions.yml",
required=False,
)
return parser.parse_args(args)
def main(args):
args = parse_args(args)
yml_path = args.path
old_src = read_file(yml_path)
new_src = old_src
config_dict = yaml.load(old_src, Loader=yaml.SafeLoader)
for flavor_key, config in config_dict.items():
for category in ["autologging", "models"]:
if (category not in config) or config[category].get("pin_maximum", False):
continue
print("Processing", flavor_key, category)
package_name = config["package_info"]["pip_release"]
max_ver = config[category]["maximum"]
versions = get_package_versions(package_name)
unsupported = config[category].get("unsupported", [])
versions = set(versions).difference(unsupported) # exclude unsupported versions
latest_version = get_latest_version(versions)
if max_ver == latest_version:
continue
new_src = update_max_version(new_src, flavor_key, latest_version, category)
save_file(new_src, yml_path)
if __name__ == "__main__":
main(sys.argv[1:])
|
[
"[email protected]"
] | |
0184f7b0157794daaa04d557ec2d252d71771506
|
13625dd7375297b066ccd69d6c229e9a1535c9b2
|
/payment/migrations/0002_auto_20201222_1654.py
|
0b3a4704493f451e693f9d894903346df0675eeb
|
[] |
no_license
|
rajman01/investfy
|
9d5fa3ed7593ec13db575016fc839664630318af
|
a4c8bf16ba7a1ce38d1370e4779284a4d6426733
|
refs/heads/main
| 2023-09-01T19:10:18.411861 | 2023-08-28T02:30:23 | 2023-08-28T02:30:23 | 320,408,218 | 0 | 1 | null | 2023-08-28T02:30:24 | 2020-12-10T22:46:03 | null |
UTF-8
|
Python
| false | false | 592 |
py
|
# Generated by Django 3.1.3 on 2020-12-22 15:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('payment', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='account',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='accounts', to=settings.AUTH_USER_MODEL),
),
]
|
[
"[email protected]"
] | |
b0b869cbd233806fd2ad95371f63f04ff9c5a250
|
634514a9c10e32051964b179cc807d089d31124e
|
/S2l/Thesis_Ch3/Exp1_reach3dof/Scripts/gym_test_random.py
|
4aef80b71d849bb60fdb335eafc4e754e3b2312f
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
leopauly/Observation-Learning-Simulations
|
34009872a1f453ffc4ae7ddced7447a74ff704c4
|
462c04a87c45aae51537b8ea5b44646afa31d3a5
|
refs/heads/master
| 2021-08-04T10:55:42.900015 | 2021-07-05T13:41:09 | 2021-07-05T13:41:09 | 129,761,220 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,872 |
py
|
#### Random agent in gym env
##Imports
import numpy as np
import gym
import matplotlib.pyplot as plt
## Defining env
env = gym.make('Pusher3DOFReal-v1')
env.switch=-5
env.initialize_env()
#env = gym.make('Pusher7DOF-v1')
print(env.observation_space.shape[0])
print(env.action_space.shape[0])
## Defining vars
LR = 1e-3
goal_steps = 500
score_requirement = 50
initial_games = 10000
def some_random_games_first():
# Each of these is its own game.
for episode in range(200):
env.reset()
#env.render(mode='human')
#img=env.render(mode='rgb_array') # Get the observation
#plt.imshow(img)
#plt.show()
        # this is each frame, up to 200... but we won't make it that far.
while(True):
            # This samples a random action from the environment's action space
            # (a continuous control vector here, not a discrete left/right choice).
action = env.action_space.sample()
# This will display the environment
# Only display if you really want to see it.
# Takes much longer to display it.
#env.render(mode='human')
#img=env.render(mode='human') # Get the observation
#plt.imshow(img)
#plt.show()
print(env.action_space.shape,np.array(env.action_space.high).tolist())
# this executes the environment with an action,
# and returns the observation of the environment,
# the reward, if the env is over, and other info.
observation, reward, done, info = env.step([1,-2,-1])
env.render(mode='human')
#observation, reward, done, info = env.step([0,1,0,0])
#print(env.get_eval())
#if done:
# break
some_random_games_first()
|
[
"[email protected]"
] | |
04b46a126c8d2fbabf65762af4d074ed560955e1
|
644bcdabf35261e07c2abed75986d70f736cb414
|
/python-project/Maths/plot9.py
|
3fc9fd26fdfdbe4a19540be4d5d84ac23ece7649
|
[] |
no_license
|
matcianfa/playground-X1rXTswJ
|
f967ab2c2cf3905becafb6d77e89a31414d014de
|
67859b496e407200afb2b1d2b32bba5ed0fcc3f0
|
refs/heads/master
| 2023-04-03T11:56:15.878757 | 2023-03-24T15:52:37 | 2023-03-24T15:52:37 | 122,226,979 | 5 | 20 | null | null | null | null |
UTF-8
|
Python
| false | false | 117 |
py
|
import matplotlib.pyplot as plt
import numpy as np
x = [ 1, 3, 2, 1]
y = [ 2, 3, 1, 3]
plt.scatter(x,y)
plt.show()
|
[
"[email protected]"
] | |
70dde7f0aba2392ea2ae24df204e883b3e0f9b2a
|
1f177b5e7bdaca49076c6ff806f5e2be9a86e834
|
/algorithm/190121_array/practice_01.py
|
e802a0b6dc1111ee6056bb6ebc4cea381642c8ca
|
[] |
no_license
|
silverlyjoo/TIL
|
9e19ba407a9dc82c231e66e352f1c7783e767782
|
98a139770a6d19598d787674bcf20d2fe744ced0
|
refs/heads/master
| 2021-08-17T02:10:35.101212 | 2019-08-26T08:21:32 | 2019-08-26T08:21:32 | 162,099,046 | 6 | 1 | null | 2021-06-10T21:20:36 | 2018-12-17T08:32:39 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 530 |
py
|
# Sample grid (immediately overwritten by the input-reading loop below)
arr = [[1,3,4,5,9],[10,11,2,13,14],[15,6,7,8,16],[17,18,12,19,20],[21,22,23,24,25]]
# arr.append(map(int, input().split()))
arr = [[0 for ze in range(5)] for ro in range(5)]
for i in range(5):
arr[i] = list(map(int, input().split()))
def iswall(testX, testY):
if testX < 0 or testX >=5:
return False
    if testY < 0 or testY >= 5:
return False
return True
def my_abs(num):
    if num < 0:
        num *= -1
    return num
ans_list = []
for i in range(len(arr)):
for j in range(len(arr[i])):
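        # Hedged completion: the original file ends mid-loop. A common version of
        # this exercise sums |cell - neighbour| over the four in-bounds neighbours.
        total = 0
        for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            if iswall(i + dx, j + dy):
                total += my_abs(arr[i][j] - arr[i + dx][j + dy])
        ans_list.append(total)
print(sum(ans_list))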
|
[
"[email protected]"
] | |
9322317b4bb5487ab61113643c8fa3661466d872
|
491dd5a8f7f53ff05e4efc599bd6f32e840c8806
|
/scripts/compose_api.py
|
222b4da5e4e41e1d6039090fbf5b875e0c36ec6e
|
[
"CC0-1.0"
] |
permissive
|
forgeRW/WALKOFF
|
9b50ffa4eff42286d709c1f13069f836a3af59b5
|
52b688f427e5360f7acab6cdae701fe67f7a0712
|
refs/heads/master
| 2021-01-17T04:25:21.154843 | 2018-02-06T19:39:27 | 2018-02-06T19:39:27 | 82,941,065 | 0 | 0 | null | 2017-02-23T15:29:03 | 2017-02-23T15:29:03 | null |
UTF-8
|
Python
| false | false | 1,390 |
py
|
import logging
import os
import sys
sys.path.append(os.path.abspath('.'))
from walkoff.config import paths
logger = logging.getLogger(__name__)
def read_and_indent(filename, indent):
indent = ' ' * indent
with open(filename, 'r') as file_open:
return ['{0}{1}'.format(indent, line) for line in file_open]
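# For reference, a sketch of the inlining performed below: a line such as
#     $ref: definitions.yaml
# is replaced by the lines of definitions.yaml, re-indented to the '$ref' column.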
def compose_api():
with open(os.path.join(paths.api_path, 'api.yaml'), 'r') as api_yaml:
final_yaml = []
for line_num, line in enumerate(api_yaml):
if line.lstrip().startswith('$ref:'):
split_line = line.split('$ref:')
reference = split_line[1].strip()
indentation = split_line[0].count(' ')
try:
final_yaml.extend(read_and_indent(os.path.join(paths.api_path, reference), indentation))
final_yaml.append(os.linesep)
except (IOError, OSError):
logger.error('Could not find or open referenced YAML file {0} in line {1}'.format(reference,
line_num))
else:
final_yaml.append(line)
with open(os.path.join(paths.api_path, 'composed_api.yaml'), 'w') as composed_yaml:
composed_yaml.writelines(final_yaml)
if __name__ == '__main__':
compose_api()
|
[
"[email protected]"
] | |
045662700cb8d10239ea775433abc0ace3cb9c57
|
c71b00b530efe7e8fe4e666b9156ff30976bd72d
|
/_draft/coordtransform/coordtransform.py
|
e917e065e6bf08cbb18cf2aff5008487a6d0c4a0
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
IfeanyiEkperi/autopew
|
af714537d3438100d01d3c988930fa3e8d270581
|
964c379f7e3a7b15259672df37629baee46e158c
|
refs/heads/master
| 2020-08-15T12:14:01.165061 | 2019-10-15T16:08:27 | 2019-10-15T16:08:27 | 215,340,368 | 0 | 0 |
NOASSERTION
| 2019-10-15T16:02:54 | 2019-10-15T16:02:53 | null |
UTF-8
|
Python
| false | false | 3,094 |
py
|
import itertools
import logging
import numpy as np
from autopew.transform import (
    affine_from_AB,
    transform_from_affine,
    inverse_affine_transform,
)
logger = logging.getLogger(__name__)
class CoordinateTransform(object):
library = []
def __init__(self, source, dest, *args, **kwargs):
self.forward = None
self.reverse = None
self.source = source
self.dest = dest
# todo: methods for dealing with maximum dimensionality of the transform
# if you create a 3D-3D transform you can keep all dims, but dims will be lost
# for 3D-2D, and any subsequent transforms.
self._register()
if not (self.dest, self.source) in self._links:
self._invert # register inverse
self._iter_library()
@property
def _links(self):
return set(
zip([i.source for i in self.library], [i.dest for i in self.library])
)
@property
def _domains(self):
return set([i.dest for i in self.library] + [i.source for i in self.library])
def _register(self):
"""
Register the Coordinate Transform in the Transform Library
"""
if self not in self.library:
self.library.append(self)
else:
logger.warning("Transform Already Exists in Library")
def _iter_library(self):
"""
Calibrate all relevant transforms between available sources and destination
coordinate systems.
"""
logger.debug("Iterating over transform library.")
# identify all coordinate reference systems
crs = self._domains
present = set([(c.source, c.dest) for c in self.library])
possible = itertools.product(crs, repeat=2)
for a, b in possible:
if (a != b) and ((a, b) not in present):
print("Need to add ({}, {})".format(a, b))
pass
@property
def _invert(self):
logger.debug("Creating inverse for {}".format(str(self)))
self.inverse = CoordinateTransform(self.dest, self.source)
self.inverse.inverse = self
self.inverse.forward, self.inverse.reverse = self.reverse, self.forward
return self.inverse
def calibrate(self, sourcepoints, destpoints):
logger.debug("Calibrating {}".format(str(self)))
        self.affine = affine_from_AB(sourcepoints, destpoints)
        self.forward = transform_from_affine(self.affine)
        self.reverse = inverse_affine_transform(self.affine)
self.inverse.forward, self.inverse.reverse, self.inverse.affine = (
self.reverse,
self.forward,
np.linalg.inv(self.affine),
)
def __eq__(self, other):
if other.__class__ == self.__class__:
return (self.source == other.source) and (self.dest == other.dest)
else:
return False
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.source, self.dest)
def __str__(self):
return "{} from {} to {}".format(
self.__class__.__name__, self.source, self.dest
)
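# Minimal usage sketch of this draft API (hedged; the point arrays are assumed to
# be matching Nx2 numpy arrays of calibration coordinates):
# t = CoordinateTransform("stage", "image")
# t.calibrate(source_points, dest_points)
# image_coords = t.forward(source_points)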
|
[
"[email protected]"
] | |
e7f8f5e7cca6d1c74426ec28482859f35ce8c31f
|
e68a59efcf3591a7efedb2f66d26d0f01607e288
|
/simulator/main.py
|
07df44ae9e227d6cdcb934e3c3462ced3d185252
|
[] |
no_license
|
fagan2888/core-1
|
efc0fb9e4a3139c1174d7caf539163f34b966898
|
97930712b71ebdb6ad587a2dee2bf6b8ac0dbac7
|
refs/heads/master
| 2020-12-01T22:46:18.836996 | 2017-12-15T16:43:30 | 2017-12-15T16:43:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,633 |
py
|
from flask import Flask, url_for
from flask import jsonify
from flask import request
from connectivity.bitstamp_api import BitstampAPI
from constants import SIMULATOR_REPLAYER_DATA_FILE
from constants import SIMULATOR_USE_REPLAYER, API_URL_V2_TICKER
from constants import TRADING_DEFAULT_CURRENCY_PAIR
from simulator.logic import send_order, market_order, UserAccount
from simulator.replayer import Replayer
app = Flask(__name__)
user = UserAccount()
if SIMULATOR_USE_REPLAYER:
replayer = Replayer(data_file=SIMULATOR_REPLAYER_DATA_FILE)
else:
replayer = None
@app.errorhandler(404)
def page_not_found(err):
return 'Endpoint not found. Browse / to list all the endpoints.'
@app.route('/reset/', methods=['GET', 'POST'], strict_slashes=False)
def reset():
global user, replayer
user = UserAccount()
if replayer is not None:
replayer.reset()
return 'Reset.'
@app.route('/', methods=['GET', 'POST'], strict_slashes=False)
def list_all_end_points():
def has_no_empty_params(rule_):
defaults = rule_.defaults if rule_.defaults is not None else ()
arguments = rule_.arguments if rule_.arguments is not None else ()
return len(defaults) >= len(arguments)
links = []
for rule in app.url_map.iter_rules():
if 'GET' in rule.methods and has_no_empty_params(rule):
url = url_for(rule.endpoint, **(rule.defaults or {}))
links.append((url, rule.endpoint))
return '<b>' + '<br/><br/>'.join(
sorted(['<a href="{0}">{0}</a> -> {1}()'.format(l, v) for (l, v) in links])) + '</b>'
@app.route('/ticker/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/ticker/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
def ticker():
if SIMULATOR_USE_REPLAYER:
return jsonify(replayer.next())
else:
tick = BitstampAPI.ticker(API_URL_V2_TICKER)
tick.update({k: str(v) for (k, v) in tick.items()})
return jsonify(tick)
@app.route('/balance/', methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/balance/', methods=['GET', 'POST'], strict_slashes=False)
def balance():
return jsonify(user.balance())
# if request.method == 'GET':
# # for the web interface.
# return json.dumps(user.balance(), indent=4).replace('\n', '<br/>')
# # post. returns a JSON.
# return user.balance()
@app.route('/buy/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/buy/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
def buy():
return jsonify(send_order(user, is_buy=True))
@app.route('/sell/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/sell/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
def sell():
return jsonify(send_order(user, is_buy=False))
@app.route('/buy/market/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/buy/market/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
def buy_market():
return jsonify(market_order(user, is_buy=True))
@app.route('/sell/market/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/sell/market/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
def sell_market():
return jsonify(market_order(user, is_buy=False))
@app.route('/cancel_order/', methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/cancel_order/', methods=['GET', 'POST'], strict_slashes=False)
def cancel_order():
if request.method == 'POST':
try:
data = request.form
print(data)
order_id = data['id']
del user.open_orders[order_id]
return 'Order canceled.'
except:
return jsonify({'error': 'Invalid order id'})
else:
return 'Only available through POST.'
# {'id': '320464858', 'key': '***', 'signature': '***', 'nonce': '1506251383558164'}
@app.route('/order_status/', methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/order_status/', methods=['GET', 'POST'], strict_slashes=False)
def order_status():
if request.method == 'POST':
data = request.form
print(data)
order_id = data['id']
return user.order_statuses[order_id]
@app.route('/open_orders/all/', methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/open_orders/all/', methods=['GET', 'POST'], strict_slashes=False)
def open_orders_all():
return b'[]'
@app.route('/transactions/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/transactions/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
def transactions():
    return jsonify(user.transactions)
@app.route('/user_transactions/', methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/user_transactions/', methods=['GET', 'POST'], strict_slashes=False)
def user_transactions():
    return jsonify(user.transactions[::-1])
if __name__ == '__main__':
"""
curl -X POST http://127.0.0.1:5000/buy/btceur/ -d "{'amount': 0.003, 'key': '***', 'signature': '***', 'nonce': '1506251375795815'}"
curl -X POST http://127.0.0.1:5000/buy/market/btceur/ -d "amount=0.03&nounce=033"
"""
app.run()
# export PYTHONPATH=../:$PYTHONPATH; python3 main.py
|
[
"[email protected]"
] | |
4a4a7d6aa42fc38b95683641c7ce780278223e45
|
fff94a56c2992b6930d9964f63dba1ddb32c3193
|
/setup.py
|
5c6206248384bc36faeeb5f551001680e81a22bd
|
[
"MIT"
] |
permissive
|
veltzer/pylogconf
|
fb94ee0bf16168ab7d169b10038fa28f72df5f3b
|
b1a965fa25199768ad1432990ac6ec8d9bb9ac5e
|
refs/heads/master
| 2023-07-06T11:12:30.656069 | 2023-07-01T14:40:27 | 2023-07-01T14:40:27 | 78,429,800 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,519 |
py
|
import setuptools
def get_readme():
with open('README.rst') as f:
return f.read()
setuptools.setup(
# the first three fields are a must according to the documentation
name="pylogconf",
version="0.0.37",
packages=[
"pylogconf",
],
# from here all is optional
description="correctly configure python logging",
long_description=get_readme(),
long_description_content_type="text/x-rst",
author="Mark Veltzer",
author_email="[email protected]",
maintainer="Mark Veltzer",
maintainer_email="[email protected]",
keywords=[
"python",
"logging",
"configuration",
"easy",
"yaml",
"json",
"debug",
],
url="https://veltzer.github.io/pylogconf",
download_url="https://github.com/veltzer/pylogconf",
license="MIT",
platforms=[
"python3",
],
install_requires=[
"pyfakeuse",
"logging_tree",
"pyyaml",
],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
)
|
[
"[email protected]"
] | |
1e45b04e9cbebb6d6b50aa7b99ef5585e8af6788
|
23bc70263cc5355a247dd242d9dc35fb64d1ffbc
|
/portfoliyo/tests/view/users/test_forms.py
|
12ec9945d65a75e37e0b901ac5174ffc64df7832
|
[] |
no_license
|
sdevani/portfoliyo
|
c8abd2c7328a4a7b75d630db5ff74f2e20bbd749
|
e85ae37ccbc404a26751539ea756fce484b2db62
|
refs/heads/master
| 2021-01-16T23:01:27.906389 | 2013-04-26T19:29:34 | 2013-04-26T19:29:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,808 |
py
|
"""Tests for user-related forms."""
from django.test.utils import override_settings
from portfoliyo.tests import factories, utils
from portfoliyo.view.users import forms
class TestRegistrationForm(object):
"""Tests for RegistrationForm."""
base_data = {
'name': 'Some Body',
'email': '[email protected]',
'password': 'sekrit',
'password_confirm': 'sekrit',
'role': 'Some Role',
'country_code': 'us',
}
def test_register(self, db):
"""Registration creates active school_staff w/ unconfirmed email."""
form = forms.RegistrationForm(self.base_data.copy())
assert form.is_valid()
profile = form.save()
assert not profile.email_confirmed
assert profile.school_staff
assert profile.user.is_active
assert profile.country_code == 'us'
def test_source_phone(self, db):
"""Source phone is set according to country code."""
data = self.base_data.copy()
data['country_code'] = 'ca'
ca_phone = '+13216543987'
with override_settings(PORTFOLIYO_NUMBERS={'ca': ca_phone}):
form = forms.RegistrationForm(data)
assert form.is_valid()
profile = form.save()
assert profile.country_code == 'ca'
assert profile.source_phone == ca_phone
def test_unmatched_passwords(self, db):
"""Registration form not valid if passwords don't match."""
data = self.base_data.copy()
data['password'] = 'other-sekrit'
form = forms.RegistrationForm(data)
assert not form.is_valid()
assert form.errors['__all__'] == [u"The passwords didn't match."]
def test_dupe_email(self, db):
"""Registration form not valid if email already in use."""
factories.UserFactory.create(email='[email protected]')
form = forms.RegistrationForm(self.base_data.copy())
assert not form.is_valid()
assert form.errors['email'] == [
u"This email address is already in use. "
u"Please supply a different email address."
]
def test_add_school(self, db):
"""If addschool is True, create a new school and use it."""
data = self.base_data.copy()
data['addschool'] = '1'
data['addschool-name'] = "New School"
data['addschool-postcode'] = "12345"
form = forms.RegistrationForm(data)
assert form.is_valid()
profile = form.save()
school = profile.school
assert school.name == u"New School"
assert school.postcode == u"12345"
def test_add_school_takes_user_country(self, db):
"""New school takes country of new user."""
data = self.base_data.copy()
data['country_code'] = 'ca'
data['addschool'] = '1'
data['addschool-name'] = "New School"
data['addschool-postcode'] = "12345"
form = forms.RegistrationForm(data)
assert form.is_valid()
profile = form.save()
school = profile.school
assert school.country_code == 'ca'
def test_add_school_validation_error(self, db):
"""If addschool is True but fields not complete, validation error."""
data = self.base_data.copy()
data['addschool'] = 'True'
data['addschool-name'] = "New School"
data['addschool-postcode'] = ""
form = forms.RegistrationForm(data)
assert not form.is_valid()
assert form.errors['__all__'] == [u"Could not add a school."]
assert form.addschool_form.errors['postcode'] == [
u"This field is required."]
def test_no_addschool_validation_error_if_addschool_false(self, db):
"""If addschool is False, addschool form not bound."""
data = self.base_data.copy()
data['addschool'] = 'False'
data['email'] = 'not a valid email'
form = forms.RegistrationForm(data)
assert not form.is_valid()
assert not form.addschool_form.is_bound
def test_no_school(self, db):
"""If no school selected, create one."""
form = forms.RegistrationForm(self.base_data.copy())
assert form.is_valid()
profile = form.save()
school = profile.school
assert school.auto
assert not school.postcode
def test_add_dupe_school(self, db):
"""No integrity error on school-creation race condition."""
data = self.base_data.copy()
data['addschool'] = '1'
data['addschool-name'] = "My School"
data['addschool-postcode'] = "12345"
form = forms.RegistrationForm(data)
assert form.is_valid()
school = factories.SchoolFactory.create(
name="My School",
postcode="12345",
)
profile = form.save()
assert profile.school == school
class TestEditProfileForm(object):
def test_update_relationships(self, db):
"""
Updating role updates matching relationship descriptions to empty.
If I have my role set in my profile as 'foo' and I change it to 'bar',
any relationships where I am the elder and the relationship description
is 'foo' will be updated to '' (which falls back to profile role).
"""
rel1 = factories.RelationshipFactory.create(
description='foo', from_profile__role='foo')
rel2 = factories.RelationshipFactory.create(
description='bar', from_profile=rel1.elder)
form = forms.EditProfileForm(
{'name': 'New', 'role': 'new'}, instance=rel1.elder)
assert form.is_valid()
form.save()
rel1 = utils.refresh(rel1)
rel2 = utils.refresh(rel2)
assert rel1.description == ''
assert rel2.description == 'bar'
|
[
"[email protected]"
] | |
4bc4e9d63a5d83d6aa54557c80f837a7946c8875
|
a97f60aaa261a0e54f674c2bd1587694c41fd50d
|
/bitsofpluto.py
|
89c242e1333178b1686d146641168d9a19c5a9ac
|
[] |
no_license
|
asears/bitsofpluto
|
7c4bef986740d4518972e3c1c8418db97e173600
|
1cb6a80a9863e429693640b36608e6f2b360895e
|
refs/heads/master
| 2020-11-26T10:34:50.752896 | 2019-09-16T07:15:00 | 2019-09-16T07:15:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,617 |
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
Twitter bot. Tweeting a different bit of Pluto every six hours.
Photo by NASA's New Horizons spacecraft.
https://www.nasa.gov/image-feature/the-rich-color-variations-of-pluto/…
"""
from __future__ import print_function
import argparse
import os.path
import random
import sys
import tempfile
import webbrowser
import twitter # pip install twitter
import yaml # pip install PyYAML
import closest_point_to_pluto
from PIL import Image # pip install pillow
WIDTHS = [600, 800, 1000, 1200, 2000]
def load_yaml(filename):
"""
File should contain:
consumer_key: TODO_ENTER_YOURS
consumer_secret: TODO_ENTER_YOURS
access_token: TODO_ENTER_YOURS
access_token_secret: TODO_ENTER_YOURS
"""
with open(filename) as f:
data = yaml.safe_load(f)
keys = data.viewkeys() if sys.version_info.major == 2 else data.keys()
if not keys >= {
"access_token",
"access_token_secret",
"consumer_key",
"consumer_secret",
}:
sys.exit("Twitter credentials missing from YAML: " + filename)
return data
def tweet_it(string, credentials, image=None):
""" Tweet string using credentials """
if len(string) <= 0:
return
# Create and authorise an app with (read and) write access at:
# https://dev.twitter.com/apps/new
# Store credentials in YAML file
auth = twitter.OAuth(
credentials["access_token"],
credentials["access_token_secret"],
credentials["consumer_key"],
credentials["consumer_secret"],
)
t = twitter.Twitter(auth=auth)
print("TWEETING THIS:\n", string)
if args.test:
print("(Test mode, not actually tweeting)")
else:
if image:
print("Upload image")
# Send images along with your tweets.
# First just read images from the web or from files the regular way
with open(image, "rb") as imagefile:
imagedata = imagefile.read()
t_up = twitter.Twitter(domain="upload.twitter.com", auth=auth)
id_img = t_up.media.upload(media=imagedata)["media_id_string"]
else:
id_img = None # Does t.statuses.update work with this?
lat, long = closest_point_to_pluto.closest_point_to_pluto()
result = t.statuses.update(
status=string,
media_ids=id_img,
lat=lat,
long=long,
display_coordinates=True,
)
url = (
"http://twitter.com/"
+ result["user"]["screen_name"]
+ "/status/"
+ result["id_str"]
)
print("Tweeted:\n" + url)
if not args.no_web:
webbrowser.open(url, new=2) # 2 = open in a new tab, if possible
def bitsofpluto(pluto_filename):
""" Get a bit of Pluto """
pluto = Image.open(pluto_filename)
print(pluto.size)
while True:
width = random.choice(WIDTHS)
        height = width * 3 // 4  # integer height so randrange() and crop() get ints
print("width, height:", width, height)
x = random.randrange(0, pluto.width - width + 1)
y = random.randrange(0, pluto.height - height + 1)
print("x, y: ", x, y)
print("x + width, y + height: ", x + width, y + height)
bit_of_pluto = pluto.crop((x, y, x + width, y + height))
top = 0
left = 0
bottom = bit_of_pluto.height - 1
right = bit_of_pluto.width - 1
points = [
(left, top),
(right, top),
(right / 2, top),
(left, bottom / 2),
(right, bottom / 2),
(right / 2, bottom / 2),
(left, bottom),
(right, bottom),
(right / 2, bottom),
]
total_brightness = 0
total_dark_points = 0
for point in points:
r, g, b = bit_of_pluto.getpixel(point)
brightness = sum([r, g, b]) / 3 # 0 is black and 255 is white
print("r, g, b, brightness: ", r, g, b, brightness)
total_brightness += brightness
if brightness < 10:
total_dark_points += 1
print("total_brightness: ", total_brightness)
print("total_dark_points: ", total_dark_points)
if total_dark_points <= 6:
# bit_of_pluto.show()
break
outfile = os.path.join(tempfile.gettempdir(), "bitofpluto.jpg")
print("outfile: " + outfile)
bit_of_pluto.save(outfile, quality=95)
return outfile
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Tweeting a different bit of Pluto every six hours.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"-y",
"--yaml",
default="M:/bin/data/bitsofpluto.yaml",
help="YAML file location containing Twitter keys and secrets",
)
parser.add_argument(
"-nw",
"--no-web",
action="store_true",
help="Don't open a web browser to show the tweeted tweet",
)
parser.add_argument(
"-x",
"--test",
action="store_true",
help="Test mode: go through the motions but don't tweet anything",
)
parser.add_argument(
"-p",
"--pluto",
default="M:/bin/data/pluto/crop_p_color2_enhanced_release.7000x7000.png",
help="Path to a big photo of Pluto",
)
args = parser.parse_args()
credentials = load_yaml(args.yaml)
image = bitsofpluto(args.pluto)
tweet = "A bit of Pluto"
tweet_it(tweet, credentials, image)
# End of file
|
[
"[email protected]"
] | |
6c3f7cf99c6ff475862e67b3741ac051d547d733
|
0db97db08743783019efe022190f409d22ff95bd
|
/aliyun/api/rest/Rds20130528DescribeSecurityIpsRequest.py
|
8fba72f95fde069ab59928950cd9bfc12d5138d4
|
[
"Apache-2.0"
] |
permissive
|
snowyxx/aliyun-python-demo
|
8052e2a165f1b869affe632dda484d6ca203bd9b
|
ed40887ddff440b85b77f9b2a1fcda11cca55c8b
|
refs/heads/master
| 2021-01-10T03:37:31.657793 | 2016-01-21T02:03:14 | 2016-01-21T02:03:14 | 49,921,095 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 346 |
py
|
'''
Created by auto_sdk on 2014.10.21
'''
from aliyun.api.base import RestApi
class Rds20130528DescribeSecurityIpsRequest(RestApi):
def __init__(self,domain='rds.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.DBInstanceId = None
def getapiname(self):
return 'rds.aliyuncs.com.DescribeSecurityIps.2013-05-28'
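# Minimal usage sketch (assumes the RestApi base class exposes getResponse(), as in
# the other demo requests in this repo; the instance id is a placeholder):
# req = Rds20130528DescribeSecurityIpsRequest()
# req.DBInstanceId = "rm-xxxxxxxx"
# print(req.getResponse())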
|
[
"[email protected]"
] | |
b3f8a8af7f853575ba3e3f9c063337ec7292bf2e
|
2f91251d41f32346f2f6eb5f0a6e957f253f005f
|
/bit manipulation/python/leetcode289_Game_of_Life.py
|
02b9f64191bcfdb7a15dd241bf098483a1844fea
|
[
"Apache-2.0"
] |
permissive
|
wenxinjie/leetcode
|
8a5f666a1548d0d205cea09cb87fc2c65aec2b58
|
c459a01040c8fe0783e15a16b8d7cca4baf4612a
|
refs/heads/master
| 2020-03-24T21:56:00.662969 | 2018-09-06T21:25:58 | 2018-09-06T21:25:58 | 143,058,016 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,228 |
py
|
# According to the Wikipedia's article: "The Game of Life, also known simply as Life, is a cellular automaton devised by the British mathematician John Horton Conway in 1970."
# Given a board with m by n cells, each cell has an initial state live (1) or dead (0). Each cell interacts with its eight neighbors (horizontal, vertical, diagonal) using the following four rules (taken from the above Wikipedia article):
# Any live cell with fewer than two live neighbors dies, as if caused by under-population.
# Any live cell with two or three live neighbors lives on to the next generation.
# Any live cell with more than three live neighbors dies, as if by over-population..
# Any dead cell with exactly three live neighbors becomes a live cell, as if by reproduction.
# Write a function to compute the next state (after one update) of the board given its current state. The next state is created by applying the above rules simultaneously to every cell in the current state, where births and deaths occur simultaneously.
# Example:
# Input:
# [
# [0,1,0],
# [0,0,1],
# [1,1,1],
# [0,0,0]
# ]
# Output:
# [
# [0,0,0],
# [1,0,1],
# [0,1,1],
# [0,1,0]
# ]
class Solution(object):
def gameOfLife(self, board):
"""
:type board: List[List[int]]
:rtype: void Do not return anything, modify board in-place instead.
"""
if not board or len(board) == 0: return
m, n = len(board), len(board[0])
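        # Encoding trick: bit 0 stores the current state and bit 1 the next state,
        # so 3 means live -> live and 2 means dead -> live; the final >> 1 below
        # keeps only the next-state bit.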
for i in range(m):
for j in range(n):
live = self.countLive(board, m, n, i, j)
if board[i][j] == 1 and live in (2,3):
board[i][j] = 3
if board[i][j] == 0 and live == 3:
board[i][j] = 2
for i in range(m):
for j in range(n):
board[i][j] = board[i][j] >> 1
def countLive(self, board, m, n, i, j):
live = 0
for x in range(max(i-1, 0), min(i+2, m)):
for y in range(max(j-1,0), min(j+2, n)):
live += board[x][y] & 1
        live -= board[i][j] & 1  # the loops above counted the centre cell; remove it
return live
# Time: O(n^2)
# Space: O(1)
# Difficulty: medium
|
[
"[email protected]"
] | |
fae288441e708509afa743d412bbf58f5f5f1d63
|
6c37d1d2437a08e43b13d621d4a8da4da7135b3a
|
/yt_dlp/extractor/gotostage.py
|
112293bef56c46a7ec238dacf85905690703b07e
|
[
"Unlicense",
"GPL-2.0-or-later",
"MPL-2.0",
"BSD-3-Clause",
"GPL-3.0-or-later",
"LGPL-2.1-only",
"BSD-2-Clause",
"MIT"
] |
permissive
|
yt-dlp/yt-dlp
|
be040bde10cc40258c879c75ab30215686352824
|
d3d81cc98f554d0adb87d24bfd6fabaaa803944d
|
refs/heads/master
| 2023-09-05T21:15:21.050538 | 2023-09-05T20:35:23 | 2023-09-05T20:35:23 | 307,260,205 | 52,742 | 5,376 |
Unlicense
| 2023-09-14T05:22:08 | 2020-10-26T04:22:55 |
Python
|
UTF-8
|
Python
| false | false | 2,727 |
py
|
import json
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    try_get,
    url_or_none,
)
class GoToStageIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?gotostage\.com/channel/[a-z0-9]+/recording/(?P<id>[a-z0-9]+)/watch'
_TESTS = [{
'url': 'https://www.gotostage.com/channel/8901680603948959494/recording/60bb55548d434f21b9ce4f0e225c4895/watch',
'md5': 'ca72ce990cdcd7a2bd152f7217e319a2',
'info_dict': {
'id': '60bb55548d434f21b9ce4f0e225c4895',
'ext': 'mp4',
'title': 'What is GoToStage?',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 93.924711
}
}, {
'url': 'https://www.gotostage.com/channel/bacc3d3535b34bafacc3f4ef8d4df78a/recording/831e74cd3e0042be96defba627b6f676/watch?source=HOMEPAGE',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
metadata = self._download_json(
'https://api.gotostage.com/contents?ids=%s' % video_id,
video_id,
note='Downloading video metadata',
errnote='Unable to download video metadata')[0]
registration_data = {
'product': metadata['product'],
'resourceType': metadata['contentType'],
'productReferenceKey': metadata['productRefKey'],
'firstName': 'foo',
'lastName': 'bar',
'email': '[email protected]'
}
registration_response = self._download_json(
'https://api-registrations.logmeininc.com/registrations',
video_id,
data=json.dumps(registration_data).encode(),
expected_status=409,
headers={'Content-Type': 'application/json'},
note='Register user',
errnote='Unable to register user')
content_response = self._download_json(
'https://api.gotostage.com/contents/%s/asset' % video_id,
video_id,
headers={'x-registrantkey': registration_response['registrationKey']},
note='Get download url',
errnote='Unable to get download url')
return {
'id': video_id,
'title': try_get(metadata, lambda x: x['title'], compat_str),
'url': try_get(content_response, lambda x: x['cdnLocation'], compat_str),
'ext': 'mp4',
'thumbnail': url_or_none(try_get(metadata, lambda x: x['thumbnail']['location'])),
'duration': try_get(metadata, lambda x: x['duration'], float),
'categories': [try_get(metadata, lambda x: x['category'], compat_str)],
'is_live': False
}
|
[
"[email protected]"
] | |
ea7239225feecb34fa333d4d532077816030969f
|
312a86122cd08be0a8b7d0aacf87c8445aa6dead
|
/cmsplugin_blog/migrations/0007_auto__del_pygmentsplugin__chg_field_entry_tags.py
|
19ebbd3218c864309e4b6d9f986286c701b081bb
|
[] |
no_license
|
arnaudbenard/fle-website
|
f4cbaaa10d96de6eca9a18a5ba558480892d268d
|
d90d6be9909cd962ca22c72b3af8c43966a33e71
|
refs/heads/master
| 2021-01-21T01:33:37.534567 | 2013-05-21T22:16:12 | 2013-05-21T22:16:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,214 |
py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
pass
def backwards(self, orm):
pass
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.CMSPlugin']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cmsplugin_blog.entry': {
'Meta': {'object_name': 'Entry'},
'content': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'tags': ('tagging.fields.TagField', [], {'default': "''"})
},
'cmsplugin_blog.entrytitle': {
'Meta': {'object_name': 'EntryTitle'},
'entry': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmsplugin_blog.Entry']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cmsplugin_blog.latestentriesplugin': {
'Meta': {'object_name': 'LatestEntriesPlugin', 'db_table': "'cmsplugin_latestentriesplugin'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'current_language_only': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'limit': ('django.db.models.fields.PositiveIntegerField', [], {})
}
}
complete_apps = ['cmsplugin_blog']
|
[
"[email protected]"
] | |
cf4a5ac19de8240a8e03404df64ae4461030be7d
|
b6ff7b98079bcc8e7d8ad248e2de1f61bfabc0db
|
/multiples_sum_average.py
|
296bbc63022fdf2e0973adcce247a959ee3cfd83
|
[] |
no_license
|
bakker4444/multiples_sum_average
|
4b55808276dcd853d8688e255581c363b6c4cabb
|
efbc13dda3cc3459b44e0eb853dcb1f11ddca767
|
refs/heads/master
| 2020-03-09T08:35:48.384936 | 2018-04-09T00:17:45 | 2018-04-09T00:17:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 464 |
py
|
# Multiples
# Part 1 - print odd numbers from 1 to 1000
for i in range(1, 1001, 2):
print i
# Multiples
# Part 2 - print all the multiples of 5 from 5 to 1,000,000
for i in range(5, 1000001, 5):
print i
# Sum List
# Create a program that prints the sum of all the values in the list
a = [1, 2, 5, 10, 255, 3]
print sum(a)
# Average Lists
# Create a program that prints the average of the values in the list
b = [1, 2, 5, 10, 255, 3]
print sum(b) / float(len(b))  # float division so the average is not truncated in Python 2
|
[
"[email protected]"
] | |
f8aaa4290c282f8bce6f11f9e721a671c7ceb6bc
|
696dec6a8d1eba189d36049afedec36da47c08f3
|
/dataset_utilities/_core_utils/_unet_attn.py
|
e87f1eef42bece2a971fe9aa3e96cd671e454917
|
[] |
no_license
|
JoelRaymann/polyp-segmentation
|
d99079f56bb3ae0886fb4c610c4abcc420137781
|
38da6c8bf47df2d2382d31f04faf63649b7d8ab0
|
refs/heads/master
| 2023-04-10T06:17:34.720237 | 2021-04-14T22:04:36 | 2021-04-14T22:04:36 | 358,053,229 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 788 |
py
|
"""
Script implementing the U-Net Attention (dice) model's data-generator core functionality and all its helper functions
"""
# Import necessary packages
import tensorflow as tf
# Handling U-Net Attention dice
def _load_data_unet_attn(img, seg):
"""
Function to prepare the images and return the data in accordance with the U-Net Attention dice model for training.
Parameters
----------
img : tf.Tensor
The image input tensor.
seg : tf.Tensor
The segmentation input tensor
Returns
-------
tuple
The tuple output (image, segmentation)
"""
# Get the numpy version
img = img.numpy().copy()
seg = seg.numpy().copy()
# Normalize
img = img / 255.0
seg = seg / 255.0
return img, tf.expand_dims(seg, axis=-1)
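# Minimal usage sketch (a hypothetical tf.data pipeline of (image, mask) tensors):
# ds = ds.map(lambda img, seg: tf.py_function(
#     _load_data_unet_attn, [img, seg], (tf.float32, tf.float32)))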
|
[
"[email protected]"
] | |
73530b2a28d38e95cc3955cb0f50ddd9f0985ef7
|
e6208febf7e34d4108422c6da54453373733a421
|
/sdks/python/client/argo_workflows/model/downward_api_projection.py
|
49898a74dfbb383ff0787b357d3e85234f9e5841
|
[
"Apache-2.0"
] |
permissive
|
wreed4/argo
|
05889e5bb7738d534660c58a7ec71c454e6ac9bb
|
41f94310b0f7fee1ccd533849bb3af7f1ad4f672
|
refs/heads/master
| 2023-01-22T05:32:12.254485 | 2022-01-27T21:24:45 | 2022-01-27T22:02:22 | 233,143,964 | 0 | 0 |
Apache-2.0
| 2023-01-17T19:04:43 | 2020-01-10T22:56:25 |
Go
|
UTF-8
|
Python
| false | false | 11,464 |
py
|
"""
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from argo_workflows.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from argo_workflows.exceptions import ApiAttributeError
def lazy_import():
from argo_workflows.model.downward_api_volume_file import DownwardAPIVolumeFile
globals()['DownwardAPIVolumeFile'] = DownwardAPIVolumeFile
class DownwardAPIProjection(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'items': ([DownwardAPIVolumeFile],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'items': 'items', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""DownwardAPIProjection - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
items ([DownwardAPIVolumeFile]): Items is a list of DownwardAPIVolume file. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""DownwardAPIProjection - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
items ([DownwardAPIVolumeFile]): Items is a list of DownwardAPIVolume file. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
[
"[email protected]"
] | |
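For orientation, a generated model like this is instantiated with keyword arguments only (positional arguments raise ApiTypeError, as the __init__ above shows). A hedged sketch, assuming the companion DownwardAPIVolumeFile and ObjectFieldSelector models follow the usual openapi-generator module layout and field names; the values are illustrative:

from argo_workflows.model.downward_api_projection import DownwardAPIProjection
from argo_workflows.model.downward_api_volume_file import DownwardAPIVolumeFile
from argo_workflows.model.object_field_selector import ObjectFieldSelector

# Project the pod's labels into a file named "labels".
projection = DownwardAPIProjection(
    items=[
        DownwardAPIVolumeFile(
            path="labels",
            field_ref=ObjectFieldSelector(field_path="metadata.labels"),
        )
    ]
)
print(projection.items)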
a5636d5656eb4187372f3d8f24142e7df9c7fa89
|
74473f650f36ad78d8eee801dcbdea8a4825d4a2
|
/voltron/remote_debugger.py
|
49b88fea297b75670aad50a41c662f1e7ac3c45f
|
[] |
no_license
|
buttslol/voltron
|
f2a20d808ca165feaee80d3f6350695a4b7334fc
|
1b00674276acd465ca69b78d6ea91aa9467ed92f
|
refs/heads/master
| 2021-01-16T21:40:23.455224 | 2013-10-12T11:36:38 | 2013-10-12T11:36:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 834 |
py
|
import pdb
import socket
import sys
# Trying to debug a quirk in some code that gets called async by {ll,d}db?
#
# from .remote_debugger import Rdb
# Rdb().set_trace()
#
# Then: telnet localhost 4444
socks = {}
# Only bind the socket once
def _sock(port):
if port in socks:
return socks[port]
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("127.0.0.1", port))
socks[port] = s
return s
class Rdb(pdb.Pdb):
def __init__(self, port=4444):
self.old_stdout = sys.stdout
self.old_stdin = sys.stdin
self.skt = _sock(port)
self.skt.listen(1)
(clientsocket, address) = self.skt.accept()
handle = clientsocket.makefile('rw')
pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle)
sys.stdout = sys.stdin = handle
|
[
"[email protected]"
] | |
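A minimal sketch of the workflow the header comment describes (the callback below is hypothetical; the module path matches this file's location). Note that Rdb saves the previous sys.stdout/sys.stdin but, at least in this snapshot, never restores them, so output stays redirected to the socket after the session ends:

from voltron.remote_debugger import Rdb

def on_async_event(payload):
    # Execution blocks here until a client attaches: telnet localhost 4444
    Rdb().set_trace()
    return payload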
f64a3a28daa398d4d3500352ba4349bb2d82e861
|
a404f504febdc835b69b72c4ac28b153885fc119
|
/DungeonsKitgard/KithgardBrawl/KithgardBrawl3.py
|
edd41caf028501808f7009ea6568177cec161633
|
[] |
no_license
|
Chaboi45/CodeCombat
|
6093a2eae29ef00c0c277653c4ffd075c9e2ac4c
|
6e008a94e65bb72ca9292e303d391a4142de16f5
|
refs/heads/master
| 2021-07-02T23:24:52.050154 | 2017-09-21T07:42:21 | 2017-09-21T07:42:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,233 |
py
|
while True:
enemys = hero.findEnemies()
index = 0
closest_soldier = None
soldier_dist = 999
closest_archer = None
archer_dist = 999
closest = None
dist = 999
close_count = 0
priority = None
shield = False
while (index < len(enemys)):
distance = hero.distanceTo(enemys[index])
if (enemys[index].type == 'shaman' and distance < 20):
priority = enemys[index]
if (enemys[index].type == 'ogre' and distance < 10):
shield = True
if (distance < 10):
close_count += 1
if (enemys[index].health > 0):
if (enemys[index].type == 'archer' and distance < archer_dist):
archer_dist = distance
closest_archer = enemys[index]
if (enemys[index].type == 'soldier' and distance < soldier_dist):
soldier_dist = distance
closest_soldier = enemys[index]
if (distance < dist):
dist = distance
closest = enemys[index]
index += 1
if (priority):
enemy = priority
# elif(closest_archer and archer_dist<15):
# enemy = closest_archer
# elif(closest_soldier and soldier_dist<10):
# enemy = closest_soldier
else:
enemy = closest
if (hero.health < hero.maxHealth / 3):
item = hero.findNearest(hero.findItems())
if (item):
if (hero.isReady("jump")):
hero.jumpTo(item.pos)
else:
hero.move(item.pos)
elif (enemy):
if (hero.isReady("jump") and hero.distanceTo > 10):
hero.jumpTo(enemy.pos)
elif (hero.isReady("bash")):
hero.bash(enemy)
elif (hero.isReady("power-up")):
hero.powerUp()
hero.attack(enemy)
elif (hero.isReady("cleave") and close_count >= 7):
hero.cleave(enemy)
elif (shield):
hero.shield()
elif (close_count < 10 or priority):
hero.attack(enemy)
else:
hero.shield()
|
[
"[email protected]"
] | |
1970ad0f37eea983e19e8edfa26c8d95bbb9dd45
|
70450f0c551adf47b450468e424f4f90bebfb58d
|
/dataclasses/resources/test/test_I3RecoPulseSeriesMapMask_pybindings.py
|
ef13165679e0c4ac5ba061bf58050c184596e4c9
|
[
"MIT"
] |
permissive
|
hschwane/offline_production
|
ebd878c5ac45221b0631a78d9e996dea3909bacb
|
e14a6493782f613b8bbe64217559765d5213dc1e
|
refs/heads/master
| 2023-03-23T11:22:43.118222 | 2021-03-16T13:11:22 | 2021-03-16T13:11:22 | 280,381,714 | 0 | 0 |
MIT
| 2020-07-17T09:20:29 | 2020-07-17T09:20:29 | null |
UTF-8
|
Python
| false | false | 2,626 |
py
|
#!/usr/bin/env python
import unittest
import sys
from icecube import icetray,dataclasses
class I3RecoPulseSeriesMapMaskTest(unittest.TestCase):
def setUp(self):
self.frame = icetray.I3Frame(icetray.I3Frame.Physics)
pulses = dataclasses.I3RecoPulseSeriesMap()
key1 = icetray.OMKey(42, 7)
vec = dataclasses.I3RecoPulseSeries()
pulse = dataclasses.I3RecoPulse()
pulse.time = 1.0
pulse.charge = 2.3
vec.append(pulse)
pulse.time = 2.0
vec.append(pulse)
pulse.time = 15.0
vec.append(pulse)
pulses[key1] = vec
key2 = icetray.OMKey(7,7)
vec = dataclasses.I3RecoPulseSeries()
pulse.time = 1.0
pulse.charge = 2.3
vec.append(pulse)
pulse.time = 2.0
vec.append(pulse)
pulse.time = 15.0
vec.append(pulse)
pulses[key2] = vec
self.frame['Pulses'] = pulses
mask1 = dataclasses.I3RecoPulseSeriesMapMask(self.frame, 'Pulses')
mask1.set(key1, 1, False)
self.frame['Mask1'] = mask1
mask2 = dataclasses.I3RecoPulseSeriesMapMask(self.frame, 'Pulses')
mask2.set(key2, 1, False)
self.frame['Mask2'] = mask2
def testApply(self):
mask = self.frame['Mask1']
pulses = mask.apply(self.frame)
self.assertEquals(len(pulses), 2)
self.assertEquals(len(pulses.values()[0]), 3)
self.assertEquals(len(pulses.values()[1]), 2)
mask = self.frame['Mask2']
pulses = mask.apply(self.frame)
self.assertEquals(len(pulses), 2)
self.assertEquals(len(pulses.values()[0]), 2)
self.assertEquals(len(pulses.values()[1]), 3)
def testCombine(self):
mask1 = self.frame['Mask1']
mask2 = self.frame['Mask2']
combined = mask1 & mask2
pulses = combined.apply(self.frame)
self.assertEquals(len(pulses), 2)
self.assertEquals(len(pulses.values()[0]), 2)
self.assertEquals(len(pulses.values()[1]), 2)
combined = mask1 | mask2
pulses = combined.apply(self.frame)
self.assertEquals(len(pulses), 2)
self.assertEquals(len(pulses.values()[0]), 3)
self.assertEquals(len(pulses.values()[1]), 3)
def testQuery(self):
mask1 = self.frame['Mask1']
self.assertEquals(mask1.any(), True)
self.assertEquals(mask1.all(), False)
self.assertEquals(mask1.sum(), 5)
mask1.set_none()
self.assertEquals(mask1.any(), False)
self.assertEquals(mask1.sum(), 0)
def testEqual(self):
mask1 = self.frame['Mask1']
mask2 = self.frame['Mask2']
mask3 = dataclasses.I3RecoPulseSeriesMapMask(mask1)
self.assertNotEquals(mask1,mask2)
self.assertEquals(mask1.source,mask3.source)
self.assertEquals(mask1.bits,mask3.bits)
self.assertEquals(mask1,mask3)
self.assertEquals(mask1 != mask3,False)
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
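Besides the bit-level set() calls exercised above, the mask constructor in typical icetray releases also accepts a predicate, which is often the more readable way to build a selection. A hedged sketch (the predicate signature is assumed from common icetray usage, not from this test):

from icecube import icetray, dataclasses

# Assumes `frame` already carries an I3RecoPulseSeriesMap under "Pulses",
# as in setUp above. Keep only pulses arriving before t = 10.
early = dataclasses.I3RecoPulseSeriesMapMask(
    frame, "Pulses", lambda omkey, index, pulse: pulse.time < 10.0
)
frame["EarlyPulses"] = early
print(len(early.apply(frame)))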
d405f7741c1355404c6409d8713c69c2ea95bd5b
|
d873f3e6c322c930068711e795bfc635ed98fd6a
|
/mainapp/dao/Product/ProductImageDao.py
|
2b71855d6d277ecf129805bf13f3abe5a28ce74d
|
[] |
no_license
|
trunganhvu/personalweb
|
3d912366045448b20ec9b7722e9190197cef4f61
|
b7afc1a32665e578cbd546b1d2c375597b060914
|
refs/heads/master
| 2023-08-18T04:30:40.302843 | 2021-09-30T15:46:24 | 2021-09-30T15:46:24 | 401,062,022 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,733 |
py
|
from mainapp.model.ProductImage import ProductImage
from datetime import datetime
from django.utils import timezone
def get_all_image_in_product(product_id):
"""
Get all image in product
"""
list_image = ProductImage.objects.filter(product_id=product_id)
return list_image
def get_one_image_in_product(product_id):
"""
Get one image in product
"""
image = ProductImage.objects.filter(product_id=product_id).first()
return image
def get_product_image_by_image_id(product_image_id):
"""
Get product image by image id
"""
product_image = ProductImage.objects.get(pk=product_image_id)
return product_image
def insert_image(product_image):
"""
Insert image
"""
p_image = ProductImage(product_image_name=product_image.product_image_name,
product_image_path=product_image.product_image_path,
product_id=product_image.product_id,
created_at=datetime.now(tz=timezone.utc))
p_image.save()
return p_image
def update_image(product_image):
"""
Update image
"""
p_image = ProductImage.objects.get(pk=product_image.product_image_id)
p_image.product_image_name=product_image.product_image_name
p_image.product_image_path=product_image.product_image_path
p_image.save()
return p_image
def delete_image_by_id(product_image_id):
"""
Delete image by id
"""
p_image = ProductImage.objects.get(pk=product_image_id)
p_image.delete()
def delete_all_product_image_by_product_id(product_id):
"""
Delete product image by product id
"""
p_image = ProductImage.objects.filter(product_id=product_id)
p_image.delete()
|
[
"[email protected]"
] | |
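A short sketch of how these helpers compose in a view layer (the view function and template path are hypothetical, not from this project):

from django.shortcuts import render
from mainapp.dao.Product import ProductImageDao

def product_gallery(request, product_id):
    # Fetch every image row for the product through the DAO layer.
    images = ProductImageDao.get_all_image_in_product(product_id)
    return render(request, "product/gallery.html", {"images": images})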
e36ef19e8a22954393d4d6945028df91e13e2086
|
2432996ac1615cd36d61f0feeff8a359d2b438d8
|
/env/lib/python3.8/site-packages/PyInstaller/hooks/rthooks/pyi_rth_glib.py
|
c8a30ed042ca186466fa26d6db022c72d8a049d1
|
[
"Apache-2.0"
] |
permissive
|
Parveshdhull/AutoTyper
|
dd65d53ece7c13fbc1ead7ce372947483e05e2e3
|
7fabb30e15b770d790b69c2e4eaf9bbf5a4d180c
|
refs/heads/main
| 2023-05-08T14:10:35.404160 | 2023-05-07T20:43:15 | 2023-05-07T20:43:15 | 315,415,751 | 26 | 18 |
Apache-2.0
| 2023-05-07T20:43:16 | 2020-11-23T19:13:05 |
Python
|
UTF-8
|
Python
| false | false | 560 |
py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2015-2020, PyInstaller Development Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: Apache-2.0
#-----------------------------------------------------------------------------
import os
import sys
os.environ['XDG_DATA_DIRS'] = os.path.join(sys._MEIPASS, 'share')
|
[
"[email protected]"
] | |
a899d1f17d27c6ae466745f8cc745c25c5c96fe5
|
dbeb1e145eba012a200073038d8a8965ae0c6f5d
|
/MachineLearning/MLaPP/MixtureModels/mixGaussMLvsMAP.py
|
29000b8f1da18391afd40be017477335f1fe0a4f
|
[] |
no_license
|
hellJane/Python_DataAnalysis
|
b7027cb9d8e75a98b5626a58ee85b64f62c54c9c
|
966ee5d732e074e9d124333f13d3e3e23ade1edc
|
refs/heads/master
| 2021-05-17T01:57:24.092791 | 2017-12-01T15:32:32 | 2017-12-01T15:32:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,409 |
py
|
import numpy as np
import scipy.stats as ss
import scipy.linalg as sl
import sklearn.preprocessing as sp
import matplotlib.pyplot as plt
from mixGaussFit import *
np.random.seed(0)
# Generate Data, fixed K = 3
def MakeCov(origMat, D):
m11 = origMat
m21 = np.zeros((D - 2, 2)) # origMat must be (2, 2)
m12 = np.zeros((2, D - 2))
m22 = np.eye(D - 2)
return np.r_[np.c_[m11, m12], np.c_[m21, m22]]
def GetInitial(D, K=3):
mu_init = np.random.rand(K, D)
mixWeights_init = np.tile(1/K, K)
return mu_init, mixWeights_init
def Sample(D, N=100):
K = 3 # fixed
mean_1 = np.r_[-1, 1, np.zeros(D-2)]
mean_2 = np.r_[1, -1, np.zeros(D-2)]
mean_3 = np.r_[3, -1, np.zeros(D-2)]
cov_1 = MakeCov([[1, -0.7],
[-0.7, 1]], D)
cov_2 = MakeCov([[1, 0.7],
[0.7, 1]], D)
cov_3 = MakeCov([[1, 0.9],
[0.9, 1]], D)
n = [0.5, 0.3, 0.2] # sampling fractions per component
x1 = ss.multivariate_normal(mean_1, cov_1).rvs((int)(n[0] * N))
x2 = ss.multivariate_normal(mean_2, cov_2).rvs((int)(n[1] * N))
x3 = ss.multivariate_normal(mean_3, cov_3).rvs((int)(n[2] * N))
x = np.r_[x1, x2, x3]
sigma_init = np.array([cov_1, cov_2, cov_3])
return x, sigma_init # return the data samples and the (fixed) initial covariances
# Fit model with MLE or MAP
def Fit(x, pi, mu, cov, isMAP=False):
success = True
try:
maxIter = 30
cov_old = cov
pi_old = pi
mu_old = mu
prior = MakeNIWPrior(x)
for i in range(maxIter):
if isMAP:
r, pi_new, mu_new, cov_new = EM_MAP(x, pi_old, mu_old, cov_old, prior)
else:
r, pi_new, mu_new, cov_new = EM(x, pi_old, mu_old, cov_old)
#print('{0:-^60}'.format('Iteration: ' + str(i + 1)))
#print('pi: ', pi_new)
if np.allclose(pi_new, pi_old):
print('converged')
break
pi_old = pi_new
mu_old = mu_new
cov_old = cov_new
except Exception as e:
print(e)
success = False
return success
# Fit with several trials
def GetFailRatio(D, trials=10):
print('D = ', D)
x, cov = Sample(D)
x = sp.StandardScaler().fit_transform(x)
MLE_fail, MAP_fail = 0, 0
for i in range(trials):
mu, pi = GetInitial(D) # a fresh random initialization for every trial
if not Fit(x, pi, mu, cov, True):
MAP_fail += 1
if not Fit(x, pi, mu, cov, False):
MLE_fail += 1
print('MLE_fail, MAP_fail: ', MLE_fail, MAP_fail)
return [MLE_fail / trials, MAP_fail / trials]
D = np.arange(10, 101, 10)
ratios = []
for i in range(len(D)):
Di = D[i]
ratios.append(GetFailRatio(Di))
ratios = np.array(ratios)
print('ratios: \n', ratios)
# plots
fig = plt.figure()
fig.canvas.set_window_title("mixGaussMLvsMAP")
plt.subplot()
plt.axis([5, 105, -0.04, 1.04])
plt.xticks(np.arange(10, 101, 10))
plt.yticks(np.arange(0, 1.01, 0.1))
plt.xlabel('dimensionality')
plt.ylabel('fraction of times EM for GMM fails')
plt.plot(D, ratios[:, 0], 'r-', marker='o', fillstyle='none', label='MLE')
plt.plot(D, ratios[:, 1], 'k:', marker='s', fillstyle='none', label='MAP')
plt.legend()
plt.show()
|
[
"[email protected]"
] | |
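EM_MAP and MakeNIWPrior are imported from mixGaussFit and not shown here. The reason MAP keeps working as D grows is the regularized covariance M-step: shrinkage toward the prior scatter keeps each Sigma_k positive-definite even when a component claims fewer than D points, which is exactly where the MLE scatter becomes singular. A hedged sketch of that update, following MLaPP's Normal-inverse-Wishart treatment and ignoring the mean-prior coupling term for brevity; this helper is an assumption, not the repo's code:

import numpy as np

def map_covariance(x, r_k, mu_k, S0, nu0):
    """MAP M-step for one component's covariance under an inverse-Wishart
    prior IW(S0, nu0); r_k holds the responsibilities for this component."""
    N, D = x.shape
    Nk = r_k.sum()
    diff = x - mu_k                          # (N, D)
    Sk = (r_k[:, None] * diff).T @ diff      # responsibility-weighted scatter
    # MLE would return Sk / Nk, singular whenever Nk < D. The prior scatter S0
    # in the numerator and the enlarged denominator prevent that collapse.
    # (The "+ D + 2" constant depends on the IW parameterization convention.)
    return (S0 + Sk) / (nu0 + Nk + D + 2)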
4787e27cc0386dadaba83b52ae9fea1ba5f0e346
|
7d502a6501c404bdb856c9bc8cde2ae0ee853d7a
|
/detectron2/utils/visualizer.py
|
b2e97a2fcbfa479904054b9c73214a73daa01944
|
[
"Apache-2.0"
] |
permissive
|
yonatanTaranis/detectron2
|
fb5f7e51cf93e2b0cd9df4f0ad20d6c51a21adea
|
d760e9fd34863ad68a05a4e370ac1d1082a24804
|
refs/heads/master
| 2020-09-09T18:11:00.944915 | 2019-11-20T15:49:09 | 2019-11-20T15:49:09 | 221,522,308 | 0 | 1 |
Apache-2.0
| 2019-11-20T15:49:11 | 2019-11-13T18:09:02 | null |
UTF-8
|
Python
| false | false | 44,893 |
py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import colorsys
import logging
import math
import numpy as np
from enum import Enum, unique
import cv2
import matplotlib as mpl
import matplotlib.colors as mplc
import matplotlib.figure as mplfigure
import pycocotools.mask as mask_util
import torch
from matplotlib.backends.backend_agg import FigureCanvasAgg
from detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes
from .colormap import random_color
logger = logging.getLogger(__name__)
__all__ = ["ColorMode", "VisImage", "Visualizer"]
_SMALL_OBJECT_AREA_THRESH = 1000
_LARGE_MASK_AREA_THRESH = 120000
_OFF_WHITE = (1.0, 1.0, 240.0 / 255)
_BLACK = (0, 0, 0)
_RED = (1.0, 0, 0)
_KEYPOINT_THRESHOLD = 0.05
@unique
class ColorMode(Enum):
"""
Enum of different color modes to use for instance visualizations.
Attributes:
IMAGE: Picks a random color for every instance and overlay segmentations with low opacity.
SEGMENTATION: Let instances of the same category have similar colors, and overlay them with
high opacity. This provides more attention on the quality of segmentation.
IMAGE_BW: same as IMAGE, but convert all areas without masks to gray-scale.
Only available for drawing per-instance mask predictions.
"""
IMAGE = 0
SEGMENTATION = 1
IMAGE_BW = 2
class GenericMask:
"""
Attributes:
polygons (list[ndarray]): polygons for this mask.
Each ndarray has format [x, y, x, y, ...]
mask (ndarray): a binary mask
"""
def __init__(self, mask_or_polygons, height, width):
self._mask = self._polygons = self._has_holes = None
self.height = height
self.width = width
m = mask_or_polygons
if isinstance(m, dict):
# RLEs
assert "counts" in m and "size" in m
if isinstance(m["counts"], list): # uncompressed RLEs
h, w = m["size"]
assert h == height and w == width
m = mask_util.frPyObjects(m, h, w)
self._mask = mask_util.decode(m)[:, :]
return
if isinstance(m, list): # list[ndarray]
self._polygons = [np.asarray(x).reshape(-1) for x in m]
return
if isinstance(m, np.ndarray): # assumed to be a binary mask
assert m.shape[1] != 2, m.shape
assert m.shape == (height, width), m.shape
self._mask = m.astype("uint8")
return
raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m)))
@property
def mask(self):
if self._mask is None:
self._mask = self.polygons_to_mask(self._polygons)
return self._mask
@property
def polygons(self):
if self._polygons is None:
self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
return self._polygons
@property
def has_holes(self):
if self._has_holes is None:
if self._mask is not None:
self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
else:
self._has_holes = False # if original format is polygon, does not have holes
return self._has_holes
def mask_to_polygons(self, mask):
# cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level
# hierarchy. External contours (boundary) of the object are placed in hierarchy-1.
# Internal contours (holes) are placed in hierarchy-2.
# cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours.
mask = np.ascontiguousarray(mask) # some versions of cv2 do not support non-contiguous arrays
res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
hierarchy = res[-1]
if hierarchy is None: # empty mask
return [], False
has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0
res = res[-2]
res = [x.flatten() for x in res]
res = [x for x in res if len(x) >= 6]
return res, has_holes
def polygons_to_mask(self, polygons):
rle = mask_util.frPyObjects(polygons, self.height, self.width)
rle = mask_util.merge(rle)
return mask_util.decode(rle)[:, :]
def area(self):
return self.mask.sum()
def bbox(self):
p = mask_util.frPyObjects(self.polygons, self.height, self.width)
p = mask_util.merge(p)
bbox = mask_util.toBbox(p)
bbox[2] += bbox[0]
bbox[3] += bbox[1]
return bbox
class _PanopticPrediction:
def __init__(self, panoptic_seg, segments_info):
self._seg = panoptic_seg
self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info
segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True)
areas = areas.numpy()
sorted_idxs = np.argsort(-areas)
self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs]
self._seg_ids = self._seg_ids.tolist()
for sid, area in zip(self._seg_ids, self._seg_areas):
if sid in self._sinfo:
self._sinfo[sid]["area"] = float(area)
def non_empty_mask(self):
"""
Returns:
(H, W) array, a mask for all pixels that have a prediction
"""
empty_ids = []
for id in self._seg_ids:
if id not in self._sinfo:
empty_ids.append(id)
if len(empty_ids) == 0:
return np.zeros(self._seg.shape, dtype=np.uint8)
assert (
len(empty_ids) == 1
), ">1 ids corresponds to no labels. This is currently not supported"
return (self._seg != empty_ids[0]).numpy().astype(np.bool)
def semantic_masks(self):
for sid in self._seg_ids:
sinfo = self._sinfo.get(sid)
if sinfo is None or sinfo["isthing"]:
# Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions.
continue
yield (self._seg == sid).numpy().astype(np.bool), sinfo
def instance_masks(self):
for sid in self._seg_ids:
sinfo = self._sinfo.get(sid)
if sinfo is None or not sinfo["isthing"]:
continue
mask = (self._seg == sid).numpy().astype(np.bool)
if mask.sum() > 0:
yield mask, sinfo
def _create_text_labels(classes, scores, class_names):
"""
Args:
classes (list[int] or None):
scores (list[float] or None):
class_names (list[str] or None):
Returns:
list[str] or None
"""
labels = None
if classes is not None and class_names is not None and len(class_names) > 1:
labels = [class_names[i] for i in classes]
if scores is not None:
if labels is None:
labels = ["{:.0f}%".format(s * 100) for s in scores]
else:
labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)]
return labels
class VisImage:
def __init__(self, img, scale=1.0):
"""
Args:
img (ndarray): an RGB image of shape (H, W, 3).
scale (float): scale the input image
"""
self.img = img
self.scale = scale
self.width, self.height = img.shape[1], img.shape[0]
self._setup_figure(img)
def _setup_figure(self, img):
"""
Args:
Same as in :meth:`__init__()`.
Returns:
fig (matplotlib.pyplot.figure): top level container for all the image plot elements.
ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system.
"""
fig = mplfigure.Figure(frameon=False)
self.dpi = fig.get_dpi()
# add a small 1e-2 to avoid precision lost due to matplotlib's truncation
# (https://github.com/matplotlib/matplotlib/issues/15363)
fig.set_size_inches(
(self.width * self.scale + 1e-2) / self.dpi,
(self.height * self.scale + 1e-2) / self.dpi,
)
self.canvas = FigureCanvasAgg(fig)
# self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig)
ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
ax.axis("off")
ax.set_xlim(0.0, self.width)
ax.set_ylim(self.height)
self.fig = fig
self.ax = ax
def save(self, filepath):
"""
Args:
filepath (str): a string that contains the absolute path, including the file name, where
the visualized image will be saved.
"""
if filepath.endswith(".jpg") or filepath.endswith(".png"):
# faster than matplotlib's imshow
cv2.imwrite(filepath, self.get_image()[:, :, ::-1])
else:
# support general formats (e.g. pdf)
self.ax.imshow(self.img, interpolation="nearest")
self.fig.savefig(filepath)
def get_image(self):
"""
Returns:
ndarray: the visualized image of shape (H, W, 3) (RGB) in uint8 type.
The shape is scaled w.r.t the input image using the given `scale` argument.
"""
canvas = self.canvas
s, (width, height) = canvas.print_to_buffer()
if (self.width, self.height) != (width, height):
img = cv2.resize(self.img, (width, height))
else:
img = self.img
# buf = io.BytesIO() # works for cairo backend
# canvas.print_rgba(buf)
# width, height = self.width, self.height
# s = buf.getvalue()
buffer = np.frombuffer(s, dtype="uint8")
# imshow is slow. blend manually (still quite slow)
img_rgba = buffer.reshape(height, width, 4)
rgb, alpha = np.split(img_rgba, [3], axis=2)
try:
import numexpr as ne # fuse them with numexpr
visualized_image = ne.evaluate("img * (1 - alpha / 255.0) + rgb * (alpha / 255.0)")
except ImportError:
alpha = alpha.astype("float32") / 255.0
visualized_image = img * (1 - alpha) + rgb * alpha
visualized_image = visualized_image.astype("uint8")
return visualized_image
class Visualizer:
def __init__(self, img_rgb, metadata, scale=1.0, instance_mode=ColorMode.IMAGE):
"""
Args:
img_rgb: a numpy array of shape (H, W, C), where H and W correspond to
the height and width of the image respectively. C is the number of
color channels. The image is required to be in RGB format since that
is a requirement of the Matplotlib library. The image is also expected
to be in the range [0, 255].
metadata (MetadataCatalog): image metadata.
"""
self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
self.metadata = metadata
self.output = VisImage(self.img, scale=scale)
self.cpu_device = torch.device("cpu")
# texts that are too small are useless; clamp the default font size from below
self._default_font_size = max(
np.sqrt(self.output.height * self.output.width) // 90, 10 // scale
)
self._instance_mode = instance_mode
def draw_instance_predictions(self, predictions):
"""
Draw instance-level prediction results on an image.
Args:
predictions (Instances): the output of an instance detection/segmentation
model. Following fields will be used to draw:
"pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle").
Returns:
output (VisImage): image object with visualizations.
"""
boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
scores = predictions.scores if predictions.has("scores") else None
classes = predictions.pred_classes if predictions.has("pred_classes") else None
labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None))
keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None
if predictions.has("pred_masks"):
masks = np.asarray(predictions.pred_masks)
masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]
else:
masks = None
if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
colors = [
self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes
]
alpha = 0.8
else:
colors = None
alpha = 0.5
if self._instance_mode == ColorMode.IMAGE_BW:
assert predictions.has("pred_masks"), "ColorMode.IMAGE_BW requires segmentations"
self.output.img = self._create_grayscale_image(
(predictions.pred_masks.any(dim=0) > 0).numpy()
)
alpha = 0.3
self.overlay_instances(
masks=masks,
boxes=boxes,
labels=labels,
keypoints=keypoints,
assigned_colors=colors,
alpha=alpha,
)
return self.output
def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8):
"""
Draw semantic segmentation predictions/labels.
Args:
sem_seg (Tensor or ndarray): the segmentation of shape (H, W).
area_threshold (int): segments with less than `area_threshold` are not drawn.
alpha (float): the larger it is, the more opaque the segmentations are.
Returns:
output (VisImage): image object with visualizations.
"""
if isinstance(sem_seg, torch.Tensor):
sem_seg = sem_seg.numpy()
labels, areas = np.unique(sem_seg, return_counts=True)
sorted_idxs = np.argsort(-areas).tolist()
labels = labels[sorted_idxs]
for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels):
try:
mask_color = [x / 255 for x in self.metadata.stuff_colors[label]]
except (AttributeError, IndexError):
mask_color = None
binary_mask = (sem_seg == label).astype(np.uint8)
text = self.metadata.stuff_classes[label]
self.draw_binary_mask(
binary_mask,
color=mask_color,
edge_color=_OFF_WHITE,
text=text,
alpha=alpha,
area_threshold=area_threshold,
)
return self.output
def draw_panoptic_seg_predictions(
self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7
):
"""
Draw panoptic prediction results on an image.
Args:
panoptic_seg (Tensor): of shape (height, width) where the values are ids for each
segment.
segments_info (list[dict]): Describe each segment in `panoptic_seg`.
Each dict contains keys "id", "category_id", "isthing".
area_threshold (int): stuff segments with less than `area_threshold` are not drawn.
Returns:
output (VisImage): image object with visualizations.
"""
pred = _PanopticPrediction(panoptic_seg, segments_info)
if self._instance_mode == ColorMode.IMAGE_BW:
self.output.img = self._create_grayscale_image(pred.non_empty_mask())
# draw mask for all semantic segments first i.e. "stuff"
for mask, sinfo in pred.semantic_masks():
category_idx = sinfo["category_id"]
try:
mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]
except AttributeError:
mask_color = None
text = self.metadata.stuff_classes[category_idx]
self.draw_binary_mask(
mask,
color=mask_color,
edge_color=_OFF_WHITE,
text=text,
alpha=alpha,
area_threshold=area_threshold,
)
# draw mask for all instances second
all_instances = list(pred.instance_masks())
if len(all_instances) == 0:
return self.output
masks, sinfo = list(zip(*all_instances))
category_ids = [x["category_id"] for x in sinfo]
try:
scores = [x["score"] for x in sinfo]
except KeyError:
scores = None
labels = _create_text_labels(category_ids, scores, self.metadata.thing_classes)
try:
colors = [random_color(rgb=True, maximum=1) for k in category_ids]
except AttributeError:
colors = None
self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha)
return self.output
def draw_dataset_dict(self, dic):
"""
Draw annotations/segmentations in Detectron2 Dataset format.
Args:
dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format.
Returns:
output (VisImage): image object with visualizations.
"""
annos = dic.get("annotations", None)
if annos:
if "segmentation" in annos[0]:
masks = [x["segmentation"] for x in annos]
else:
masks = None
if "keypoints" in annos[0]:
keypts = [x["keypoints"] for x in annos]
keypts = np.array(keypts).reshape(len(annos), -1, 3)
else:
keypts = None
boxes = [BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) for x in annos]
labels = [x["category_id"] for x in annos]
names = self.metadata.get("thing_classes", None)
if names:
labels = [names[i] for i in labels]
labels = [
"{}".format(i) + ("|crowd" if a.get("iscrowd", 0) else "")
for i, a in zip(labels, annos)
]
self.overlay_instances(labels=labels, boxes=boxes, masks=masks, keypoints=keypts)
sem_seg = dic.get("sem_seg", None)
if sem_seg is None and "sem_seg_file_name" in dic:
sem_seg = cv2.imread(dic["sem_seg_file_name"], cv2.IMREAD_GRAYSCALE)
if sem_seg is not None:
self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5)
return self.output
def overlay_instances(
self,
*,
boxes=None,
labels=None,
masks=None,
keypoints=None,
assigned_colors=None,
alpha=0.5
):
"""
Args:
boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,
or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,
or a :class:`RotatedBoxes`,
or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format
for the N objects in a single image,
labels (list[str]): the text to be displayed for each instance.
masks (masks-like object): Supported types are:
* `structures.masks.PolygonMasks`, `structures.masks.BitMasks`.
* list[list[ndarray]]: contains the segmentation masks for all objects in one image.
The first level of the list corresponds to individual instances. The second
level corresponds to all the polygons that compose the instance, and the third
level to the polygon coordinates. The third level should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
* list[ndarray]: each ndarray is a binary mask of shape (H, W).
* list[dict]: each dict is a COCO-style RLE.
keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),
where the N is the number of instances and K is the number of keypoints.
The last dimension corresponds to (x, y, visibility or score).
assigned_colors (list[matplotlib.colors]): a list of colors, where each color
corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
for full list of formats that the colors are accepted in.
Returns:
output (VisImage): image object with visualizations.
"""
num_instances = None
if boxes is not None:
boxes = self._convert_boxes(boxes)
num_instances = len(boxes)
if masks is not None:
masks = self._convert_masks(masks)
if num_instances:
assert len(masks) == num_instances
else:
num_instances = len(masks)
if keypoints is not None:
if num_instances:
assert len(keypoints) == num_instances
else:
num_instances = len(keypoints)
keypoints = self._convert_keypoints(keypoints)
if labels is not None:
assert len(labels) == num_instances
if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
if num_instances == 0:
return self.output
if boxes is not None and boxes.shape[1] == 5:
return self.overlay_rotated_instances(
boxes=boxes, labels=labels, assigned_colors=assigned_colors
)
# Display in largest to smallest order to reduce occlusion.
areas = None
if boxes is not None:
areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
elif masks is not None:
areas = np.asarray([x.area() for x in masks])
if areas is not None:
sorted_idxs = np.argsort(-areas).tolist()
# Re-order overlapped instances in descending order.
boxes = boxes[sorted_idxs] if boxes is not None else None
labels = [labels[k] for k in sorted_idxs] if labels is not None else None
masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
keypoints = keypoints[sorted_idxs] if keypoints is not None else None
for i in range(num_instances):
color = assigned_colors[i]
if boxes is not None:
self.draw_box(boxes[i], edge_color=color)
if masks is not None:
for segment in masks[i].polygons:
self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)
if labels is not None:
# first get a box
if boxes is not None:
x0, y0, x1, y1 = boxes[i]
text_pos = (x0, y0) # if drawing boxes, put text on the box corner.
horiz_align = "left"
elif masks is not None:
x0, y0, x1, y1 = masks[i].bbox()
# draw text in the center (defined by median) when box is not drawn
# median is less sensitive to outliers.
text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
horiz_align = "center"
else:
continue # drawing the box confidence for keypoints isn't very useful.
# for small objects, draw text at the side to avoid occlusion
instance_area = (y1 - y0) * (x1 - x0)
if (
instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
or y1 - y0 < 40 * self.output.scale
):
if y1 >= self.output.height - 5:
text_pos = (x1, y0)
else:
text_pos = (x0, y1)
height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
font_size = (
np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
* 0.5
* self._default_font_size
)
self.draw_text(
labels[i],
text_pos,
color=lighter_color,
horizontal_alignment=horiz_align,
font_size=font_size,
)
# draw keypoints
if keypoints is not None:
for keypoints_per_instance in keypoints:
self.draw_and_connect_keypoints(keypoints_per_instance)
return self.output
def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None):
"""
Args:
boxes (ndarray): an Nx5 numpy array of
(x_center, y_center, width, height, angle_degrees) format
for the N objects in a single image.
labels (list[str]): the text to be displayed for each instance.
assigned_colors (list[matplotlib.colors]): a list of colors, where each color
corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
for full list of formats that the colors are accepted in.
Returns:
output (VisImage): image object with visualizations.
"""
num_instances = len(boxes)
if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
if num_instances == 0:
return self.output
# Display in largest to smallest order to reduce occlusion.
if boxes is not None:
areas = boxes[:, 2] * boxes[:, 3]
sorted_idxs = np.argsort(-areas).tolist()
# Re-order overlapped instances in descending order.
boxes = boxes[sorted_idxs]
labels = [labels[k] for k in sorted_idxs] if labels is not None else None
colors = [assigned_colors[idx] for idx in sorted_idxs]
for i in range(num_instances):
self.draw_rotated_box_with_label(
boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None
)
return self.output
def draw_and_connect_keypoints(self, keypoints):
"""
Draws keypoints of an instance and follows the rules for keypoint connections
to draw lines between appropriate keypoints. This follows color heuristics for
line color.
Args:
keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints
and the last dimension corresponds to (x, y, probability).
Returns:
output (VisImage): image object with visualizations.
"""
visible = {}
keypoint_names = self.metadata.get("keypoint_names")
for idx, keypoint in enumerate(keypoints):
# draw keypoint
x, y, prob = keypoint
if prob > _KEYPOINT_THRESHOLD:
self.draw_circle((x, y), color=_RED)
if keypoint_names:
keypoint_name = keypoint_names[idx]
visible[keypoint_name] = (x, y)
if self.metadata.get("keypoint_connection_rules"):
for kp0, kp1, color in self.metadata.keypoint_connection_rules:
if kp0 in visible and kp1 in visible:
x0, y0 = visible[kp0]
x1, y1 = visible[kp1]
color = tuple(x / 255.0 for x in color)
self.draw_line([x0, x1], [y0, y1], color=color)
# draw lines from nose to mid-shoulder and mid-shoulder to mid-hip
# Note that this strategy is specific to person keypoints.
# For other keypoints, it should just do nothing
try:
ls_x, ls_y = visible["left_shoulder"]
rs_x, rs_y = visible["right_shoulder"]
mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2
except KeyError:
pass
else:
# draw line from nose to mid-shoulder
nose_x, nose_y = visible.get("nose", (None, None))
if nose_x is not None:
self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED)
try:
# draw line from mid-shoulder to mid-hip
lh_x, lh_y = visible["left_hip"]
rh_x, rh_y = visible["right_hip"]
except KeyError:
pass
else:
mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2
self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED)
return self.output
"""
Primitive drawing functions:
"""
def draw_text(
self,
text,
position,
*,
font_size=None,
color="g",
horizontal_alignment="center",
rotation=0
):
"""
Args:
text (str): class label
position (tuple): a tuple of the x and y coordinates to place text on image.
font_size (int, optional): font size of the text. If not provided, a font size
proportional to the image width is calculated and used.
color: color of the text. Refer to `matplotlib.colors` for full list
of formats that are accepted.
horizontal_alignment (str): see `matplotlib.text.Text`
rotation: rotation angle in degrees CCW
Returns:
output (VisImage): image object with text drawn.
"""
if not font_size:
font_size = self._default_font_size
# since the text background is dark, we don't want the text to be dark
color = np.maximum(list(mplc.to_rgb(color)), 0.2)
color[np.argmax(color)] = max(0.8, np.max(color))
x, y = position
self.output.ax.text(
x,
y,
text,
size=font_size * self.output.scale,
family="sans-serif",
bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"},
verticalalignment="top",
horizontalalignment=horizontal_alignment,
color=color,
zorder=10,
rotation=rotation,
)
return self.output
def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"):
"""
Args:
box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0
are the coordinates of the image's top left corner. x1 and y1 are the
coordinates of the image's bottom right corner.
alpha (float): blending coefficient. Smaller values lead to more transparent masks.
edge_color: color of the outline of the box. Refer to `matplotlib.colors`
for full list of formats that are accepted.
line_style (string): the string to use to create the outline of the boxes.
Returns:
output (VisImage): image object with box drawn.
"""
x0, y0, x1, y1 = box_coord
width = x1 - x0
height = y1 - y0
linewidth = max(self._default_font_size / 4, 1)
self.output.ax.add_patch(
mpl.patches.Rectangle(
(x0, y0),
width,
height,
fill=False,
edgecolor=edge_color,
linewidth=linewidth * self.output.scale,
alpha=alpha,
linestyle=line_style,
)
)
return self.output
def draw_rotated_box_with_label(
self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None
):
"""
Args:
rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle),
where cnt_x and cnt_y are the center coordinates of the box.
w and h are the width and height of the box. angle represents how
many degrees the box is rotated CCW with regard to the 0-degree box.
alpha (float): blending coefficient. Smaller values lead to more transparent masks.
edge_color: color of the outline of the box. Refer to `matplotlib.colors`
for full list of formats that are accepted.
line_style (string): the string to use to create the outline of the boxes.
label (string): label for rotated box. It will not be rendered when set to None.
Returns:
output (VisImage): image object with box drawn.
"""
cnt_x, cnt_y, w, h, angle = rotated_box
area = w * h
# use thinner lines when the box is small
linewidth = self._default_font_size / (
6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3
)
theta = angle * math.pi / 180.0
c = math.cos(theta)
s = math.sin(theta)
rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)]
# x: left->right ; y: top->down
rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect]
for k in range(4):
j = (k + 1) % 4
self.draw_line(
[rotated_rect[k][0], rotated_rect[j][0]],
[rotated_rect[k][1], rotated_rect[j][1]],
color=edge_color,
linestyle="--" if k == 1 else line_style,
linewidth=linewidth,
)
if label is not None:
text_pos = rotated_rect[1] # topleft corner
height_ratio = h / np.sqrt(self.output.height * self.output.width)
label_color = self._change_color_brightness(edge_color, brightness_factor=0.7)
font_size = (
np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size
)
self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle)
return self.output
def draw_circle(self, circle_coord, color, radius=3):
"""
Args:
circle_coord (list(int) or tuple(int)): contains the x and y coordinates
of the center of the circle.
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
radius (int): radius of the circle.
Returns:
output (VisImage): image object with box drawn.
"""
x, y = circle_coord
self.output.ax.add_patch(
mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color)
)
return self.output
def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None):
"""
Args:
x_data (list[int]): a list containing x values of all the points being drawn.
Length of list should match the length of y_data.
y_data (list[int]): a list containing y values of all the points being drawn.
Length of list should match the length of x_data.
color: color of the line. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
linestyle: style of the line. Refer to `matplotlib.lines.Line2D`
for a full list of formats that are accepted.
linewidth (float or None): width of the line. When it's None,
a default value will be computed and used.
Returns:
output (VisImage): image object with line drawn.
"""
if linewidth is None:
linewidth = self._default_font_size / 3
linewidth = max(linewidth, 1)
self.output.ax.add_line(
mpl.lines.Line2D(
x_data,
y_data,
linewidth=linewidth * self.output.scale,
color=color,
linestyle=linestyle,
)
)
return self.output
def draw_binary_mask(
self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=4096
):
"""
Args:
binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and
W is the image width. Each value in the array is either a 0 or 1 value of uint8
type.
color: color of the mask. Refer to `matplotlib.colors` for a full list of
formats that are accepted. If None, will pick a random color.
edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
full list of formats that are accepted.
text (str): if None, will be drawn in the object's center of mass.
alpha (float): blending coefficient. Smaller values lead to more transparent masks.
area_threshold (float): a connected component smaller than this will not be shown.
Returns:
output (VisImage): image object with mask drawn.
"""
if color is None:
color = random_color(rgb=True, maximum=1)
if area_threshold is None:
area_threshold = 4096
has_valid_segment = False
binary_mask = binary_mask.astype("uint8") # opencv needs uint8
mask = GenericMask(binary_mask, self.output.height, self.output.width)
shape2d = (binary_mask.shape[0], binary_mask.shape[1])
if not mask.has_holes:
# draw polygons for regular masks
for segment in mask.polygons:
area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
if area < area_threshold:
continue
has_valid_segment = True
segment = segment.reshape(-1, 2)
self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
else:
rgba = np.zeros(shape2d + (4,), dtype="float32")
rgba[:, :, :3] = color
rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha
has_valid_segment = True
self.output.ax.imshow(rgba)
if text is not None and has_valid_segment:
# TODO: text is sometimes drawn on the wrong object; the heuristics here could be improved.
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
_num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)
largest_component_id = np.argmax(stats[1:, -1]) + 1
# draw text on the largest component, as well as other very large components.
for cid in range(1, _num_cc):
if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:
# median is more stable than centroid
# center = centroids[largest_component_id]
center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]
self.draw_text(text, center, color=lighter_color)
return self.output
def draw_polygon(self, segment, color, edge_color=None, alpha=0.5):
"""
Args:
segment: numpy array of shape Nx2, containing all the points in the polygon.
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
full list of formats that are accepted. If not provided, a darker shade
of the polygon color will be used instead.
alpha (float): blending coefficient. Smaller values lead to more transparent masks.
Returns:
output (VisImage): image object with polygon drawn.
"""
if edge_color is None:
# make edge color darker than the polygon color
if alpha > 0.8:
edge_color = self._change_color_brightness(color, brightness_factor=-0.7)
else:
edge_color = color
edge_color = mplc.to_rgb(edge_color) + (1,)
polygon = mpl.patches.Polygon(
segment,
fill=True,
facecolor=mplc.to_rgb(color) + (alpha,),
edgecolor=edge_color,
linewidth=max(self._default_font_size // 15 * self.output.scale, 1),
)
self.output.ax.add_patch(polygon)
return self.output
"""
Internal methods:
"""
def _jitter(self, color):
"""
Randomly modifies given color to produce a slightly different color than the color given.
Args:
color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color
picked. The values in the list are in the [0.0, 1.0] range.
Returns:
jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the
color after being jittered. The values in the list are in the [0.0, 1.0] range.
"""
color = mplc.to_rgb(color)
vec = np.random.rand(3)
# better to do it in another color space
vec = vec / np.linalg.norm(vec) * 0.5
res = np.clip(vec + color, 0, 1)
return tuple(res)
def _create_grayscale_image(self, mask=None):
"""
Create a grayscale version of the original image.
The colors in masked area, if given, will be kept.
"""
img_bw = self.img.astype("f4").mean(axis=2)
img_bw = np.stack([img_bw] * 3, axis=2)
if mask is not None:
img_bw[mask] = self.img[mask]
return img_bw
def _change_color_brightness(self, color, brightness_factor):
"""
Depending on the brightness_factor, gives a lighter or darker color i.e. a color with
less or more saturation than the original color.
Args:
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of
0 will correspond to no change, a factor in [-1.0, 0) range will result in
a darker color and a factor in (0, 1.0] range will result in a lighter color.
Returns:
modified_color (tuple[double]): a tuple containing the RGB values of the
modified color. Each value in the tuple is in the [0.0, 1.0] range.
"""
assert brightness_factor >= -1.0 and brightness_factor <= 1.0
color = mplc.to_rgb(color)
polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))
modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])
modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness
modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness
modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2])
return modified_color
def _convert_boxes(self, boxes):
"""
Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension.
"""
if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes):
return boxes.tensor.numpy()
else:
return np.asarray(boxes)
def _convert_masks(self, masks_or_polygons):
"""
Convert different format of masks or polygons to a tuple of masks and polygons.
Returns:
list[GenericMask]:
"""
m = masks_or_polygons
if isinstance(m, PolygonMasks):
m = m.polygons
if isinstance(m, BitMasks):
m = m.tensor.numpy()
if isinstance(m, torch.Tensor):
m = m.numpy()
ret = []
for x in m:
if isinstance(x, GenericMask):
ret.append(x)
else:
ret.append(GenericMask(x, self.output.height, self.output.width))
return ret
def _convert_keypoints(self, keypoints):
if isinstance(keypoints, Keypoints):
keypoints = keypoints.tensor
keypoints = np.asarray(keypoints)
return keypoints
def get_output(self):
"""
Returns:
output (VisImage): the image output containing the visualizations added
to the image.
"""
return self.output
|
[
"[email protected]"
] | |
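For reference, the usual call pattern for this class, sketched under the assumption of a standard detectron2 DefaultPredictor output (the dataset name and file paths are illustrative):

import cv2
from detectron2.data import MetadataCatalog
from detectron2.utils.visualizer import ColorMode, Visualizer

def save_visualization(outputs, image_path="input.jpg", out_path="vis.jpg"):
    # detectron2 predictors consume BGR; Visualizer expects RGB, hence [:, :, ::-1].
    img_bgr = cv2.imread(image_path)
    vis = Visualizer(
        img_bgr[:, :, ::-1],
        MetadataCatalog.get("coco_2017_val"),
        scale=1.0,
        instance_mode=ColorMode.IMAGE,
    )
    # outputs["instances"] is assumed to be an Instances object on GPU.
    out = vis.draw_instance_predictions(outputs["instances"].to("cpu"))
    cv2.imwrite(out_path, out.get_image()[:, :, ::-1])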
994b6e8a30b8b059f3ada223aba127b030d365e9
|
fafb89a3552e4dbb47d134966462ef5f3f37f576
|
/KEMP/v0.7_pml/fdtd3d/naive/pml.py
|
f6409279223ecf31ef52a0051bacb14a914258ca
|
[] |
no_license
|
EMinsight/fdtd_accelerate
|
78fa1546df5264550d12fba3cf964838b560711d
|
a566c60753932eeb646c4a3dea7ed25c7b059256
|
refs/heads/master
| 2021-12-14T03:26:52.070069 | 2012-07-25T08:25:21 | 2012-07-25T08:25:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,491 |
py
|
from __future__ import division
import numpy as np
from kemp.fdtd3d.util import common
from fields import Fields
class Pml:
def __init__(self, fields, directions, npml=50, sigma_max=0.5, kappa_max=1, alpha_max=0, m_sigma=3, m_alpha=1):
common.check_type('fields', fields, Fields)
common.check_type('directions', directions, (list, tuple), str)
assert len(directions) == 3
for axis in directions:
assert axis in ['+', '-', '+-', '']
# local variables
dt = fields.dt
nx, ny, nz = fields.ns
dtype = fields.dtype
# allocations
psi_xs = [np.zeros((2*npml + 2, ny, nz), dtype) for i in range(4)]
psi_ys = [np.zeros((nx, 2*npml + 2, nz), dtype) for i in range(4)]
        psi_zs = [np.zeros((nx, ny, 2*npml + 2), dtype) for i in range(4)]
i_e = np.arange(0.5, npml)
i_h = np.arange(1, npml+1)
        sigma_e = sigma_max  # * (i_e / npml) ** m_sigma  (polynomial grading disabled)
        sigma_h = sigma_max  # * (i_h / npml) ** m_sigma  (polynomial grading disabled)
        print 'sigma_e', sigma_e
        print 'sigma_h', sigma_h
kappa_e = 1 + (kappa_max - 1) * (i_e / npml) ** m_sigma
kappa_h = 1 + (kappa_max - 1) * (i_h / npml) ** m_sigma
alpha_e = alpha_max * ((npml - i_e) / npml) ** m_alpha
alpha_h = alpha_max * ((npml - i_h) / npml) ** m_alpha
com_e = (kappa_e * alpha_e + sigma_e) * dt + 2 * kappa_e
com_h = (kappa_h * alpha_h + sigma_h) * dt + 2 * kappa_h
pca_e = 4 * kappa_e / com_e - 1
pca_h = 4 * kappa_h / com_h - 1
pcb_e = (alpha_e * dt - 2 + 4 * kappa_e) / com_e - 1
pcb_h = (alpha_h * dt - 2 + 4 * kappa_h) / com_h - 1
pcc_e = (alpha_e * dt + 2) / com_e - 1
pcc_h = (alpha_h * dt + 2) / com_h - 1
# global variables
self.mainf = fields
self.directions = directions
self.npml = npml
self.psi_xs = psi_xs
self.psi_ys = psi_ys
self.psi_zs = psi_zs
self.pcs_e = [pca_e, pcb_e, pcc_e]
self.pcs_h = [pca_h, pcb_h, pcc_h]
# append to the update list
self.priority_type = 'pml'
fields.append_instance(self)
def update(self, sl, sls, slc, f1, f2, f3, f4, psi1, psi2, psi3, psi4, pca, pcb, pcc, c1, c2):
'''
print 'sl', sl
print 'sls', sls
print 'f1', f1[sl].shape
print 'c1', c1[sl].shape
print 'psi4', psi4[sls].shape
print 'psi4', psi4[sl].shape
'''
f1[sl] -= c1[sl] * (psi4[sls] - psi4[sl])
f2[sl] += c2[sl] * (psi3[sls] - psi3[sl])
psi1[sl] += pcc[slc] * f1[sl]
psi2[sl] += pcc[slc] * f2[sl]
psi3[sls] = pca[slc] * psi3[sls] + pcb[slc] * f3[sls]
psi4[sls] = pca[slc] * psi4[sls] + pcb[slc] * f4[sls]
def update_e(self):
npml = self.npml
ex, ey, ez, hx, hy, hz = self.mainf.ehs
cex, cey, cez = self.mainf.ces
psi_eyx, psi_ezx, psi_hyx, psi_hzx = self.psi_xs
psi_ezy, psi_exy, psi_hzy, psi_hxy = self.psi_ys
psi_exz, psi_eyz, psi_hxz, psi_hyz = self.psi_zs
pca_h, pcb_h = self.pcs_h[:2]
pcc_e = self.pcs_e[2]
directions = self.directions
sln = slice(None, None)
nax = np.newaxis
if '+' in directions[0]:
sl = (slice(-npml-1, -1), sln, sln)
sls = (slice(-npml, None), sln, sln)
slc = (sln, nax, nax)
self.update(sl, sls, slc, ey, ez, hy, hz, psi_eyx, psi_ezx, psi_hyx, psi_hzx, pca_h, pcb_h, pcc_e, cey, cez)
'''
pca, pcb, pcc = pcs
ey[sl] -= cey[sl] * (psi_hzx[sls] - psi_hzx[sl])
ez[sl] += cez[sl] * (psi_hyx[sls] - psi_hyx[sl])
psi_eyx[sl] += pcc[slc] * ey[sl]
psi_ezx[sl] += pcc[slc] * ez[sl]
psi_hyx[sls] = pca[slc] * psi_hyx[sls] + pcb[slc] * hy[sls]
psi_hzx[sls] = pca[slc] * psi_hzx[sls] + pcb[slc] * hz[sls]
'''
'''
if '-' in directions[0]:
sl = (slice(None, npml), sln, sln)
sls = (slice(1, npml+1), sln, sln)
slc = (slice(None, None, -1), nax, nax)
self.update(sl, sls, slc, pcs, ey, ez, hy, hz, psi_eyx, psi_ezx, psi_hyx, psi_hzx, cey, cez)
if '+' in directions[1]:
sl = (sln, slice(-npml-1, -1), sln)
sls = (sln, slice(-npml, None), sln)
slc = (sln, nax)
self.update(sl, sls, slc, pcs, ez, ex, hz, hx, psi_ezy, psi_exy, psi_hzy, psi_hxy, cez, cex)
if '-' in directions[1]:
sl = (sln, slice(None, npml), sln)
sls = (sln, slice(1, npml+1), sln)
slc = (slice(None, None, -1), nax)
self.update(sl, sls, slc, pcs, ez, ex, hz, hx, psi_ezy, psi_exy, psi_hzy, psi_hxy, cez, cex)
if '+' in directions[2]:
sl = (sln, sln, slice(-npml-1, -1))
sls = (sln, sln, slice(-npml, None))
slc = sln
self.update(sl, sls, slc, pcs, ex, ey, hx, hy, psi_exz, psi_eyz, psi_hxz, psi_hyz, cex, cey)
if '-' in directions[2]:
sl = (sln, sln, slice(None, npml))
sls = (sln, sln, slice(1, npml+1))
slc = slice(None, None, -1)
self.update(sl, sls, slc, pcs, ex, ey, hx, hy, psi_exz, psi_eyz, psi_hxz, psi_hyz, cex, cey)
'''
def update_h(self):
npml = self.npml
ex, ey, ez, hx, hy, hz = self.mainf.ehs
chx, chy, chz = self.mainf.chs
psi_eyx, psi_ezx, psi_hyx, psi_hzx = self.psi_xs
psi_ezy, psi_exy, psi_hzy, psi_hxy = self.psi_ys
psi_exz, psi_eyz, psi_hxz, psi_hyz = self.psi_zs
pca_e, pcb_e = self.pcs_e[:2]
pcc_h = self.pcs_h[2]
directions = self.directions
sln = slice(None, None)
nax = np.newaxis
if '+' in directions[0]:
sl = (slice(-npml, None), sln, sln)
sls = (slice(-npml-1, -1), sln, sln)
slc = (sln, nax, nax)
self.update(sl, sls, slc, hz, hy, ez, ey, psi_hzx, psi_hyx, psi_ezx, psi_eyx, pca_e, pcb_e, pcc_h, chz, chy)
'''
if '-' in directions[0]:
sl = (slice(1, npml+1), sln, sln)
sls = (slice(None, npml), sln, sln)
slc = (slice(None, None, -1), nax, nax)
self.update(sl, sls, slc, pcs, hz, hy, ez, ey, psi_hzx, psi_hyx, psi_ezx, psi_eyx, chz, chy)
if '+' in directions[1]:
sl = (sln, slice(-npml, None), sln)
sls = (sln, slice(-npml-1, -1), sln)
slc = (sln, nax)
self.update(sl, sls, slc, pcs, hx, hz, ex, ez, psi_hxy, psi_hzy, psi_exy, psi_ezy, chx, chz)
if '-' in directions[1]:
sl = (sln, slice(1, npml+1), sln)
sls = (sln, slice(None, npml), sln)
slc = (slice(None, None, -1), nax)
self.update(sl, sls, slc, pcs, hx, hz, ex, ez, psi_hxy, psi_hzy, psi_exy, psi_ezy, chx, chz)
if '+' in directions[2]:
sl = (sln, sln, slice(-npml, None))
sls = (sln, sln, slice(-npml-1, -1))
slc = sln
self.update(sl, sls, slc, pcs, hy, hx, ey, ex, psi_hyz, psi_hxz, psi_eyz, psi_exz, chy, chx)
if '-' in directions[2]:
sl = (sln, sln, slice(1, npml+1))
sls = (sln, sln, slice(None, npml))
slc = slice(None, None, -1)
self.update(sl, sls, slc, pcs, hy, hx, ey, ex, psi_hyz, psi_hxz, psi_eyz, psi_exz, chy, chx)
'''
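# A hedged usage sketch (not from the original module): given a Fields object
# `fld` from kemp.fdtd3d.naive, attaching this CPML layer might look like
#
#   pml = Pml(fld, ('+', '', ''), npml=10)
#
# The constructor registers itself via fld.append_instance(), so the main
# loop drives update_e()/update_h(). Note that only the '+x' branches are
# active in this version; the other directions are still commented out above.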
|
[
"[email protected]"
] | |
2235fd21f94d6254b3b1d3e5f3b629814f5968d0
|
3eff0ac549dd24fbade02d63c3a541ab88db1e5b
|
/ultimate_python/piglatin/piglatin/urls.py
|
36974b0b38594cf07ff90f2517eda8e50821e820
|
[] |
no_license
|
lisaolson/udemy
|
618410fb548db864b7878de5a2231e8293daa2ad
|
f40f947f6f79d692748f3efba02176fb360f0c4e
|
refs/heads/master
| 2020-03-28T20:14:23.028759 | 2018-09-18T19:45:32 | 2018-09-18T19:45:32 | 149,051,967 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 293 |
py
|
from django.conf.urls import url
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.home, name='home'),
url(r'^translate/', views.translate, name='translate'),
url(r'^about/', views.about, name='about'),
]
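# A hedged sketch (not part of this file): the view callables referenced above
# live in the app's views.py; minimal stubs consistent with the names wired
# here could look like this (template names are made-up placeholders):
#
#   from django.shortcuts import render
#
#   def home(request):
#       return render(request, 'home.html')
#
#   def translate(request):
#       return render(request, 'translate.html')
#
#   def about(request):
#       return render(request, 'about.html')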
|
[
"[email protected]"
] | |
ddf8b17e1cfddfb7814b90814bc8b3951bbd659a
|
e2081f2f873825a3cc8b529614eb784f5cf5e8c5
|
/permutations2.py
|
01b9d9cb4078de5aa049df6d087af9df69cca105
|
[] |
no_license
|
yilinanyu/Leetcode-with-Python
|
17b454058c673381dbafa5a2a154c4e84b449399
|
a55d2a3e383f858477170effbf8f6454e5dfd218
|
refs/heads/master
| 2021-01-21T04:55:31.025194 | 2016-07-11T20:10:18 | 2016-07-11T20:10:18 | 36,630,923 | 5 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 521 |
py
|
class Solution:
# @param num, a list of integer
# @return a list of lists of integers
def permuteUnique(self, num):
length = len(num)
if length == 0: return []
if length == 1: return [num]
num.sort()
res = []
previousNum = None
for i in range(length):
            # skip values equal to the previous choice at this position so each
            # distinct permutation is generated exactly once
            if num[i] == previousNum: continue
previousNum = num[i]
for j in self.permuteUnique(num[:i] + num[i+1:]):
res.append([num[i]] + j)
return res
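# Small illustrative check (not part of the original solution): thanks to the
# duplicate-skipping step, repeated values yield each permutation only once.
#
#   print(Solution().permuteUnique([1, 1, 2]))
#   # -> [[1, 1, 2], [1, 2, 1], [2, 1, 1]]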
|
[
"[email protected]"
] | |
1be5bd328ca7a8254b806833e9502b03feb20333
|
e71ecfe679dd8c800e8b0960d4ba68e19401a4fc
|
/get_actual_news_from_rss_ya/webserver/common.py
|
7a3cea0336a59bdd1034fefde3a6b74098048a66
|
[] |
no_license
|
igizm0/SimplePyScripts
|
65740038d36aab50918ca5465e21c41c87713630
|
62c8039fbb92780c8a7fbb561ab4b86cc2185c3d
|
refs/heads/master
| 2021-04-12T10:48:17.769548 | 2017-06-15T18:53:04 | 2017-06-15T18:53:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,908 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# # TODO: workaround for Windows, to fix problems with exceptions raised
# # when printing unicode characters to the Windows console.
# # This setting is probably worth using not only on Windows but on any
# # platform -- hardly any problems can come of it.
# import sys
# if sys.platform == 'win32':
#     import codecs
#     sys.stdout = codecs.getwriter(sys.stdout.encoding)(sys.stdout.detach(), 'backslashreplace')
#     sys.stderr = codecs.getwriter(sys.stderr.encoding)(sys.stderr.detach(), 'backslashreplace')
DB_FILE_NAME = 'database.sqlite'
def create_connect():
import sqlite3
return sqlite3.connect(DB_FILE_NAME)
def init_db():
    # Create the database and the table
connect = create_connect()
try:
connect.executescript('''
CREATE TABLE IF NOT EXISTS News (
id INTEGER PRIMARY KEY,
title TEXT NOT NULL,
url TEXT NOT NULL,
interest TEXT NOT NULL,
is_read BOOLEAN NOT NULL DEFAULT 0,
CONSTRAINT news_url_unique UNIQUE (url)
);
''')
connect.commit()
        # NOTE: Example for when the table schema needs to be fixed up:
# cursor.executescript('''
# DROP TABLE Game2;
#
# CREATE TABLE IF NOT EXISTS Game2 (
# id INTEGER PRIMARY KEY,
#
# name TEXT NOT NULL,
# price TEXT DEFAULT NULL,
# modify_date TIMESTAMP DEFAULT NULL,
# kind TEXT NOT NULL,
# check_steam BOOLEAN NOT NULL DEFAULT 0
# );
#
# INSERT INTO Game2 SELECT * FROM Game;
#
# DROP TABLE Game;
# ALTER TABLE Game2 RENAME TO Game;
#
# ''')
#
# connect.commit()
finally:
connect.close()
def append_list_news(list_news: [str, str], interest: str):
connect = create_connect()
def insert_news(title, url, interest):
        # Skip duplicates
has = connect.execute("SELECT 1 FROM News WHERE url = ?", (url,)).fetchone()
if has:
return
        print('Adding news item "{}" ({})'.format(title, interest))
connect.execute("INSERT OR IGNORE INTO News (title, url, interest) VALUES (?,?,?)", (title, url, interest))
try:
for title, url in list_news:
insert_news(title, url, interest)
connect.commit()
finally:
connect.close()
def get_news_list(interest: str=None, last: int=None) -> ([str, str, str], int):
connect = create_connect()
try:
if interest:
news_list = connect.execute("SELECT title, url, interest from News where interest = ?", (interest,)).fetchall()
else:
news_list = connect.execute("SELECT title, url, interest from News").fetchall()
total = len(news_list)
        # TODO: better to fetch only the last <last> records from the database than to slice here
if last and last != -1:
news_list = news_list[-last:]
return news_list, total
finally:
connect.close()
def get_news_list_and_mark_as_read(interest: str=None, count: int=None) -> ([str, str, str], int):
connect = create_connect()
try:
if interest:
news_list = connect.execute("SELECT id, title, url, interest from News where interest = ? and is_read = 0",
(interest,)).fetchall()
else:
news_list = connect.execute("SELECT id, title, url, interest from News where is_read = 0").fetchall()
        # Total number of unread news items
total = len(news_list)
        # TODO: better to fetch only <count> records from the database than to slice here
if count and count != -1:
news_list = news_list[:count]
        # Unread news items remaining
total -= len(news_list)
        # Mark these news items as read
for _id, _, _, interest in news_list:
connect.execute("UPDATE News SET is_read = 1 WHERE id = ?", (_id,))
connect.commit()
return [(title, url, interest) for _id, title, url, interest in news_list], total
finally:
connect.close()
def reset_all_is_read():
connect = create_connect()
try:
connect.execute("UPDATE News SET is_read = 0")
connect.commit()
finally:
connect.close()
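if __name__ == '__main__':
    # Minimal smoke test (illustrative, not part of the original module):
    # create the schema, insert two items, then read them back.
    # Titles and URLs below are made-up placeholders.
    init_db()
    append_list_news([('Some title', 'http://example.com/1'),
                      ('Other title', 'http://example.com/2')], 'it')
    news_list, total = get_news_list('it')
    print('total:', total)
    for title, url, interest in news_list:
        print(title, url, interest)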
|
[
"[email protected]"
] | |
ffe374573d512fb90b5fbf3f362cd00fdf9f63c2
|
d8820bda3cfa93151255cd07290332dd50cb3ae4
|
/videos/utils.py
|
0b223f7d5fe234e867666b39f953345d0cede7cb
|
[] |
no_license
|
defance/video-app
|
249aae4f81a4b89ce4b8ddadbf43332a05beb137
|
c8c3128dbd41a651d26cba0022d80bb644eaaf8a
|
refs/heads/master
| 2021-01-23T06:20:29.072852 | 2017-06-01T07:32:00 | 2017-06-01T07:32:00 | 93,020,341 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,495 |
py
|
from __future__ import unicode_literals
from collections import defaultdict
from django.core.files.storage import DefaultStorage
from django.utils.translation import pgettext_lazy as _p, ugettext_lazy as _u
from re import search as re_search
from subprocess import check_output
from .models import Video
TIME_DICT = {
'h': (_p('duration', 'hrs'), None),
'm': (_p('duration', 'min'), None),
's': (_p('duration', 'sec'), '.2'),
}
def get_video_info(video):
"""
Execute ffprobe and extract file info in following format:
>> duration=XX:XX:XX.XXXXXXX
>> height=XXX
>> width=XXX
Return output as string.
:param video: (Video) Video to extract info about
:return: (str) Extracted info of duration, height and width
"""
return check_output([
'ffprobe', '-v', 'error',
'-show_entries', 'format=duration', '-sexagesimal',
'-show_entries', 'stream=height,width',
'-of', 'default=noprint_wrappers=1', video.video.path
])
def extract_raw_duration_info(output):
"""
Extract rew duration info from ffprobe output. Returns str containing video
duration in format:
>> hours:minutes:seconds.micros
:param output: (str) Previous output of ffprobe
:return: (str) Str with durations
"""
    re_exp = r'duration=(\d*:\d*:\d*\.\d*)'
re_res = re_search(re_exp, output)
return re_res.groups()[0] if re_res is not None else None
def extract_duration_info(output):
"""
Extract duration info from ffprobe output. Returns dict containing video
duration in hours (h), minutes (m) and seconds (s).
:param output: (str) Previous output of ffprobe
:return: (dict: str => float) Dict with durations
"""
    re_exp = r'(?P<h>\d*):(?P<m>\d*):(?P<s>\d*\.\d*)'
re_res = re_search(re_exp, output or '')
return dict(map(
lambda (key, val): (key, float(val)),
re_res.groupdict().items()
)) if re_res else {}
def get_duration_str(info):
"""
Builds duration string with duration info.
Note: it is language-dependant.
:param info: (dict: str -> float)
:return: (str)
"""
if not info:
return _u('Unknown')
base_str = "{{dur{pr}}} {{desc}}"
# This probably should be split into 3 modules, though all tests are ok
def get_desc(dur):
if not info.get(dur, 0):
return None
desc, precision = TIME_DICT.get(dur, (None, None))
if desc is None:
return None
precision = ":{}f".format(precision) if precision else ''
return base_str.format(pr=precision).format(dur=info[dur], desc=desc)
return ' '.join(filter(None, map(get_desc, ['h', 'm', 's']))) or \
_u('Unknown')
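# Worked example (illustrative, not part of the original module): for ffprobe
# output containing "duration=0:01:23.450000", extract_duration_info returns
# {'h': 0.0, 'm': 1.0, 's': 23.45}, and get_duration_str renders it as
# "1.0 min 23.45 sec" (unit labels subject to translation).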
def generate_video_thumbnail(video, size=150):
"""
Executes ffmpeg to create thumbnail and saves it to static with a name of:
>> media_dir/preview/video_id.png
:param video: (Video) Video to create preview of
:param size: (int) Dimensional size (in px) of thumbnail, greater than 0,
default 150
:return: Nothing
"""
storage = DefaultStorage()
short_name = storage.get_available_name('preview/%s.png' % video.id)
filename = storage.path(short_name)
check_output([
'convert', video.video.path+'[1]',
'-resize', '{size}x{size}^'.format(size=size),
'-gravity', 'center',
'-extent', '{size}x{size}'.format(size=size),
filename
])
video.preview = short_name
def process_video(video):
try:
video.status = 'processing'
video.save()
output = get_video_info(video)
video.duration = extract_raw_duration_info(output)
generate_video_thumbnail(video)
video.status = 'ready'
video.save()
except Exception:
video.status = 'error'
video.save()
return False
return True
def process_videos(process=False):
    """
    Process videos. Update their durations and create thumbnails.
    :param process: (bool) Whether to do the actual processing
    :return: (defaultdict: bool -> int) Counts of videos that processed
        successfully (True) and unsuccessfully (False)
    """
videos = Video.objects.filter(status__in=['loaded', 'queued'])
report = defaultdict(int)
for video in videos:
if video.status != 'queued':
video.status = 'queued'
video.save()
for video in videos:
if video.status == 'queued':
report[process_video(video) if process else True] += 1
return report
|
[
"[email protected]"
] | |
1576cbb5260f834bd650b40249ce45e746869d66
|
a1614311937bae5204e171b2a3481fb31e61a490
|
/media/codigos/32/32sol427.py
|
77e953054cf12ce07ea6e205c0bbb2eb137e0364
|
[] |
no_license
|
alexandre146/avaliar
|
8d406100ed72f10292a0580edac50ad061ad92e9
|
3daf247ca68962086592a356e013b07fa1569afe
|
refs/heads/master
| 2020-03-21T03:09:29.493919 | 2018-07-23T11:41:38 | 2018-07-23T11:41:38 | 137,883,682 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 56 |
py
|
f = float(input())
c = (f - 32) * 5 / 9
print("%1.2f" % c)
|
[
"[email protected]"
] | |
659fef29c9cd67a7f70fdb47f11176ca70521932
|
4c76c88f6421abb52a9e68ae48d33f32b0fcf5af
|
/google_api/run.py
|
b8dd4c7394f9e5d4177cc85794e3d903ab8a0d13
|
[
"Apache-2.0"
] |
permissive
|
aperturetechnology/starthinker
|
76ba1d8883dbcf32eff4164f57f4342d0b912b70
|
fd2d70e39f05cb29afc65b8a78ea38441e1e2b9a
|
refs/heads/master
| 2020-04-09T22:19:14.752457 | 2018-11-27T13:52:49 | 2018-11-27T13:52:49 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,224 |
py
|
###########################################################################
#
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from util.project import project
from util.google_api import API
from util.data import put_rows
def google_api():
if project.verbose: print 'GOOGLE_API', project.task['api'], project.task['version']
results = API(project.task).execute()
put_rows(
project.task['auth'],
project.task['out'],
'%s_%s.json' % (project.task['function'].replace('.', '_'), project.date),
results
)
if __name__ == "__main__":
project.load('google_api')
google_api()
|
[
"[email protected]"
] | |
f0a35b9b28d1264951cbe6cb5db8880991747ed4
|
d6d5a3e94b7c9762a08b9c6e986ea5c4d78bcd64
|
/problems/543/test.py
|
49f8f23776892759d236a9495951ad1a13303c91
|
[] |
no_license
|
neuxxm/leetcode
|
46a9c247982834d769731b06cb9e9587f68702a5
|
fd6c8082f81bcd9eda084b347c77fd570cfbee4a
|
refs/heads/master
| 2023-02-10T22:37:18.966470 | 2021-01-07T08:48:50 | 2021-01-07T08:48:50 | 265,777,914 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 541 |
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
#16:18-16:21
def f(x, z):
    if x is None:
return 0
l = f(x.left, z)
r = f(x.right, z)
t = l + r
if t > z[0]:
z[0] = t
return max(l, r) + 1
class Solution(object):
def diameterOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: int
"""
z = [0]
f(root, z)
return z[0]
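# Illustrative check (using the TreeNode definition sketched in the comment
# at the top of this file):
#
#   root = TreeNode(1); root.left = TreeNode(2); root.right = TreeNode(3)
#   root.left.left = TreeNode(4); root.left.right = TreeNode(5)
#   print(Solution().diameterOfBinaryTree(root))  # -> 3 (path 4-2-1-3)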
|
[
"[email protected]"
] | |
ac0286e178de05ae2d4c2c5440ca018bc646c444
|
0e51d1be69b26a4bc2020db597b187b8b4784614
|
/OOP Prep Exam 22 Aug 2020/project/rooms/room.py
|
eb117f0a7b64c7b6aefe8313f1b8fd37421631cb
|
[] |
no_license
|
dplamen/04_Python_OOP
|
81dbc21095ca776d9ce034dbe9959ca4903c8d82
|
cb0880a70c903e252958587d7051527527f57af4
|
refs/heads/main
| 2023-09-03T03:15:28.313039 | 2021-11-13T16:51:27 | 2021-11-13T16:51:27 | 427,719,779 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 926 |
py
|
from project.appliances.appliance import Appliance
from project.people.child import Child
class Room:
def __init__(self, name, budget, members_count):
self.family_name = name
self.budget = budget
self.members_count = members_count
self.children = []
self.expenses = 0
@property
def expenses(self):
return self.__expenses
@expenses.setter
def expenses(self, value):
if value < 0:
raise ValueError("Expenses cannot be negative")
self.__expenses = value
def calculate_expenses(self, *args):
total_expenses = 0
for list_el in args:
for el in list_el:
if isinstance(el, Appliance):
total_expenses += el.get_monthly_expense()
elif isinstance(el, Child):
total_expenses += el.cost * 30
        self.expenses = total_expenses  # assign via the property so validation applies
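# Illustrative use (a sketch; the Appliance/Child instances come from the
# same exam project, and the constructor arguments here are made up):
#
#   room = Room("Johnsons", 1000, 3)
#   room.calculate_expenses(list_of_appliances, list_of_children)
#   print(room.expenses)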
|
[
"[email protected]"
] | |
aa0ed51e33bc948e2bb33149fca8c8d598babdae
|
3902cee7d59ef823a4839576f984c63452dd0d23
|
/Code/pso_v1_1.py
|
be60b6b90cb4669afa4acc58f3b006da7e8f8314
|
[
"MIT"
] |
permissive
|
zaman13/Particle-Swarm-Optimization-PSO-using-Python
|
36a6f2afcf69fdd6d790b2f3a7f11a6a00a849d9
|
0b43df807182de993be4675cf683676f43531dd3
|
refs/heads/master
| 2023-01-12T07:23:37.686017 | 2022-12-27T04:06:28 | 2022-12-27T04:06:28 | 255,481,106 | 7 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,627 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 11 17:48:18 2020
@author: Mohammad Asif Zaman
Particle swarm optimization code
- General code, would work with fitness function of any dimensions (any no. of parameters)
- Vectorized fast code. Only one for loop is used to go over the iterations. Calculations
over all the dimensions and particles are done using matrix operations.
- One function call per iteration.
Tested in python 2.7.
v1_1: Added average fitness and average pbest plots for checking convergence
"""
from __future__ import print_function
import time
import math
import numpy as np
import pylab as py
py.rcParams.update({'font.size': 14})
# Control parameters
w = 0.5 # Inertial weight. In some variations, it is set to vary with iteration number.
c1 = 2.0 # Weight of searching based on the optima found by a particle
c2 = 2.0 # Weight of searching based on the optima found by the swarm
v_fct = 1 # Velocity adjust factor. Set to 1 for standard PSO.
Np = 40 # population size (number of particles)
D = 4 # dimension (= no. of parameters in the fitness function)
max_iter = 100 # maximum number of iterations
xL = np.zeros(D) - 4 # lower bound (does not need to be homogeneous)
xU = np.zeros(D) + 4 # upper bound (does not need to be homogeneous)
# Fitness function. The code maximizes the value of the fitness function
def fitness(x):
# x is a matrix of size D x Np
# The position of the entire swarmp is inputted at once.
# Thus, one function call evaluates the fitness value of the entire swarm
# F is a vector of size Np. Each element represents the fitness value of each particle in the swarm
F_sphere = 2.0 - np.sum(np.multiply(x,x),0) # modified sphere function
return F_sphere
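# Illustrative check (not part of the original script): the modified sphere
# function peaks at the origin, so fitness(np.zeros((D, 1))) == [2.0], the
# value the swarm should converge towards.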
pbest_val = np.zeros(Np) # Personal best fintess value. One pbest value per particle.
gbest_val = np.zeros(max_iter) # Global best fintess value. One gbest value per iteration (stored).
pbest = np.zeros((D,Np)) # pbest solution
gbest = np.zeros(D) # gbest solution
gbest_store = np.zeros((D,max_iter)) # storing gbest solution at each iteration
pbest_val_avg_store = np.zeros(max_iter)
fitness_avg_store = np.zeros(max_iter)
x = np.random.rand(D,Np) # Initial position of the particles
v = np.zeros((D,Np)) # Initial velocity of the particles
# Setting the initial position of the particles over the given bounds [xL,xU]
for m in range(D):
x[m,:] = xL[m] + (xU[m]-xL[m])*x[m,:]
# Initial evaluations (for iteration = 0)
# Function call. Evaluates the fitness of the initial swarms
fit = fitness(x) # vector of size Np
pbest_val = np.copy(fit) # initial personal best = initial fitness values. Vector of size Np
pbest = np.copy(x) # initial pbest solution = initial position. Matrix of size D x Np
# Calculating gbest_val and gbest. Note that gbest is the best solution within pbest
ind = np.argmax(pbest_val) # index where pbest_val is maximum.
gbest_val[0] = np.copy(pbest_val[ind]) # set initial gbest_val
gbest = np.copy(pbest[:,ind])
pbest_val_avg_store[0] = np.mean(pbest_val)
fitness_avg_store[0] = np.mean(fit)
print("Iter. =", 0, ". gbest_val = ", gbest_val[0])
print("gbest_val = ",gbest_val[0])
# Loop over the generations
for iter in range(1,max_iter):
r1 = np.random.rand(D,Np) # random numbers [0,1], matrix D x Np
r2 = np.random.rand(D,Np) # random numbers [0,1], matrix D x Np
v_global = np.multiply(((x.transpose()-gbest).transpose()),r2)*c2*(-1.0) # velocity towards global optima
v_local = np.multiply((pbest- x),r1)*c1 # velocity towards local optima (pbest)
v = w*v + (v_local + v_global) # velocity update
x = x + v*v_fct # position update
fit = fitness(x) # fitness function call (once per iteration). Vector Np
# pbest and pbest_val update
ind = np.argwhere(fit > pbest_val) # indices where current fitness value set is greater than pbset
pbest_val[ind] = np.copy(fit[ind]) # update pbset_val at those particle indices where fit > pbest_val
pbest[:,ind] = np.copy(x[:,ind]) # update pbest for those particle indices where fit > pbest_val
# gbest and gbest_val update
ind2 = np.argmax(pbest_val) # index where the fitness is maximum
gbest_val[iter] = np.copy(pbest_val[ind2]) # store gbest value at each iteration
gbest = np.copy(pbest[:,ind2]) # global best solution, gbest
gbest_store[:,iter] = np.copy(gbest) # store gbest solution
pbest_val_avg_store[iter] = np.mean(pbest_val)
fitness_avg_store[iter] = np.mean(fit)
print("Iter. =", iter, ". gbest_val = ", gbest_val[iter]) # print iteration no. and best solution at each iteration
# Plotting
py.close('all')
py.figure(1)
py.plot(gbest_val,label = 'gbest_val')
py.plot(pbest_val_avg_store, label = 'Avg. pbest')
py.plot(fitness_avg_store, label = 'Avg. fitness')
py.legend()
py.xlabel('iterations')
py.ylabel('fitness, gbest_val')
py.figure(2)
for m in range(D):
py.plot(gbest_store[m,:],label = 'D = ' + str(m+1))
py.legend()
py.xlabel('iterations')
py.ylabel('Best solution, gbest[:,iter]')
|
[
"[email protected]"
] | |
337ede14a2180d4c5958f83d5e10ef2915d5266b
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/295/66182/submittedfiles/testes.py
|
3d79ec34943c90627f64c66fde2d551806cdeeb5
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 163 |
py
|
# -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
print("Marlon Anderson Leandro de Lima Filho")
print("19")
print(11+1037)
print((9*35+160)/5)
print("2.356,1925")
|
[
"[email protected]"
] | |
294e700bd5ebe76fe25fc3abeb124fb10fea0aae
|
123e2e28017973eefedaffb273cb3a5164f582c5
|
/tests/integrations/helpers.py
|
68f57836c2a9a1c3fe727dc6531340c2feed0752
|
[
"MIT"
] |
permissive
|
pythonindia/junction
|
ef4c0bf64f8c396edd2407f6d91444ab60a36b02
|
208d1757bf39c4727cf78b52cd2285e902eec84d
|
refs/heads/master
| 2023-08-17T09:30:50.961028 | 2023-08-10T06:44:34 | 2023-08-10T06:44:34 | 27,966,694 | 209 | 226 |
MIT
| 2023-08-10T06:44:35 | 2014-12-13T16:40:17 |
Python
|
UTF-8
|
Python
| false | false | 241 |
py
|
# -*- coding: utf-8 -*-
def assert_template_used(response, template_name):
    assert any(
        template.name == template_name for template in response.templates
    )
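# Illustrative usage inside a Django test (not part of the original helper;
# the URL and template name are made-up placeholders):
#
#   response = client.get('/')
#   assert_template_used(response, 'index.html')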
|
[
"[email protected]"
] | |
92531f86d7ff93e73ab54d1ab9712ecb3d1b4f22
|
67cf6d0e91253107a7d3a3dd879a31dcc1f5b36f
|
/0x0F-python-object_relational_mapping/3-my_safe_filter_states.py
|
0bf2a4dcb156ae06009396d036ad114a99052c05
|
[] |
no_license
|
nzomobrian/holbertonschool-higher_level_programming
|
dd1646a2b8ccf21ecf41c39efbe8f7dac2771065
|
908ec393c1a3591dde500c10e8eb9c73e35d57f8
|
refs/heads/master
| 2023-03-19T00:16:15.893949 | 2020-05-15T00:55:06 | 2020-05-15T00:55:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 498 |
py
|
#!/usr/bin/python3
# Lists all states that match the argument given without injection
import sys
import MySQLdb
if __name__ == "__main__":
db = MySQLdb.connect(host="localhost", port=3306, user=sys.argv[1],
passwd=sys.argv[2], db=sys.argv[3])
cur = db.cursor()
cur.execute("SELECT id, name FROM states WHERE BINARY name=%s \
ORDER BY id", (sys.argv[4],))
states = cur.fetchall()
    for state in states:
        print((state[0], state[1]))
    cur.close()
    db.close()
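# Illustrative invocation (argument order follows the sys.argv usage above;
# credentials and database name are placeholders):
#   ./3-my_safe_filter_states.py root my_password hbtn_0e_0_usa Texas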
|
[
"[email protected]"
] | |
14d2fd5064686d8fcb86c06141f63c75e0dbfe99
|
8a82a83655f118208692e55d7804d9fa480ad4b6
|
/src/dg/python_nlp/homework/ch09.py
|
df71319e81e7ee778a578cc8328adc2ac31535aa
|
[] |
no_license
|
xenron/sandbox-da-python
|
0814159da9a91923e4b66c5e40057e381f765e96
|
ab8f1c0d57fdc6006355f613012b84165068c315
|
refs/heads/master
| 2020-04-12T05:41:33.182110 | 2016-12-14T22:57:33 | 2016-12-14T22:57:33 | 60,324,979 | 5 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,933 |
py
|
# -*- coding: utf-8 -*-
from nltk.corpus import wordnet as wn
import nltk
from nltk.corpus import *
from matplotlib import pylab
from nltk import word_tokenize
import re
import jieba
import jieba.posseg
import jieba.analyse
#2. Write a tag pattern to match noun phrases containing plural head nouns, e.g.,
#many/JJ researchers/NNS, two/CD weeks/NNS, both/DT new/JJ positions/NNS. Try
#to do this by generalizing the tag pattern that handled singular noun phrases.
textchunk = [("many", "JJ"), ("researchers", "NNS"), ("two", "CD"), ("weeks", "NNS"), ("both","DT"), ("new", "JJ"), ("positions", "NNS")]
corpus = nltk.RegexpParser("NP:{<DT>?<CD>?<JJ>*<NNS>}")
result = corpus.parse(textchunk)
print result
result.draw()
#6. Write one or more tag patterns to handle coordinated noun phrases, e.g., July/
#NNP and/CC August/NNP, all/DT your/PRP$ managers/NNS and/CC supervisors/NNS,
#company/NN courts/NNS and/CC adjudicators/NNS.
textchunk = [("July","NNP"), ("and","CC"), ("August","NNP"), ("all", "DT"), ("your", "PRP$"), ("managers", "NNS"), ("and", "CC"), ("supervisors", "NNS"), ("company","NN"), ("courts","NNS"), ("and","CC"), ("adjudicators","NNS")]
corpus = nltk.RegexpParser(" Coordinated noun: {<NNP><CC><NNP>|<DT><PRP\$><NNS><CC><NNS>|<NN><NNS><CC><NNS>}")
result = corpus.parse(textchunk)
print result
result.draw()
#7. Carry out the following evaluation tasks for any of the chunkers you have developed
#earlier. (Note that most chunking corpora contain some internal inconsistencies,
#such that any reasonable rule-based approach will produce errors.)
#a. Evaluate your chunker on 100 sentences from a chunked corpus, and report
#the precision, recall, and F-measure.
#b. Use the chunkscore.missed() and chunkscore.incorrect() methods to identify
#the errors made by your chunker. Discuss.
#c. Compare the performance of your chunker to the baseline chunker discussed
#in the evaluation section of this chapter.
from nltk.corpus import conll2000
test_sents = conll2000.chunked_sents('test.txt', chunk_types=['NP'])[:100]
print len(test_sents)
# Baseline chunker that uses no grammar rules
cp = nltk.RegexpParser("")
print cp.evaluate(test_sents)
cp = nltk.RegexpParser('CHUNK: {<V.*> <TO> <V.*>}')
print cp.evaluate(test_sents)
cp = nltk.RegexpParser('NP: {<NN>+}')
print cp.evaluate(test_sents)
grammar = r"NP: {<[CDJNP].*>+}"
cp = nltk.RegexpParser(grammar)
print cp.evaluate(test_sents)
# Chunk noun phrases using a unigram tagger
class UnigramChunker(nltk.ChunkParserI):
def __init__(self, train_sents):
train_data = [[(t,c) for w,t,c in nltk.chunk.tree2conlltags(sent)]
for sent in train_sents]
self.tagger = nltk.UnigramTagger(train_data)
def parse(self, sentence):
pos_tags = [pos for (word,pos) in sentence]
tagged_pos_tags = self.tagger.tag(pos_tags)
chunktags = [chunktag for (pos, chunktag) in tagged_pos_tags]
conlltags = [(word, pos, chunktag) for ((word,pos),chunktag)
in zip(sentence, chunktags)]
return nltk.chunk.conlltags2tree(conlltags)
test_sents = conll2000.chunked_sents('test.txt', chunk_types=['NP'])
train_sents = conll2000.chunked_sents('train.txt', chunk_types=['NP'])
unigram_chunker = UnigramChunker(train_sents)
print unigram_chunker.evaluate(test_sents)
#17. An n-gram chunker can use information other than the current part-of-speech
#tag and the n-1 previous chunk tags. Investigate other models of the context, such
#as the n-1 previous part-of-speech tags, or some combination of previous chunk
#tags along with previous and following part-of-speech tags.
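# One possible starting point for exercise 17 (a sketch, not from the original
# homework): the same wrapper as UnigramChunker above, but backed by
# nltk.BigramTagger so each chunk tag is conditioned on the previous chunk tag
# as well as the current POS tag.
class BigramChunker(nltk.ChunkParserI):
    def __init__(self, train_sents):
        train_data = [[(t, c) for w, t, c in nltk.chunk.tree2conlltags(sent)]
                      for sent in train_sents]
        self.tagger = nltk.BigramTagger(train_data)
    def parse(self, sentence):
        pos_tags = [pos for (word, pos) in sentence]
        tagged_pos_tags = self.tagger.tag(pos_tags)
        chunktags = [chunktag for (pos, chunktag) in tagged_pos_tags]
        conlltags = [(word, pos, chunktag) for ((word, pos), chunktag)
                     in zip(sentence, chunktags)]
        return nltk.chunk.conlltags2tree(conlltags)
bigram_chunker = BigramChunker(train_sents)
print bigram_chunker.evaluate(test_sents)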
|
[
"[email protected]"
] | |
88003732df9aca53f2788ef6179f052a5afd181e
|
52585c8d95cef15199c18ba1a76899d2c31329f0
|
/01Learn Python The Hard Way/11_input.py
|
f05f4e063f81a0eb96aa73d24c9841a108683cd7
|
[] |
no_license
|
greatabel/PythonRepository
|
c7a952257303a21083ed7d535274c339362bd126
|
836fcdd3f5c1b150122302685104fe51b5ebe1a3
|
refs/heads/master
| 2023-08-30T15:56:05.376391 | 2023-08-26T03:34:14 | 2023-08-26T03:34:14 | 29,392,599 | 33 | 6 | null | 2023-02-14T13:33:21 | 2015-01-17T13:54:58 |
Python
|
UTF-8
|
Python
| false | false | 428 |
py
|
print "How old are you?",
age=raw_input()
print "how tall are your?",
height=raw_input()
print "how much do you weight?",
weight=raw_input()
print "So, you're %r old, %r tall and %r heavy." % (
age, height, weight)
print "How old are you?"
age=raw_input()
print "how tall are your?"
height=raw_input()
print "how much do you weight?"
weight=raw_input()
print "So, you're %r old, %r tall and %r heavy." % (
age, height, weight)
|
[
"[email protected]"
] | |
0c674243effa19ac035cb6a58cd3db0be2c5155b
|
25d2afe5d12fe58a97da7b51e23fdc55929e38f5
|
/create_nt_error_subset_results.py
|
1ebafa06051b3e413a1190b4c64414c17a83fbab
|
[] |
no_license
|
apmoore1/tdsa_comparisons
|
071396efe0c5e0bad297119d2ce48bf0c1cbb42f
|
ba613afece15239e6a38f277c455a035739f0b2d
|
refs/heads/master
| 2021-06-23T16:00:49.803589 | 2021-05-25T09:32:53 | 2021-05-25T09:32:53 | 225,565,449 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,623 |
py
|
from pathlib import Path
import pandas as pd
from target_extraction.data_types import TargetTextCollection
from target_extraction.analysis.sentiment_error_analysis import (ERROR_SPLIT_SUBSET_NAMES,
error_split_df, PLOT_SUBSET_ABBREVIATION,
error_analysis_wrapper,
subset_name_to_error_split)
from target_extraction.analysis.sentiment_metrics import accuracy
from target_extraction.analysis.util import add_metadata_to_df
def parse_path(path_string: str) -> Path:
path_string = Path(path_string).resolve()
return path_string
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('results_dir', type=parse_path,
help='Directory that contain results for each dataset')
parser.add_argument('save_fp', type=parse_path,
help='File path to save the results too')
args = parser.parse_args()
results_dir = args.results_dir
save_fp = args.save_fp
tssr_func = error_analysis_wrapper('TSSR')
ds_func = error_analysis_wrapper('DS')
nt_func = error_analysis_wrapper('NT')
all_relevant_error_funcs = [tssr_func, ds_func, nt_func]
splits = ['test', 'val']
dataset_names = ['Laptop', 'Restaurant', 'Election']
all_dfs = []
relevant_prediction_keys = ['predicted_target_sentiment_AE_GloVe_None_None',
'predicted_target_sentiment_CNN_GloVe_None_None',
'predicted_target_sentiment_IAN_GloVe_None_None',
'predicted_target_sentiment_TDLSTM_GloVe_None_None']
nt_error_names = ERROR_SPLIT_SUBSET_NAMES['NT']
ds_error_names = ERROR_SPLIT_SUBSET_NAMES['DS']
tssr_error_names = ERROR_SPLIT_SUBSET_NAMES['TSSR']
reduced_collection_subset_names = ds_error_names + tssr_error_names
nt_split_subsets = {'NT': ERROR_SPLIT_SUBSET_NAMES['NT']}
import time
overall_time = time.time()
for dataset_name in dataset_names:
print(f'Dataset {dataset_name}')
for split in splits:
one_time = time.time()
print(f'Data Split {split}')
data_fp = Path(results_dir, f'{dataset_name.lower()}_dataset',
f'{split}.json')
dataset = TargetTextCollection.load_json(data_fp)
for error_func in all_relevant_error_funcs:
error_func(None, dataset, True)
for reduced_collection_subset_name in reduced_collection_subset_names:
temp_df = error_split_df(None, dataset, relevant_prediction_keys,
'target_sentiments', nt_split_subsets, accuracy,
{'ignore_label_differences': True},
include_dataset_size=True,
collection_subsetting=[[reduced_collection_subset_name]],
table_format_return=False)
temp_df = add_metadata_to_df(temp_df, dataset,
'predicted_target_sentiment_key')
temp_df['reduced collection subset'] = reduced_collection_subset_name
temp_df['Dataset'] = dataset_name
temp_df['Split'] = split.capitalize()
all_dfs.append(temp_df)
print(time.time() - one_time)
print(f'total time {time.time() - overall_time}')
    all_dfs = pd.concat(all_dfs, axis=0, ignore_index=True)
temp_dfs = all_dfs.copy(deep=True)
all_subset_names = [name for subset_names in ERROR_SPLIT_SUBSET_NAMES.values()
for name in subset_names]
temp_dfs['Reduced Error Split'] = temp_dfs.apply(lambda x: subset_name_to_error_split(x['reduced collection subset']), 1)
temp_dfs['Metric'] = temp_dfs['Metric'] * 100
temp_dfs = temp_dfs.rename(columns={'Metric': 'Accuracy'})
temp_dfs['NT'] = temp_dfs.apply(lambda x: PLOT_SUBSET_ABBREVIATION[x['subset names']], 1)
temp_dfs['Subset By'] = temp_dfs.apply(lambda x: PLOT_SUBSET_ABBREVIATION[x['reduced collection subset']], 1)
temp_dfs = temp_dfs.drop(columns=['reduced collection subset', 'subset names'])
temp_dfs.to_csv(save_fp, sep='\t')
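# Illustrative invocation (paths are placeholders; the script expects the
# per-dataset layout <results_dir>/<dataset>_dataset/<split>.json):
#   python create_nt_error_subset_results.py ./results ./nt_error_subsets.tsv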
|
[
"[email protected]"
] | |
f9bda7505e7d23ead634575aa13665b1f86033de
|
c619ea6c1663c6ba0614b4dd63806cda5489adee
|
/wwtool/datasets/utils.py
|
40da33f74a28ed219dced7e76fd44b7122a94162
|
[] |
no_license
|
ZhangRuixiang-WHU/wwtool
|
35107e36fc4cce892c6d0c096ee90cbf8e1eeb97
|
be87af3ad49a3befb331b2530e0cfdd5dd479a4a
|
refs/heads/master
| 2020-12-02T15:26:22.383469 | 2019-12-25T15:02:42 | 2019-12-25T15:02:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,558 |
py
|
import os
import numpy as np
import shutil
import wwtool
def shuffle_dataset(origin_dataset_dir, trainval_dir, test_dir, trainval_rate=0.8, image_format='.png', label_format='.txt', seed=0):
"""Generate trainval and test sets from origin set by copying files randomly.
Arguments:
origin_dataset_dir {str} -- path of origin dataset, contains `images` and `labels` folds
trainval_dir {str} -- path of trainval set, contains `images` and `labels` folds
test_dir {str} -- path of test set, contains `images` and `labels` folds
seed {int} -- seed of random function
Returns:
None
"""
np.random.seed(seed)
src_label_path = os.path.join(origin_dataset_dir, 'labels')
src_image_path = os.path.join(origin_dataset_dir, 'images')
trainval_dst_label_path = os.path.join(trainval_dir, 'labels')
wwtool.mkdir_or_exist(trainval_dst_label_path)
trainval_dst_image_path = os.path.join(trainval_dir, 'images')
wwtool.mkdir_or_exist(trainval_dst_image_path)
test_dst_label_path = os.path.join(test_dir, 'labels')
wwtool.mkdir_or_exist(test_dst_label_path)
test_dst_image_path = os.path.join(test_dir, 'images')
wwtool.mkdir_or_exist(test_dst_image_path)
    file_names = [label_file.split(label_format)[0] for label_file in os.listdir(src_label_path)]
file_names = sorted(file_names)
np.random.shuffle(file_names)
trainval_file_names = file_names[0 : int(len(file_names) * trainval_rate)]
test_file_names = file_names[int(len(file_names) * trainval_rate):]
for trainval_file_name in trainval_file_names:
print("From {} to {}.".format(os.path.join(src_label_path, trainval_file_name), os.path.join(trainval_dst_label_path, trainval_file_name)))
shutil.copy(os.path.join(src_label_path, trainval_file_name + label_format), os.path.join(trainval_dst_label_path, trainval_file_name + label_format))
shutil.copy(os.path.join(src_image_path, trainval_file_name + image_format), os.path.join(trainval_dst_image_path, trainval_file_name + image_format))
for test_file_name in test_file_names:
print("From {} to {}.".format(os.path.join(src_label_path, test_file_name), os.path.join(test_dst_label_path, test_file_name)))
shutil.copy(os.path.join(src_label_path, test_file_name + label_format), os.path.join(test_dst_label_path, test_file_name + label_format))
shutil.copy(os.path.join(src_image_path, test_file_name + image_format), os.path.join(test_dst_image_path, test_file_name + image_format))
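# Illustrative call (directory names are placeholders): split an origin set
# containing `images/` and `labels/` folders into 80% trainval / 20% test.
#
#   shuffle_dataset('data/origin', 'data/trainval', 'data/test',
#                   trainval_rate=0.8, image_format='.png',
#                   label_format='.txt', seed=0)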
|
[
"[email protected]"
] | |
c96aa409b3a94b535c64d972fe0c0378789259a7
|
3e37c4e8b5c9a432183da2629484decd0e6fc088
|
/KiBuzzard/dialog/dialog_text_base.py
|
f008fbe0b24f56bfc9b0d70cf83cd44bae0009bc
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
robin7331/KiBuzzard
|
c1c1f58af04a7f8deee1314f4c32293895690737
|
079327a3c2f152fef4566aee0fe07b90e72de856
|
refs/heads/main
| 2023-04-03T00:52:23.941163 | 2021-04-13T00:12:59 | 2021-04-13T00:12:59 | 356,836,698 | 0 | 0 |
MIT
| 2021-04-11T10:26:57 | 2021-04-11T10:26:56 | null |
UTF-8
|
Python
| false | false | 9,696 |
py
|
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version 3.9.0 Apr 11 2021)
## http://www.wxformbuilder.org/
##
## PLEASE DO *NOT* EDIT THIS FILE!
###########################################################################
from .compat import DialogShim
import wx
import wx.xrc
import wx.stc
import gettext
_ = gettext.gettext
###########################################################################
## Class DIALOG_TEXT_BASE
###########################################################################
class DIALOG_TEXT_BASE ( DialogShim ):
def __init__( self, parent ):
DialogShim.__init__ ( self, parent, id = wx.ID_ANY, title = _(u"KiBuzzard Text Properties"), pos = wx.DefaultPosition, size = wx.Size( -1,-1 ), style = wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER|wx.SYSTEM_MENU )
self.SetSizeHints( wx.Size( -1,-1 ), wx.DefaultSize )
bMainSizer = wx.BoxSizer( wx.VERTICAL )
m_MultiLineSizer = wx.BoxSizer( wx.VERTICAL )
self.textLabel = wx.StaticText( self, wx.ID_ANY, _(u"Text:"), wx.DefaultPosition, wx.DefaultSize, 0 )
self.textLabel.Wrap( -1 )
m_MultiLineSizer.Add( self.textLabel, 0, wx.RIGHT|wx.LEFT, 5 )
self.m_MultiLineText = wx.stc.StyledTextCtrl( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0)
self.m_MultiLineText.SetUseTabs ( True )
self.m_MultiLineText.SetTabWidth ( 4 )
self.m_MultiLineText.SetIndent ( 4 )
self.m_MultiLineText.SetTabIndents( False )
self.m_MultiLineText.SetBackSpaceUnIndents( False )
self.m_MultiLineText.SetViewEOL( False )
self.m_MultiLineText.SetViewWhiteSpace( False )
self.m_MultiLineText.SetMarginWidth( 2, 0 )
self.m_MultiLineText.SetIndentationGuides( True )
self.m_MultiLineText.SetReadOnly( False );
self.m_MultiLineText.SetMarginWidth( 1, 0 )
self.m_MultiLineText.SetMarginWidth ( 0, 0 )
self.m_MultiLineText.MarkerDefine( wx.stc.STC_MARKNUM_FOLDER, wx.stc.STC_MARK_BOXPLUS )
self.m_MultiLineText.MarkerSetBackground( wx.stc.STC_MARKNUM_FOLDER, wx.BLACK)
self.m_MultiLineText.MarkerSetForeground( wx.stc.STC_MARKNUM_FOLDER, wx.WHITE)
self.m_MultiLineText.MarkerDefine( wx.stc.STC_MARKNUM_FOLDEROPEN, wx.stc.STC_MARK_BOXMINUS )
self.m_MultiLineText.MarkerSetBackground( wx.stc.STC_MARKNUM_FOLDEROPEN, wx.BLACK )
self.m_MultiLineText.MarkerSetForeground( wx.stc.STC_MARKNUM_FOLDEROPEN, wx.WHITE )
self.m_MultiLineText.MarkerDefine( wx.stc.STC_MARKNUM_FOLDERSUB, wx.stc.STC_MARK_EMPTY )
self.m_MultiLineText.MarkerDefine( wx.stc.STC_MARKNUM_FOLDEREND, wx.stc.STC_MARK_BOXPLUS )
self.m_MultiLineText.MarkerSetBackground( wx.stc.STC_MARKNUM_FOLDEREND, wx.BLACK )
self.m_MultiLineText.MarkerSetForeground( wx.stc.STC_MARKNUM_FOLDEREND, wx.WHITE )
self.m_MultiLineText.MarkerDefine( wx.stc.STC_MARKNUM_FOLDEROPENMID, wx.stc.STC_MARK_BOXMINUS )
self.m_MultiLineText.MarkerSetBackground( wx.stc.STC_MARKNUM_FOLDEROPENMID, wx.BLACK)
self.m_MultiLineText.MarkerSetForeground( wx.stc.STC_MARKNUM_FOLDEROPENMID, wx.WHITE)
self.m_MultiLineText.MarkerDefine( wx.stc.STC_MARKNUM_FOLDERMIDTAIL, wx.stc.STC_MARK_EMPTY )
self.m_MultiLineText.MarkerDefine( wx.stc.STC_MARKNUM_FOLDERTAIL, wx.stc.STC_MARK_EMPTY )
self.m_MultiLineText.SetSelBackground( True, wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT ) )
self.m_MultiLineText.SetSelForeground( True, wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHTTEXT ) )
m_MultiLineSizer.Add( self.m_MultiLineText, 1, wx.EXPAND|wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
bMainSizer.Add( m_MultiLineSizer, 20, wx.EXPAND|wx.ALL, 10 )
m_SingleLineSizer = wx.BoxSizer( wx.VERTICAL )
self.m_PreviewLabel = wx.StaticText( self, wx.ID_ANY, _(u"Preview:"), wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_PreviewLabel.Wrap( -1 )
m_SingleLineSizer.Add( self.m_PreviewLabel, 0, wx.LEFT|wx.RIGHT, 5 )
self.m_PreviewPanel = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
m_SingleLineSizer.Add( self.m_PreviewPanel, 1, wx.ALL|wx.EXPAND, 5 )
bMainSizer.Add( m_SingleLineSizer, 20, wx.EXPAND|wx.BOTTOM|wx.RIGHT|wx.LEFT, 10 )
fgSizerSetup = wx.FlexGridSizer( 0, 5, 4, 0 )
fgSizerSetup.AddGrowableCol( 1 )
fgSizerSetup.AddGrowableCol( 4 )
fgSizerSetup.SetFlexibleDirection( wx.BOTH )
fgSizerSetup.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.m_LayerLabel = wx.StaticText( self, wx.ID_ANY, _(u"Font:"), wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_LayerLabel.Wrap( -1 )
fgSizerSetup.Add( self.m_LayerLabel, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5 )
m_FontComboBoxChoices = []
self.m_FontComboBox = wx.ComboBox( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, m_FontComboBoxChoices, wx.CB_READONLY )
fgSizerSetup.Add( self.m_FontComboBox, 0, wx.ALIGN_CENTER_VERTICAL|wx.EXPAND, 5 )
fgSizerSetup.Add( ( 0, 0), 0, wx.RIGHT|wx.LEFT, 40 )
self.m_CapLeftLabel = wx.StaticText( self, wx.ID_ANY, _(u"Cap Left:"), wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_CapLeftLabel.Wrap( -1 )
fgSizerSetup.Add( self.m_CapLeftLabel, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5 )
m_JustifyChoice1Choices = [ wx.EmptyString, _(u"["), _(u"("), _(u"/"), _(u"\\"), _(u"<"), _(u">") ]
self.m_JustifyChoice1 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_JustifyChoice1Choices, 0 )
self.m_JustifyChoice1.SetSelection( 0 )
fgSizerSetup.Add( self.m_JustifyChoice1, 0, wx.ALIGN_CENTER_VERTICAL|wx.EXPAND|wx.RIGHT, 3 )
self.m_SizeYLabel = wx.StaticText( self, wx.ID_ANY, _(u"Height:"), wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_SizeYLabel.Wrap( -1 )
fgSizerSetup.Add( self.m_SizeYLabel, 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT|wx.LEFT, 5 )
self.m_SizeYCtrl = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_PROCESS_ENTER )
self.m_SizeYCtrl.SetMaxLength( 0 )
fgSizerSetup.Add( self.m_SizeYCtrl, 0, wx.ALIGN_CENTER_VERTICAL|wx.EXPAND, 5 )
self.m_SizeYUnits = wx.StaticText( self, wx.ID_ANY, _(u"unit"), wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_SizeYUnits.Wrap( -1 )
fgSizerSetup.Add( self.m_SizeYUnits, 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT|wx.LEFT, 5 )
self.m_staticText11 = wx.StaticText( self, wx.ID_ANY, _(u"Cap Right:"), wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText11.Wrap( -1 )
fgSizerSetup.Add( self.m_staticText11, 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT|wx.LEFT, 5 )
m_JustifyChoiceChoices = [ wx.EmptyString, _(u"]"), _(u")"), _(u"/"), _(u"\\"), _(u">"), _(u"<") ]
self.m_JustifyChoice = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_JustifyChoiceChoices, 0 )
self.m_JustifyChoice.SetSelection( 0 )
fgSizerSetup.Add( self.m_JustifyChoice, 0, wx.ALIGN_CENTER_VERTICAL|wx.EXPAND|wx.RIGHT, 3 )
self.m_ThicknessLabel = wx.StaticText( self, wx.ID_ANY, _(u"Thickness:"), wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_ThicknessLabel.Wrap( -1 )
fgSizerSetup.Add( self.m_ThicknessLabel, 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT|wx.LEFT, 5 )
self.m_ThicknessCtrl = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_PROCESS_ENTER )
self.m_ThicknessCtrl.SetMaxLength( 0 )
self.m_ThicknessCtrl.Enable( False )
fgSizerSetup.Add( self.m_ThicknessCtrl, 0, wx.ALIGN_CENTER_VERTICAL|wx.EXPAND, 2 )
self.m_ThicknessUnits = wx.StaticText( self, wx.ID_ANY, _(u"unit"), wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_ThicknessUnits.Wrap( -1 )
fgSizerSetup.Add( self.m_ThicknessUnits, 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT|wx.LEFT, 5 )
fgSizerSetup.Add( ( 0, 0), 1, wx.EXPAND, 5 )
fgSizerSetup.Add( ( 0, 0), 1, wx.EXPAND, 5 )
bMainSizer.Add( fgSizerSetup, 0, wx.EXPAND|wx.RIGHT|wx.LEFT, 10 )
bMainSizer.Add( ( 0, 0), 0, wx.TOP, 5 )
self.m_staticline = wx.StaticLine( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL )
bMainSizer.Add( self.m_staticline, 0, wx.EXPAND|wx.TOP|wx.RIGHT|wx.LEFT, 10 )
lowerSizer = wx.BoxSizer( wx.HORIZONTAL )
lowerSizer.Add( ( 0, 0), 1, wx.EXPAND, 5 )
m_sdbSizer = wx.StdDialogButtonSizer()
self.m_sdbSizerOK = wx.Button( self, wx.ID_OK )
m_sdbSizer.AddButton( self.m_sdbSizerOK )
self.m_sdbSizerCancel = wx.Button( self, wx.ID_CANCEL )
m_sdbSizer.AddButton( self.m_sdbSizerCancel )
m_sdbSizer.Realize();
lowerSizer.Add( m_sdbSizer, 0, wx.ALL, 5 )
bMainSizer.Add( lowerSizer, 0, wx.EXPAND, 5 )
self.SetSizer( bMainSizer )
self.Layout()
bMainSizer.Fit( self )
self.Centre( wx.BOTH )
# Connect Events
self.Bind( wx.EVT_INIT_DIALOG, self.OnInitDlg )
self.m_SizeYCtrl.Bind( wx.EVT_TEXT_ENTER, self.OnOkClick )
self.m_ThicknessCtrl.Bind( wx.EVT_TEXT_ENTER, self.OnOkClick )
self.m_sdbSizerOK.Bind( wx.EVT_BUTTON, self.OnOkClick )
def __del__( self ):
pass
	# Virtual event handlers, override them in your derived class
def OnInitDlg( self, event ):
pass
def OnOkClick( self, event ):
pass
|
[
"[email protected]"
] | |
157478c0e27dedf77864a8acc05f49de4067093d
|
40f4908483b98fc4f370ff4f2d520e1284d045b3
|
/phase02/immortals_repo/harness/pymmortals/generated/com/securboration/immortals/ontology/fm/feature/featureselectioncriterion.py
|
9cab2e071f0f03b3b5559b965410bbe4239d1247
|
[] |
no_license
|
TF-185/bbn-immortals
|
7f70610bdbbcbf649f3d9021f087baaa76f0d8ca
|
e298540f7b5f201779213850291337a8bded66c7
|
refs/heads/master
| 2023-05-31T00:16:42.522840 | 2019-10-24T21:45:07 | 2019-10-24T21:45:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 261 |
py
|
from enum import Enum
# noinspection PyPep8Naming
class FeatureSelectionCriterion(Enum):
SELECT_ALL = 'SELECT_ALL'
SELECT_ZERO_OR_MORE = 'SELECT_ZERO_OR_MORE'
SELECT_ONE_OR_MORE = 'SELECT_ONE_OR_MORE'
SELECT_EXACTLY_ONE = 'SELECT_EXACTLY_ONE'
|
[
"[email protected]"
] | |
434de5a96c6a7a561dffb0b43c686a35b6c46e0e
|
d7b89e6090759d242f0afc8fd5d9f5200c17371a
|
/20190904/BOJ_12100_kyeong.py
|
384dd43ab88deea6efbc4bf457ad2d902a7dedd8
|
[] |
no_license
|
JISU-JEONG/algorithm-
|
545c5f758d2ca15d2d59a706ab75be8fb71e39a6
|
02ff7df63dd8a8518005d37a2edec67d158ae6db
|
refs/heads/master
| 2020-09-13T14:15:59.919347 | 2019-11-19T23:57:40 | 2019-11-19T23:57:40 | 222,812,910 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,745 |
py
|
import sys
sys.stdin = open('12100.txt')
from pprint import pprint
from itertools import product as pd
from copy import deepcopy
def rotate(a):
n = len(a)
copy = [[0] * n for _ in range(n)]
for i in range(n):
for j in range(n):
copy[j][n-1-i] = a[i][j]
return copy
def push(a):
    # Move the zeros in each row to the front, shifting the tiles right.
    for i in range(N):
        for j in range(N):
            if a[i][j] == 0:
                a[i].insert(0, a[i].pop(j))
def add(a):
for i in range(N):
for j in range(N-1,0,-1):
if a[i][j] ==a[i][j-1]:
a[i][j] *= 2
a[i].pop(j-1)
a[i].insert(0,0)
def mymax(a):
mxtmp = 0
for i in range(N):
if mxtmp < max(a[i]):
mxtmp = max(a[i])
return mxtmp
def solve(S, board):
if S == 1:
board = rotate(board)
push(board)
add(board)
board = rotate(board)
board = rotate(board)
board = rotate(board)
elif S == 2:
board = rotate(board)
board = rotate(board)
board = rotate(board)
push(board)
add(board)
board = rotate(board)
elif S == 3:
board = rotate(board)
board = rotate(board)
push(board)
add(board)
board = rotate(board)
board = rotate(board)
else:
push(board)
add(board)
    return board
# direction codes: 1 = up, 2 = down, 3 = left, 4 = right
N = int(input())
board = []
tmp = []
for i in range(N):
board.append(list(map(int, input().split())))
original = deepcopy(board)
for i in list(pd([1,2,3,4],[1,2,3,4],[1,2,3,4],[1,2,3,4],[1,2,3,4])):
    new_board = deepcopy(original)  # solve() can mutate the board in place, so copy each time
for j in range(5):
new_board = solve(i[j],new_board)
tmp.append(mymax(new_board))
print(max(tmp))
|
[
"[email protected]"
] | |
c1158f4b21ea5e89da3ac0828829fe99d4e66d9f
|
4d1cc7c794039d31044eb94e38ab2ce4e4d62a72
|
/0x0B-python-input_output/5-to_json_string.py
|
451186994587818da5fcf46fb5179ba0ced72e8d
|
[] |
no_license
|
icculp/holbertonschool-higher_level_programming
|
2aca8f9df999b8931fb77280cb96ec16d9dffe07
|
698792cdc9096a17f2da0212d33518fda76b5213
|
refs/heads/master
| 2022-12-21T21:07:58.720950 | 2020-09-25T00:33:44 | 2020-09-25T00:33:44 | 259,339,854 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 186 |
py
|
#!/usr/bin/python3
"""
Task 5 Module
"""
import json
def to_json_string(my_obj):
"""
returns json representation of string object
"""
return json.dumps(my_obj)
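# Illustrative behaviour (not part of the original task file):
#
#   to_json_string([1, "Holberton", True])  # -> '[1, "Holberton", true]'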
|
[
"[email protected]"
] | |
fe2598c389279bc2342d80a08973218dd6bba537
|
c2de8119d4347fc34fd101b1e98588f3624a1f0a
|
/bestiary/schema.py
|
bff17176857457dc1ff50c9a15fbccc921772d29
|
[
"Apache-2.0"
] |
permissive
|
claytondaley/swarfarm
|
d2b44628fda3d6c14046ea177337cfa42e87c3db
|
10c0d381ab6d29d98c8cb88070a034f41d4028b6
|
refs/heads/master
| 2020-04-03T13:00:15.997376 | 2019-04-05T17:53:03 | 2019-04-05T17:53:03 | 155,271,052 | 0 | 0 |
Apache-2.0
| 2018-10-29T19:44:30 | 2018-10-29T19:44:29 | null |
UTF-8
|
Python
| false | false | 7,623 |
py
|
import graphene
from graphene import relay
from graphene_django.filter import DjangoFilterConnectionField
from graphene_django.types import DjangoObjectType
from .api_filters import MonsterFilter, SkillFilter
from .models import Monster, LeaderSkill, Skill, SkillEffect, SkillEffectDetail, Source, ScalingStat, MonsterCraftCost, \
CraftMaterial, Dungeon, Level
class LevelNode(DjangoObjectType):
class Meta:
model = Level
interfaces = (relay.Node,)
filter_fields = []
class DungeonNode(DjangoObjectType):
class Meta:
model = Dungeon
description = "Dungeon objects"
interfaces = (relay.Node,)
only_fields = [
'id',
'name',
'max_floors',
'slug',
'category',
'levels',
]
filter_fields = [
'id',
'name',
'category',
]
class LeaderSkillNode(DjangoObjectType):
class Meta:
model = LeaderSkill
interfaces = (relay.Node,)
filter_fields = []
class EffectNode(DjangoObjectType):
class Meta:
model = SkillEffect
interfaces = (relay.Node,)
filter_fields = []
class EffectDetailNode(DjangoObjectType):
class Meta:
model = SkillEffectDetail
interfaces = (relay.Node,)
filter_fields = []
class ScalingStatNode(DjangoObjectType):
class Meta:
model = ScalingStat
interfaces = (relay.Node,)
filter_fields = []
only_fields = [
'stat',
'com2us_desc',
'description',
]
class SkillNode(DjangoObjectType):
effects = graphene.List(of_type=EffectDetailNode)
scaling_stats = graphene.List(of_type=ScalingStatNode)
def resolve_effects(self, info, *args, **kwargs):
return self.skilleffectdetail_set.all()
def resolve_scaling_stats(self, info, *args, **kwargs):
return self.scaling_stats.all()
class Meta:
model = Skill
interfaces = (relay.Node,)
only_fields = [
'id',
'name',
'com2us_id',
'description',
'slot',
'effects',
'cooltime',
'hits',
'aoe',
'passive',
'max_level',
'level_progress_description',
'icon_filename',
'multiplier_formula',
'multiplier_formula_raw',
'scaling_stats',
]
class SourceNode(DjangoObjectType):
class Meta:
model = Source
interfaces = (relay.Node,)
filter_fields = []
class MonsterCraftCostNode(DjangoObjectType):
class Meta:
model = MonsterCraftCost
interfaces = (relay.Node,)
filter_fields = []
class CraftMaterialNode(DjangoObjectType):
class Meta:
model = CraftMaterial
interfaces = (relay.Node,)
filter_fields = []
class MonsterNode(DjangoObjectType):
base_stars = graphene.Int()
skills = graphene.List(of_type=SkillNode)
def resolve_skills(self, *args, **kwargs):
return self.skills.all()
class Meta:
model = Monster
interfaces = (relay.Node,)
only_fields = [
'id',
'name',
'com2us_id',
'family_id',
'image_filename',
'element',
'archetype',
'base_stars',
'obtainable',
'can_awaken',
'is_awakened',
'awaken_bonus',
'skills',
'skill_ups_to_max',
'leader_skill',
'raw_hp',
'raw_attack',
'raw_defense',
'base_hp',
'base_attack',
'base_defense',
'max_lvl_hp',
'max_lvl_attack',
'max_lvl_defense',
'speed',
'crit_rate',
'crit_damage',
'resistance',
'accuracy',
'homunculus',
'monstercraftcost_set',
'craft_cost',
'transforms_into',
'awakens_from',
'awakens_to',
'awaken_mats_fire_low',
'awaken_mats_fire_mid',
'awaken_mats_fire_high',
'awaken_mats_water_low',
'awaken_mats_water_mid',
'awaken_mats_water_high',
'awaken_mats_wind_low',
'awaken_mats_wind_mid',
'awaken_mats_wind_high',
'awaken_mats_light_low',
'awaken_mats_light_mid',
'awaken_mats_light_high',
'awaken_mats_dark_low',
'awaken_mats_dark_mid',
'awaken_mats_dark_high',
'awaken_mats_magic_low',
'awaken_mats_magic_mid',
'awaken_mats_magic_high',
'source',
'farmable',
'fusion_food',
'bestiary_slug'
]
def _optimized_monster_queryset():
return Monster.objects.all().select_related(
'leader_skill',
'awakens_from',
'awakens_to',
'transforms_into',
).prefetch_related(
'skills',
'skills__effect',
'skills__effect__effect',
'skills__scaling_stats',
'monstercraftcost_set',
'monstercraftcost_set__craft',
'source',
)
def _optimized_skill_queryset():
return Skill.objects.all().prefetch_related(
'scaling_stats',
'skilleffectdetail_set',
'skilleffectdetail_set__effect',
'skilleffectdetail_set__effect__effect',
)
class Query(object):
dungeon = relay.Node.Field(DungeonNode)
all_dungeons = DjangoFilterConnectionField(DungeonNode)
def resolve_dungeon(self, info, id, **kwargs):
return Dungeon.objects.prefetch_related('level_set').get(pk=id)
def resolve_all_dungeons(self, info, **kwargs):
return Dungeon.objects.all().prefetch_related('level_set')
level = relay.Node.Field(LevelNode)
    all_levels = DjangoFilterConnectionField(LevelNode)
def resolve_level(self, info, id, **kwargs):
return Level.objects.select_related('dungeon').get(pk=id)
def resolve_all_levels(self, info, **kwargs):
return Level.objects.all().select_related('dungeon')
monster = relay.Node.Field(MonsterNode)
all_monsters = DjangoFilterConnectionField(
MonsterNode,
filterset_class=MonsterFilter,
max_limit=200,
enforce_first_or_last=True,
)
def resolve_monster(self, info, id, **kwargs):
return _optimized_monster_queryset().get(pk=id)
def resolve_all_monsters(self, info, **kwargs):
return _optimized_monster_queryset()
source = graphene.Field(SourceNode)
all_sources = DjangoFilterConnectionField(SourceNode)
craftCost = relay.Node.Field(MonsterCraftCostNode)
craftMaterial = relay.Node.Field(CraftMaterialNode)
leader_skill = relay.Node.Field(LeaderSkillNode)
skill = relay.Node.Field(SkillNode)
all_skills = DjangoFilterConnectionField(
SkillNode,
filterset_class=SkillFilter,
max_limit=200,
enforce_first_or_last=True,
)
def resolve_skill(self, info, id, **kwargs):
return _optimized_skill_queryset().get(pk=id)
def resolve_all_skills(self, info, **kwargs):
return _optimized_skill_queryset()
skill_effect = relay.Node.Field(EffectNode)
all_skill_effects = DjangoFilterConnectionField(EffectNode)
scaling_stat = relay.Node.Field(ScalingStatNode)
all_scaling_stats = DjangoFilterConnectionField(ScalingStatNode)
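
A minimal sketch of how this Query mixin is typically wired into an executable schema; the RootQuery name and the project-level module doing the wiring are assumptions, not part of this file:

import graphene
from bestiary.schema import Query as BestiaryQuery

class RootQuery(BestiaryQuery, graphene.ObjectType):
    # combine app-level Query mixins into the root query type (hypothetical wiring)
    pass

schema = graphene.Schema(query=RootQuery)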
|
[
"[email protected]"
] | |
9aff49686506d7d1a151a9dc5a4b2d8284b5c1ff
|
01d390ba1ecdf8475ff6bc7655f3b1c86f4a4c10
|
/phonepad_combinations.py
|
9fc14da6f01a514e02f152208e6d0cb73ef4b0cf
|
[] |
no_license
|
IkeyBenz/InterviewProblems
|
c7feb520119f15d18a21108720229c3d0b9b4951
|
55171fc49d30ae21f58000ea14d2a40f1a81d019
|
refs/heads/master
| 2020-06-02T15:36:44.466766 | 2020-05-12T00:49:47 | 2020-05-12T00:49:47 | 191,211,244 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,026 |
py
|
letters = {
'1': '',
'2': 'abc',
'3': 'def',
'4': 'ghi',
'5': 'jkl',
'6': 'mno',
'7': 'pqrs',
'8': 'tuv',
'9': 'wxyz',
'0': ' '
}
# Ikey's Way
def combos(nums, curr=[]):
    if len(nums) == 0:
        return curr
    if not letters[nums[0]]:  # digits like '1' map to no letters; skip them
        return combos(nums[1:], curr)
    if len(curr) == 0:
        return combos(nums[1:], list(letters[nums[0]]))
    extended = []
    for combo in curr:  # bug fix: iterate the accumulated combos, not the function itself
        for letter in letters[nums[0]]:
            extended.append(combo + letter)
    return combos(nums[1:], extended)
# Alan's Way (cleaner cause no optional param)
def combos2(nums):
if len(nums) == 0:
return []
if len(nums) == 1:
return list(letters[nums])
all_combos = []
for combo in combos2(nums[1:]):
for letter in letters[nums[0]]:
            all_combos.append(letter + combo)  # prepend so digit order is preserved
return all_combos
def possible_words(nums):
    # '0' maps to a space, so treat it as a word separator
    return [combos(word) for word in nums.split('0')]
def test_combos():
    # '1' maps to no letters, so test with '234'; spot-check the first six results
    assert combos('234')[:6] == [
        'adg', 'adh', 'adi',
        'aeg', 'aeh', 'aei'
    ]
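
A quick check of the fixed functions (illustrative): combos walks the digits left to right, while combos2 yields the same set in a different order.

print(combos('23'))
# ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']
print(sorted(combos2('23')) == sorted(combos('23')))  # True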
|
[
"[email protected]"
] | |
d7133de348ab246f13ca3e1239122d3158674487
|
1f7fce552cc68731f683ded3f831e8f4650c7197
|
/Axis16/Axis16/wsgi.py
|
5139839645ceae062b39e5a7f5253511300b6776
|
[] |
no_license
|
tanaypatil/axis-website
|
3985068cf1c52bb038b7174cbdf938b8b4084c03
|
b5eda2906150a38b1bb0daf8b23c9194572b849c
|
refs/heads/master
| 2020-06-13T03:14:05.855948 | 2019-06-30T13:12:11 | 2019-06-30T13:12:11 | 194,514,303 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 389 |
py
|
"""
WSGI config for Axis16 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Axis16.settings")
application = get_wsgi_application()
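
For a quick local smoke test, the application object can be served with the standard library's wsgiref; this is a sketch only (real deployments sit behind gunicorn/uWSGI and a web server):

# Sketch -- assumes the project root is on sys.path.
from wsgiref.simple_server import make_server
from Axis16.wsgi import application

make_server('127.0.0.1', 8000, application).serve_forever()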
|
[
"[email protected]"
] | |
2e991e0e4ef010d0049ff75e77aae840060ece57
|
577a40ff1c84d28b88a9ade84d265587d28ed2a3
|
/0715/02.TextProcessingAPI.py
|
708becb6d522488c2c631c2e45a37169303e25c6
|
[] |
no_license
|
bosl95/MachineLearning_Note
|
b167c182fcf5186f6466b8b062cde83b076b0b04
|
934714c5a62e4864f2b5338153c3aaeb3363abe9
|
refs/heads/master
| 2022-12-06T20:58:20.457567 | 2020-09-05T16:18:11 | 2020-09-05T16:18:11 | 279,835,223 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,812 |
py
|
# STEP 1
from keras.preprocessing.text import Tokenizer
samples = ['The cat sat on the mat.','The dog ate my homework.']
# Create a Tokenizer that keeps only the 1,000 most frequent words
tokenizer = Tokenizer(num_words=1000)  # automatically builds a dictionary assigning an index to each word
tokenizer.fit_on_texts(samples)
# STEP 2. Convert the strings into lists of integer indices
sequences = tokenizer.texts_to_sequences(samples)
# print(sequences) # [[1, 2, 3, 4, 1, 5], [1, 6, 7, 8, 9]]
# We can also get the one-hot binary vector representation directly.
one_hot_results = tokenizer.texts_to_matrix(samples, mode='binary')
# print(one_hot_results)
# [[0. 1. 1. ... 0. 0. 0.]
# [0. 1. 0. ... 0. 0. 0.]]
# print(one_hot_results.shape) # (2, 1000)
# print(one_hot_results[0][3])   # 1.0 -- the word was found in the dictionary
# print(one_hot_results[0][10])  # 0.0 -- the word was not found in the dictionary
# Count how many unique words were processed
word_idx = tokenizer.word_index
print("Found {} unique tokens".format(len(word_idx)))
# STEP 3
from keras.datasets import imdb
from keras import preprocessing
import numpy as np
# Number of words to use as features
max_features = 10000
# Maximum text length to use (only the max_features most frequent words are kept)
maxlen = 20  # maximum number of words per sentence
# Load the data as lists of integers
(x_train, y_train),(x_test,y_test) = imdb.load_data(num_words=max_features)
# print(x_train.shape, y_train.shape, x_test.shape, y_test.shape) # (25000,) (25000,) (25000,) (25000,)
# Convert the lists into a 2D integer tensor of shape (samples, maxlen)
x_train = preprocessing.sequence.pad_sequences(x_train,maxlen=maxlen)
x_test = preprocessing.sequence.pad_sequences(x_test,maxlen=maxlen)
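
What pad_sequences does on a toy input (illustrative sketch; shorter sequences are zero-padded on the left by default):

demo = [[1, 2, 3], [4, 5]]
print(preprocessing.sequence.pad_sequences(demo, maxlen=4))
# [[0 1 2 3]
#  [0 0 4 5]]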
# STEP 4. Build the model
from keras.models import Sequential
from keras.layers import Flatten,Dense,Embedding
model = Sequential()
# Specify input_length on the Embedding layer so its output can be
# flattened later by the Flatten layer.
# arg 1: vocabulary size / arg 2: embedding dimension / arg 3: max sequence length
# The Embedding layer's output has shape (samples, maxlen, 8)
model.add(Embedding(10000, 8, input_length=maxlen))  # embedding layer (vocab size, vector dim, max words per sentence)
# first argument  = size of the vocabulary, i.e. the total number of words
# second argument = output dimension of the embedding vectors
# input_length    = length of the input sequences
# Flatten the 3D embedding tensor into a 2D tensor of shape (samples, maxlen * 8)
model.add(Flatten())
# Add the classifier
model.add(Dense(1,activation='sigmoid'))
model.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['acc'])
model.summary()
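
The parameter count reported by summary() can be verified by hand (a worked check, not in the original):

# Embedding: 10000 words * 8 dims  = 80,000 parameters
# Flatten:   (20, 8) -> 160 values,      0 parameters
# Dense:     160 weights + 1 bias   =    161 parameters
# Total                             = 80,161 parameters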
# Train
history = model.fit(x_train, y_train,
                    epochs=10,             # repeat for 10 epochs
                    batch_size=32,         # update weights every 32 samples
                    validation_split=0.2)  # carve out 20% of the training data for validation
# STEP 5. Test
# Predictions from the sigmoid layer: below 0.5 means negative, above means positive
pre = model.predict(x_test)
print(pre)
# STEP 6. Check the performance
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1,len(acc)+1)
plt.plot(epochs,acc,'bo',label='Training acc')
plt.plot(epochs, val_acc,'b',label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss,'bo',label='Training loss')
plt.plot(epochs, val_loss,'b',label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
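
As a hedged follow-up, the held-out test split loaded earlier can be scored with the standard Keras evaluate call:

test_loss, test_acc = model.evaluate(x_test, y_test)
print('test accuracy: {:.3f}'.format(test_acc))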
|
[
"[email protected]"
] |