| column | dtype | min | max |
|---|---|---|---|
| blob_id | string (length) | 40 | 40 |
| directory_id | string (length) | 40 | 40 |
| path | string (length) | 3 | 288 |
| content_id | string (length) | 40 | 40 |
| detected_licenses | list (length) | 0 | 112 |
| license_type | string (2 classes) | | |
| repo_name | string (length) | 5 | 115 |
| snapshot_id | string (length) | 40 | 40 |
| revision_id | string (length) | 40 | 40 |
| branch_name | string (684 classes) | | |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 | 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 | 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 | 2023-09-06 01:08:06 |
| github_id | int64, nullable | 4.92k | 681M |
| star_events_count | int64 | 0 | 209k |
| fork_events_count | int64 | 0 | 110k |
| gha_license_id | string (22 classes) | | |
| gha_event_created_at | timestamp[us], nullable | 2012-06-04 01:52:49 | 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable | 2008-05-22 07:58:19 | 2023-08-21 12:35:19 |
| gha_language | string (147 classes) | | |
| src_encoding | string (25 classes) | | |
| language | string (1 class) | | |
| is_vendor | bool (2 classes) | | |
| is_generated | bool (2 classes) | | |
| length_bytes | int64 | 128 | 12.7k |
| extension | string (142 classes) | | |
| content | string (length) | 128 | 8.19k |
| authors | list (length) | 1 | 1 |
| author_id | string (length) | 1 | 132 |
079662848033b228ee09c8bb812f1c80e52e4cb0
|
1f68b6f9f55afaa7cb32df262f4fe0864472da05
|
/leetcode(多线程,DP,贪心,SQL)/二刷DP与贪心LeetCode/回溯/51. N皇后/solution.py
|
761c902fdb433e6e6f0765ec8b75578496b26cb9
|
[] |
no_license
|
faker-hong/testOne
|
7c4496362cb5495c25c640076102fe0704f8552f
|
768edc4a5526c8972fec66c6a71a38c0b24a1451
|
refs/heads/master
| 2022-12-04T14:47:53.614685 | 2020-10-30T03:17:50 | 2020-10-30T03:17:50 | 196,514,862 | 1 | 0 | null | 2022-11-22T02:43:32 | 2019-07-12T05:35:09 |
Python
|
UTF-8
|
Python
| false | false | 1,100 |
py
|
class Solution(object):
def solveNQueens(self, n):
"""
:type n: int
:rtype: List[List[str]]
"""
# Check whether the queen placed at (row, col) conflicts with queens already placed
def is_valid(row, col, track):
# Each call uses a fresh row, so same-row conflicts are impossible
if col in track: # same column?
return False
# Check both diagonals
for k in range(row):
if row + col == k + track[k] or row - col == k - track[k]:
return False
return True
def backtrack(row, track):
if row == n:
res.append(track)
return
for col in range(n):
if is_valid(row, col, track): # position is valid; move on to the next row
backtrack(row + 1, track + [col])
res = []
backtrack(0, [])
return [['.'*i + 'Q' + '.'*(n-i-1) for i in l] for l in res]
if __name__ == '__main__':
s = Solution()
res = s.solveNQueens(4)
print(res)
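# Illustrative sanity check (added here, not part of the original file):
# solution counts for small boards are well known (n=4 -> 2, n=8 -> 92).
assert len(s.solveNQueens(4)) == 2
assert len(s.solveNQueens(8)) == 92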
|
[
"[email protected]"
] | |
952fd72ad5a8100025aa2e461084375532616b8e
|
677562bf6835be104204f32a6c9998d9a901f9fc
|
/from_scratch/detect_metadata/times.py
|
fefebd85201b58cd0821fa91c8c528a5f775d688
|
[] |
no_license
|
santokalayil/neural_network
|
3cb2f843430e9f35e017edcde83ba13212d0f5cf
|
f453856214d027f55afc5c861784dc693a9bf2c6
|
refs/heads/main
| 2023-06-12T01:53:43.588403 | 2021-07-02T08:30:20 | 2021-07-02T08:30:20 | 382,281,787 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 343 |
py
|
# import platform # to detect the operating system
import os
import time
def get_last_modified_time(path_to_file):
return os.path.getmtime(path_to_file) # time.ctime(os.path.getmtime(path_to_file))
def get_created_time(path_to_file):
return os.path.getctime(path_to_file) # time.ctime(os.path.getctime(path_to_file))
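# Illustrative usage (added here, not part of the original file): the epoch
# floats returned above become human-readable via time.ctime, e.g.
#
#     print(time.ctime(get_last_modified_time(__file__)))
#
# Caveat: on Linux, os.path.getctime reports the inode-change time rather
# than the creation time; only Windows returns a true creation time here.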
|
[
"[email protected]"
] | |
3b937e27177d4b2213f47baa81c00973e7037be0
|
bc4910ecec94749697dbce5e7cf5093275411125
|
/src/generator/Cheetah/ErrorCatchers.py
|
500f2490d613628fe69f683fafa883f5d586e55d
|
[
"MIT"
] |
permissive
|
se210/tracy
|
7e73a6f0d64f355842b9a11035c3720b4d40fde5
|
232a42ce1aefcffa1f8544c89d60a16ebd897033
|
refs/heads/master
| 2021-01-09T20:55:03.241111 | 2013-12-15T23:34:36 | 2013-12-15T23:34:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,838 |
py
|
#!/usr/bin/env python
# $Id: ErrorCatchers.py,v 1.1 2006-09-06 09:50:08 skyostil Exp $
"""ErrorCatcher class for Cheetah Templates
Meta-Data
================================================================================
Author: Tavis Rudd <[email protected]>
Version: $Revision: 1.1 $
Start Date: 2001/08/01
Last Revision Date: $Date: 2006-09-06 09:50:08 $
"""
__author__ = "Tavis Rudd <[email protected]>"
__revision__ = "$Revision: 1.1 $"[11:-2]
import time
from Cheetah.NameMapper import NotFound
class Error(Exception):
pass
class ErrorCatcher:
_exceptionsToCatch = (NotFound,)
def __init__(self, templateObj):
pass
def exceptions(self):
return self._exceptionsToCatch
def warn(self, exc_val, code, rawCode, lineCol):
return rawCode
## make an alias
Echo = ErrorCatcher
class BigEcho(ErrorCatcher):
def warn(self, exc_val, code, rawCode, lineCol):
return "="*15 + "<" + rawCode + " could not be found>" + "="*15
class KeyError(ErrorCatcher):
def warn(self, exc_val, code, rawCode, lineCol):
raise KeyError("no '%s' in this Template Object's Search List" % rawCode)
class ListErrors(ErrorCatcher):
"""Accumulate a list of errors."""
_timeFormat = "%c"
def __init__(self, templateObj):
ErrorCatcher.__init__(self, templateObj)
self._errors = []
def warn(self, exc_val, code, rawCode, lineCol):
dict = locals().copy()
del dict['self']
dict['time'] = time.strftime(self._timeFormat,
time.localtime(time.time()))
self._errors.append(dict)
return rawCode
def listErrors(self):
"""Return the list of errors."""
return self._errors
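# Illustrative usage sketch (added here, not from the original module; it
# assumes Cheetah is importable and relies on ListErrors not using its
# templateObj argument, so None suffices for a standalone demo):
#
#     catcher = ListErrors(None)
#     catcher.warn(None, None, '$missingVar', (1, 1))
#     print(catcher.listErrors())  # one dict: exc_val/code/rawCode/lineCol/time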
|
[
"[email protected]"
] | |
5207bdfd9ec7ed6f7459b591d7345960cb085457
|
6a5ce7d885db1baa5a9d43b26f0ae623a5ef0f01
|
/azure-mgmt-web/azure/mgmt/web/models/domain_registration_input.py
|
864529f0239c7032c4baa763d7558207f03f1109
|
[
"Apache-2.0"
] |
permissive
|
JammyBrand82/azure-sdk-for-python
|
333af194ff9143ec77f49203a5a71f15c399f278
|
c65e189cd41bd3464556b17bfcdee1303867996c
|
refs/heads/master
| 2021-01-17T18:31:10.661151 | 2016-03-17T21:03:08 | 2016-03-17T21:03:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,993 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class DomainRegistrationInput(Resource):
"""
Domain registration input for the validation API
:param str id: Resource Id
:param str name: Resource Name
:param str location: Resource Location
:param str type: Resource type
:param dict tags: Resource tags
:param str domain_registration_input_name: Name of the domain
:param Contact contact_admin: Admin contact information
:param Contact contact_billing: Billing contact information
:param Contact contact_registrant: Registrant contact information
:param Contact contact_tech: Technical contact information
:param str registration_status: Domain registration status. Possible
values include: 'Active', 'Awaiting', 'Cancelled', 'Confiscated',
'Disabled', 'Excluded', 'Expired', 'Failed', 'Held', 'Locked', 'Parked',
'Pending', 'Reserved', 'Reverted', 'Suspended', 'Transferred',
'Unknown', 'Unlocked', 'Unparked', 'Updated', 'JsonConverterFailed'
:param str provisioning_state: Domain provisioning state. Possible values
include: 'Succeeded', 'Failed', 'Canceled', 'InProgress', 'Deleting'
:param list name_servers: Name servers
:param bool privacy: If true then domain privacy is enabled for this
domain
:param datetime created_time: Domain creation timestamp
:param datetime expiration_time: Domain expiration timestamp
:param datetime last_renewed_time: Timestamp when the domain was renewed
last time
:param bool auto_renew: If true then the domain will be renewed
:param bool ready_for_dns_record_management: If true then Azure can
assign this domain to Web Apps. This value will be true if domain
registration status is active and it is hosted on name servers Azure has
programmatic access to
:param list managed_host_names: All hostnames derived from the domain and
assigned to Azure resources
:param DomainPurchaseConsent consent: Legal agreement consent
"""
_validation = {
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'domain_registration_input_name': {'key': 'properties.name', 'type': 'str'},
'contact_admin': {'key': 'properties.contactAdmin', 'type': 'Contact'},
'contact_billing': {'key': 'properties.contactBilling', 'type': 'Contact'},
'contact_registrant': {'key': 'properties.contactRegistrant', 'type': 'Contact'},
'contact_tech': {'key': 'properties.contactTech', 'type': 'Contact'},
'registration_status': {'key': 'properties.registrationStatus', 'type': 'DomainStatus'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'ProvisioningState'},
'name_servers': {'key': 'properties.nameServers', 'type': '[str]'},
'privacy': {'key': 'properties.privacy', 'type': 'bool'},
'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601'},
'expiration_time': {'key': 'properties.expirationTime', 'type': 'iso-8601'},
'last_renewed_time': {'key': 'properties.lastRenewedTime', 'type': 'iso-8601'},
'auto_renew': {'key': 'properties.autoRenew', 'type': 'bool'},
'ready_for_dns_record_management': {'key': 'properties.readyForDnsRecordManagement', 'type': 'bool'},
'managed_host_names': {'key': 'properties.managedHostNames', 'type': '[HostName]'},
'consent': {'key': 'properties.consent', 'type': 'DomainPurchaseConsent'},
}
def __init__(self, location, id=None, name=None, type=None, tags=None, domain_registration_input_name=None, contact_admin=None, contact_billing=None, contact_registrant=None, contact_tech=None, registration_status=None, provisioning_state=None, name_servers=None, privacy=None, created_time=None, expiration_time=None, last_renewed_time=None, auto_renew=None, ready_for_dns_record_management=None, managed_host_names=None, consent=None, **kwargs):
super(DomainRegistrationInput, self).__init__(id=id, name=name, location=location, type=type, tags=tags, **kwargs)
self.domain_registration_input_name = domain_registration_input_name
self.contact_admin = contact_admin
self.contact_billing = contact_billing
self.contact_registrant = contact_registrant
self.contact_tech = contact_tech
self.registration_status = registration_status
self.provisioning_state = provisioning_state
self.name_servers = name_servers
self.privacy = privacy
self.created_time = created_time
self.expiration_time = expiration_time
self.last_renewed_time = last_renewed_time
self.auto_renew = auto_renew
self.ready_for_dns_record_management = ready_for_dns_record_management
self.managed_host_names = managed_host_names
self.consent = consent
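# Illustrative construction (added here; the values are made up and the
# Contact/DomainPurchaseConsent sub-objects are omitted for brevity):
#
#     reg_input = DomainRegistrationInput(
#         location='global',
#         domain_registration_input_name='example.com',
#         privacy=True,
#         auto_renew=False,
#     )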
|
[
"[email protected]"
] | |
cf44ce6aefffd95765ff8071f01abc34af978a80
|
41a0220bf117124bf281a50396582c0df1e0675f
|
/Pyrado/tests/environment_wrappers/test_action_delay.py
|
c04f64bbf23a9b2a2f2bfdc9db1d3b524b130d61
|
[
"BSD-3-Clause"
] |
permissive
|
jacarvalho/SimuRLacra
|
c071dfc22d4f2c54a198405e8974d03333c9961d
|
a6c982862e2ab39a9f65d1c09aa59d9a8b7ac6c5
|
refs/heads/master
| 2022-11-24T20:08:52.376545 | 2020-08-03T09:01:35 | 2020-08-03T09:01:35 | 276,885,755 | 0 | 0 |
BSD-3-Clause
| 2020-07-03T11:39:21 | 2020-07-03T11:39:21 | null |
UTF-8
|
Python
| false | false | 2,392 |
py
|
import numpy as np
import pytest
from pyrado.spaces.box import BoxSpace
from pyrado.environment_wrappers.action_delay import ActDelayWrapper
from tests.environment_wrappers.mock_env import MockEnv
@pytest.mark.wrappers
def test_no_delay():
mockenv = MockEnv(act_space=BoxSpace(-1, 1, shape=(2,)))
wenv = ActDelayWrapper(mockenv, delay=0)
# Reset to initialize buffer
wenv.reset()
# Perform some actions
wenv.step(np.array([4, 1]))
assert mockenv.last_act == [4, 1]
wenv.step(np.array([7, 5]))
assert mockenv.last_act == [7, 5]
@pytest.mark.wrappers
def test_act_delay():
mockenv = MockEnv(act_space=BoxSpace(-1, 1, shape=(2,)))
wenv = ActDelayWrapper(mockenv, delay=2)
# Reset to initialize buffer
wenv.reset()
# Perform some actions
wenv.step(np.array([0, 1]))
assert mockenv.last_act == [0, 0]
wenv.step(np.array([2, 4]))
assert mockenv.last_act == [0, 0]
wenv.step(np.array([1, 2]))
assert mockenv.last_act == [0, 1]
wenv.step(np.array([2, 3]))
assert mockenv.last_act == [2, 4]
@pytest.mark.wrappers
def test_reset():
mockenv = MockEnv(act_space=BoxSpace(-1, 1, shape=(2,)))
wenv = ActDelayWrapper(mockenv, delay=1)
# Reset to initialize buffer
wenv.reset()
# Perform some actions
wenv.step(np.array([0, 4]))
assert mockenv.last_act == [0, 0]
wenv.step(np.array([4, 4]))
assert mockenv.last_act == [0, 4]
# The next action would be [4, 4], but now we reset again
wenv.reset()
wenv.step(np.array([1, 2]))
assert mockenv.last_act == [0, 0]
wenv.step(np.array([2, 3]))
assert mockenv.last_act == [1, 2]
@pytest.mark.wrappers
def test_domain_param():
mockenv = MockEnv(act_space=BoxSpace(-1, 1, shape=(2,)))
wenv = ActDelayWrapper(mockenv, delay=1)
# Reset to initialize buffer
wenv.reset()
# Perform some actions
wenv.step(np.array([0, 1]))
assert mockenv.last_act == [0, 0]
wenv.step(np.array([2, 4]))
assert mockenv.last_act == [0, 1]
# change the delay and reset
wenv.domain_param = {'act_delay': 2}
wenv.reset()
wenv.step(np.array([1, 2]))
assert mockenv.last_act == [0, 0]
wenv.step(np.array([2, 3]))
assert mockenv.last_act == [0, 0]
wenv.step(np.array([8, 9]))
assert mockenv.last_act == [1, 2]
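# Illustrative sketch (added here): the contract these tests assert can be
# mimicked by a plain FIFO buffer seeded with `delay` zero-actions; this is
# only an illustration of the semantics, not pyrado's implementation.
from collections import deque

def make_delay_buffer(delay, act_dim=2):
    buf = deque([np.zeros(act_dim)] * delay)
    def step(act):
        buf.append(np.asarray(act))
        return buf.popleft()  # the action the wrapped env would actually see
    return step

step = make_delay_buffer(delay=2)
assert list(step(np.array([0, 1]))) == [0, 0]
assert list(step(np.array([2, 4]))) == [0, 0]
assert list(step(np.array([1, 2]))) == [0, 1]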
|
[
"[email protected]"
] | |
6673858896690ec1a546222c0f8b383b73cf8ac8
|
847273de4b1d814fab8b19dc651c651c2d342ede
|
/.history/Sudoku_II_007_20180621235112.py
|
41f8ce78220a122f130868148e83683e6dcb7b73
|
[] |
no_license
|
Los4U/sudoku_in_python
|
0ba55850afcffeac4170321651620f3c89448b45
|
7d470604962a43da3fc3e5edce6f718076197d32
|
refs/heads/master
| 2020-03-22T08:10:13.939424 | 2018-07-04T17:21:13 | 2018-07-04T17:21:13 | 139,749,483 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,084 |
py
|
from random import randint
import copy
sudoku1 = [
[5, 9, 8, 6, 1, 2, 3, 4, 7],
[2, 1, 7, 9, 3, 4, 8, 6, 5],
[6, 4, 3, 5, 8, 7, 1, 2, 9],
[1, 6, 5, 4, 9, 8, 2, 7, 3],
[3, 2, 9, 7, 6, 5, 4, 1, 8],
[7, 8, 4, 3, 2, 1, 5, 9, 6],
[8, 3, 1, 2, 7, 6, 9, 5, 4],
[4, 7, 2, 8, 5, 9, 6, 3, 1],
[9, 5, 6, 1, 4, 3, 7, " ", " "]
]
sudoku2 = [
[9, 8, 7, 4, 3, 2, 5, 6, 1],
[2, 4, 3, 5, 1, 6, 8, 7, 9],
[5, 6, 1, 7, 9, 8, 4, 3, 2],
[3, 9, 5, 6, 4, 7, 2, 1, 8],
[8, 2, 4, 3, 5, 1, 6, 9, 7],
[1, 7, 6, 2, 8, 9, 3, 4, 5],
[7, 1, 2, 8, 6, 3, 9, 5, 4],
[4, 3, 8, 9, 7, 5, 1, 2, 6],
[' ', 5, ' ', ' ', 2, ' ', 7, ' ', ' ']
]
sudoku3 = [
[9, 8, 7, 4, 3, 2, 5, 6, 1],
[2, 4, 3, 5, 1, 6, 8, 7, 9],
[5, 6, 1, 7, 9, 8, 4, 3, 2],
[3, 9, 5, 6, 4, 7, 2, 1, 8],
[8, 2, 4, 3, 5, 1, 6, 9, 7],
[1, 7, 6, 2, 8, 9, 3, 4, 5],
[7, 1, 2, 8, 6, 3, 9, 5, 4],
[4, 3, 8, 9, 7, 5, 1, 2, 6],
[' ', 5, ' ', ' ', 2, ' ', 7, ' ', ' ']
]
def printSudoku():
i = 0
while i < 10:
if i == 0:
print(" 1 2 3 4 5 6 7 8 9")
print(" -------------------------")
elif i == 3 or i == 6 or i == 9:
print(" -------------------------")
line = "|"
if i < 9:
print(' {2} {1} {0[0]} {0[1]} {0[2]} {1} {0[3]} {0[4]} {0[5]} {1} {0[6]} {0[7]} {0[8]} {1}'.format(sudoku[i], line, i+1))
i = i + 1
print(" ")
print(" %@@@@@@@ @@@ @@@ (@@@@@@@@@ ,@@@@2@@@@@ @@@, /@@@/ @@@, @@@ ")
print(" @@@* @@@ @@@ (@@( /@@@# .@@@% (@@@ @@@, @@@% @@@, @@@. ")
print(" @@@& @@@ @@@ (@@( @@@* @@@% #@@% @@@,.@@@. @@@, @@@. ")
print(" ,@@@@@@* @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@%@@% @@@, @@@. ")
print(" /@@@@@# @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@,@@@( @@@, @@@. ")
print(" *@@@. @@@ .@@& (@@( @@@. @@@% &@@( @@@, &@@@. @@@* .@@@. ")
print(" &, &@@@ #@@@. ,@@@, (@@( ,&@@@* ,@@@& .@@@@ @@@, (@@@/ #@@@* @@@# ")
print(",@@@@@@@@( (@@@@@@@@% (@@@@@@@@@( #@@@@@@@@@, @@@, ,@@@% ,@@@@@@@@@. \n ")
print("To start game input:")
print(" r - to load random puzzle:")
print(" 1 - to load chart nr 1:")
print(" 2 - to load chart nr 2:")
# print(" 3 - to load chart nr 3:")
choice = input("Input here: ")
print("\n\n\n\n")
s = 0
if choice == "R" or choice == "r":
listaSudoku = [sudoku1, sudoku2, sudoku3]
sudoku_number = randint(0, 2)
print("Plansza nr:", sudoku_number)
s = sudoku_number
sudoku = copy.deepcopy(listaSudoku[sudoku_number])
elif int(choice) == 1:
s = 1
sudoku = copy.deepcopy(sudoku1)
elif int(choice) == 2:
s = 2
sudoku = copy.deepcopy(sudoku2)
elif int(choice) == 3:
s = 3
sudoku = copy.deepcopy(sudoku3)
while True: # prints Sudoku until is solved
# print("Your sudoku to solve:")
printSudoku()
print("\nInput 3 numbers in format a b c, np. 4 5 8")
print(" a - row number")
print(" b - column number ")
print(" c - value")
# vprint(" r - reset chart to start\n ")
x = input("Input a b c: ")
print("")
numbers = " 0123456789" # conditions of entering the numbers !
if (len(x) != 5) or (str(x[0]) not in numbers) or (str(x[2]) not in numbers) or (
str(x[4]) not in numbers) or (str(x[1]) != " ") or (str(x[3]) != " "):
if x == "r": # reset
if s == 1:
sudoku = copy.deepcopy(sudoku1)
elif s == 2:
sudoku = copy.deepcopy(sudoku2)
elif s == 3:
sudoku = copy.deepcopy(sudoku3)
elif x == "h": # show:
print(sudoku)
print(sudoku1)
else:
print("Error - wrong number format \n ")
continue
else:
sudoku[int(x[0])-1][int(x[2])-1] = int(x[4])
column1 = 0
column2 = 0
try: # check if sudoku is solved
i = 0
list = []
while i < 9: # check whether every column sums to 45
column = 0
for item in sudoku:
column = column + item[i]
list.append(column)
i += 1
is45 = 0 # check if sudoku is solved
for listElement in list:
if listElement == 45:
is45 = is45 + 1
#
i = 0
for item in sudoku:
if sum(item) == 45 and is45 == 9:
i = i + 1
if i == 9:
printSudoku()
print(" @@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print(" @@@@@@@@@@ YOU WIN @@@@@@@@@@")
print(" @@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
break
except TypeError:
print()
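# Illustrative alternative (added here, not part of the original snapshot):
# the solved-board check can avoid relying on TypeError for unfilled cells.
# Like the original, it validates sums only, not duplicate digits:
#
#     def is_solved(board):
#         filled = all(isinstance(v, int) for row in board for v in row)
#         rows_ok = all(sum(row) == 45 for row in board)
#         cols_ok = all(sum(col) == 45 for col in zip(*board))
#         return filled and rows_ok and cols_ok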
|
[
"[email protected]"
] | |
8f4065d632706b252e9aaa5aef0f380f65fed859
|
57c38487a6a689318c960fa7d6b0185f372241bc
|
/presalytics_ooxml_automation/models/theme_effect_map.py
|
d5e94240d7206cc938862efbf1be434f6ab396ab
|
[
"MIT"
] |
permissive
|
presalytics/ooxml-automation-python-client
|
2c88bae455b7e567ebdb6a4ea106bbdcd192ac47
|
fa6100eef1743e43b4d25b3faac79d39fe32c9d7
|
refs/heads/master
| 2020-06-05T23:42:32.964361 | 2019-12-27T22:51:40 | 2019-12-27T22:51:40 | 192,575,331 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,626 |
py
|
# coding: utf-8
"""
OOXML Automation
This API helps users convert Excel and Powerpoint documents into rich, live dashboards and stories. # noqa: E501
The version of the OpenAPI document: 0.1.0-no-tags
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from presalytics_ooxml_automation.configuration import Configuration
class ThemeEffectMap(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'theme_id': 'str',
'intensity_id': 'int',
'id': 'str'
}
attribute_map = {
'theme_id': 'themeId',
'intensity_id': 'intensityId',
'id': 'id'
}
def __init__(self, theme_id=None, intensity_id=None, id=None, local_vars_configuration=None): # noqa: E501
"""ThemeEffectMap - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._theme_id = None
self._intensity_id = None
self._id = None
self.discriminator = None
self.theme_id = theme_id
if intensity_id is not None:
self.intensity_id = intensity_id
if id is not None:
self.id = id
@property
def theme_id(self):
"""Gets the theme_id of this ThemeEffectMap. # noqa: E501
:return: The theme_id of this ThemeEffectMap. # noqa: E501
:rtype: str
"""
return self._theme_id
@theme_id.setter
def theme_id(self, theme_id):
"""Sets the theme_id of this ThemeEffectMap.
:param theme_id: The theme_id of this ThemeEffectMap. # noqa: E501
:type: str
"""
self._theme_id = theme_id
@property
def intensity_id(self):
"""Gets the intensity_id of this ThemeEffectMap. # noqa: E501
:return: The intensity_id of this ThemeEffectMap. # noqa: E501
:rtype: int
"""
return self._intensity_id
@intensity_id.setter
def intensity_id(self, intensity_id):
"""Sets the intensity_id of this ThemeEffectMap.
:param intensity_id: The intensity_id of this ThemeEffectMap. # noqa: E501
:type: int
"""
self._intensity_id = intensity_id
@property
def id(self):
"""Gets the id of this ThemeEffectMap. # noqa: E501
:return: The id of this ThemeEffectMap. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ThemeEffectMap.
:param id: The id of this ThemeEffectMap. # noqa: E501
:type: str
"""
self._id = id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ThemeEffectMap):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ThemeEffectMap):
return True
return self.to_dict() != other.to_dict()
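# Illustrative usage (added here; assumes the package's Configuration default
# is importable, and the values are made up):
#
#     m = ThemeEffectMap(theme_id='theme-123', intensity_id=2, id='abc')
#     print(m.to_dict())  # {'theme_id': 'theme-123', 'intensity_id': 2, 'id': 'abc'}
#     print(m == ThemeEffectMap(theme_id='theme-123', intensity_id=2, id='abc'))  # True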
|
[
"[email protected]"
] | |
f7de7c2ef755e5893d687912c0b74ed7148a8d02
|
6b453d913b1ae6697da738ddae9df013e8128d91
|
/app/members/urls.py
|
1a117fa9f25eacf78656a78f5d5d3ef9928e11ec
|
[] |
no_license
|
moorekwon/instagram
|
9703ecb1aed460ddec685c0bd06fe0fac3807548
|
aaeca79f0d2765a24dd780bb12848c2c7b76a009
|
refs/heads/master
| 2022-12-12T18:13:37.536048 | 2020-02-03T08:33:16 | 2020-02-03T08:33:16 | 229,711,643 | 0 | 0 | null | 2022-12-08T03:32:50 | 2019-12-23T08:39:38 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 267 |
py
|
from django.urls import path
from . import views
app_name = 'members'
urlpatterns = [
path('login/', views.login_view, name='login'),
path('logout/', views.logout_view, name='logout-view'),
path('naver-login/', views.naver_login, name='naver-login')
]
|
[
"[email protected]"
] | |
4b26d2288fe6cceaed839816ed06bdce9f6e52d8
|
f0ae65bddedea44b1e66f3d235a901e62afae3f2
|
/macgyyver_stuff/parse_input.py
|
9de1a35ebd5d51857f29e74f682675a2370a7704
|
[] |
no_license
|
Brandon-Valley/my_movie_tools
|
d0a2ba91cda054c1b68709f1a9082028842e83a1
|
371742bfeaa0cfa2985ce06a6865f6ae09445029
|
refs/heads/master
| 2023-01-10T00:06:48.696103 | 2022-12-28T14:09:20 | 2022-12-28T14:09:20 | 221,057,903 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 619 |
py
|
import pyperclip
INPUT_PATH = 'input.txt'
def read_text_file(file_path):
with open(file_path, 'r', encoding='utf-8') as text_file: # can throw FileNotFoundError
result = tuple(l.rstrip() for l in text_file.readlines())
return result
raw_in = read_text_file(INPUT_PATH)
print(raw_in)
in_str = ''
for line in raw_in:
in_str += line
print(in_str)
s_raw_in = in_str.split('"')
print(s_raw_in)
e_l = []
for elm_num, elm in enumerate(s_raw_in):
if elm_num % 2 != 0:
e_l.append(elm)
print(e_l)
pyperclip.copy(str(e_l))
spam = pyperclip.paste()
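# Illustrative equivalent (added here, not part of the original file): the
# odd-indexed pieces of in_str.split('"') are exactly the quoted substrings,
# which a regex extracts in one step:
#
#     import re
#     e_l = re.findall(r'"([^"]*)"', in_str)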
|
[
"[email protected]"
] | |
724778ba9809a4764fe8fb9db59911050b386395
|
8329282a8fda056d705c1af6dbcd0de1ed7ca25e
|
/.history/textutiles/textutiles/views_20210522223732.py
|
629f8ff15d006894a027b5cc2f0bacbc0ca08e6f
|
[] |
no_license
|
ritikalohia/Django-beginners-
|
c069b16867407ef883bb00c6faf4f601921c118a
|
829e28ab25201853de5c71a10ceff30496afea52
|
refs/heads/main
| 2023-05-04T03:34:29.082656 | 2021-05-22T17:38:21 | 2021-05-22T17:38:21 | 369,869,599 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,627 |
py
|
#created
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
#params = {'name' : 'Ritika', 'place' : 'Mars'}
return render(request, 'index.html')
#return HttpResponse("Home")
def contact(request):
return render(request, 'contact.html')
def about(request):
return render(request, 'about_us.html')
def analyze(request):
# get the text from the GET request
djtext = request.GET.get('text', 'default')
#check checkbox values
removepunc = request.GET.get('removepunc', 'off')
fullcaps = request.GET.get('fullcaps', 'off')
newlineremover = request.GET.get('newlineremover', 'off')
spaceremover = request.GET.get('spaceremover', 'off')
charcount = request.GET.get('charcount', 'off')
if removepunc == "on":
#analyzed = djtext
punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_'''
analyzed = ""
for char in djtext:
if char not in punctuations:
analyzed = analyzed + char
params ={'purpose':'removed punctuations', 'analyzed_text': analyzed}
#analyze the text
return render(request, 'analyze.html', params)
elif(fullcaps == "on"):
analyzed =""
for char in djtext:
analyzed = analyzed + char.upper()
params ={'purpose':'changed to UPPERCASE', 'analyzed_text': analyzed}
#analyze the text
djtext = analyzed
#return render(request, 'analyze.html', params)
if(newlineremover== "on"):
analyzed =""
for char in djtext:
if char != '\n' and char !="\r":
analyzed = analyzed + char
params ={'purpose':'Removed new lines', 'analyzed_text': analyzed}
#analyze the text
djtext = analyzed
#return render(request, 'analyze.html', params)
if(spaceremover== "on"):
analyzed =""
for index, char in enumerate(djtext):
if not djtext[index] == " " and djtext[index+1]==" ":
analyzed = analyzed + char
params ={'purpose':'extra space removed', 'analyzed_text': analyzed}
#analyze the text
djtext = analyzed
#return render(request, 'analyze.html', params)
if(charcount== "on"):
a=0
for char in djtext:
a = a + 1
params = {'purpose': 'character count', 'analyzed_text': a}
#analyze the text
#return render(request, 'analyze.html', params)
else:
return HttpResponse("Error")
# def capfirst(request):
# return HttpResponse("capitalize first")
|
[
"[email protected]"
] | |
c6a0f515e4061baa17af3a79b41c463f25758ff0
|
92429015d9a1f1cea9b9bf2c9f1a8a7a07586af5
|
/option.py
|
6e7b7892e37aa810a008bdf082451b08034a0125
|
[] |
no_license
|
arthur-qiu/adv_vis
|
46a953ce6c3d562137c8e566bc9b523e25bc5bbd
|
ba46c00cf38ca5186d7db84844892036ed714eaf
|
refs/heads/master
| 2021-01-03T23:00:45.065108 | 2020-04-05T03:47:01 | 2020-04-05T03:47:01 | 240,272,320 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,099 |
py
|
import argparse
import os
class BaseOptions():
def __init__(self):
self.parser = argparse.ArgumentParser(description='Trains a CIFAR Classifier',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.initialized = False
def initialize(self):
self.parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10', 'cifar100'],
help='Choose between CIFAR-10, CIFAR-100.')
self.parser.add_argument('--model', '-m', type=str, default='wrn',
choices=['wrn'], help='Choose architecture.')
# Optimization options
self.parser.add_argument('--epochs', '-e', type=int, default=50, help='Number of epochs to train.')
self.parser.add_argument('--start_epoch', type=int, default=1, help='The start epoch to train. Design for restart.')
self.parser.add_argument('--learning_rate', '-lr', type=float, default=0.1, help='The initial learning rate.')
self.parser.add_argument('--batch_size', '-b', type=int, default=128, help='Batch size.')
self.parser.add_argument('--test_bs', type=int, default=128)
self.parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')
self.parser.add_argument('--decay', '-d', type=float, default=0.0005, help='Weight decay (L2 penalty).')
self.parser.add_argument('--epoch_step', default='[40,42,44,46,48]', type=str,
help='json list with epochs to drop lr on')
self.parser.add_argument('--lr_decay_ratio', default=0.2, type=float)
# Checkpoints
self.parser.add_argument('--save', '-s', type=str, default='./logs/cifar10_adv', help='Folder to save checkpoints.')
self.parser.add_argument('--load', '-l', type=str, default='', help='Checkpoint path to resume / test.')
self.parser.add_argument('--test', '-t', action='store_true', help='Test only flag.')
self.parser.add_argument('--dataroot', default='.', type=str)
# Acceleration
self.parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
self.parser.add_argument('--prefetch', type=int, default=1, help='Pre-fetching threads.')
# Adversarial setting
self.parser.add_argument('--epsilon', type=float, default=8 / 255,
help='perturbation')
self.parser.add_argument('--num_steps', type=int, default=7,
help='perturb number of steps')
self.parser.add_argument('--step_size', type=float, default=2 / 255,
help='perturb step size')
self.parser.add_argument('--test_num_steps', type=int, default=20,
help='test perturb number of steps')
self.parser.add_argument('--test_step_size', type=float, default=2 / 255,
help='test perturb step size')
# Others
self.parser.add_argument('--random_seed', type=int, default=1)
def parse(self, save=True):
if not self.initialized:
self.initialize()
self.opt = self.parser.parse_args()
args = vars(self.opt)
print('------------ Options -------------')
for k, v in sorted(args.items()):
print('%s: %s' % (str(k), str(v)))
print('-------------- End ----------------')
# save to the disk
# Make save directory
if not os.path.exists(self.opt.save):
os.makedirs(self.opt.save)
if not os.path.isdir(self.opt.save):
raise Exception('%s is not a dir' % self.opt.save)
if save and not self.opt.test:
file_name = os.path.join(self.opt.save, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('------------ Options -------------\n')
for k, v in sorted(args.items()):
opt_file.write('%s: %s\n' % (str(k), str(v)))
opt_file.write('-------------- End ----------------\n')
return self.opt
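# Illustrative usage (added here; the command line shown is made up):
#
#     opt = BaseOptions().parse(save=False)     # e.g. python train.py -e 10 -b 64
#     import json
#     drop_epochs = json.loads(opt.epoch_step)  # '--epoch_step' holds a JSON list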
|
[
"Arthur"
] |
Arthur
|
5e6eab96a36af8362b1089b13514cebebf213f95
|
11812a0cc7b818292e601ecdd4aa4c4e03d131c5
|
/100days_of_python/day32/main.py
|
2d1a1c5e6332bb4dae8a588642e9e2d964c7be13
|
[] |
no_license
|
SunshineFaxixi/Python_Learning
|
f1e55adcfa898489cc9146ccfb220f0b48a31a22
|
ab3ca44d013311b6de02124091acc4c36a83c4d9
|
refs/heads/master
| 2021-08-16T05:47:29.963118 | 2021-01-04T13:48:30 | 2021-01-04T13:48:30 | 238,857,341 | 1 | 0 | null | 2020-03-03T13:53:08 | 2020-02-07T06:21:46 |
HTML
|
UTF-8
|
Python
| false | false | 1,364 |
py
|
##################### Extra Hard Starting Project ######################
import pandas
from datetime import datetime
import os
import random
import smtplib
MY_EMAIL = "[email protected]"
MY_PASSWORD = "TXHTVGKIOLEHXVCI"
today = datetime.now()
today_tuple = (today.month, today.day)
all_birth_info = pandas.read_csv("birthdays.csv")
birthday_dict = {(data_row["month"], data_row["day"]): data_row for (index, data_row) in all_birth_info.iterrows()}
# 2. Check if today matches a birthday in the birthdays.csv
if today_tuple in birthday_dict:
# 3. If step 2 is true, pick a random letter from letter templates and replace the [NAME] with the person's actual name from birthdays.csv
birthday_person = birthday_dict[today_tuple]
file_path = f"letter_templates/letter_{random.randint(1, 3)}.txt"
with open(file_path) as data:
content = data.read()
content = content.replace("[NAME]", birthday_person["name"])
# print(content)
# 4. Send the letter generated in step 3 to that person's email address.
with smtplib.SMTP("smtp.163.com") as connection:
connection.starttls()
connection.login(user=MY_EMAIL, password=MY_PASSWORD)
connection.sendmail(
from_addr=MY_EMAIL,
to_addrs=birthday_person["email"],
msg=f"Subject: Happy Birthday!\n\n{content}"
)
|
[
"[email protected]"
] | |
53e8ea169d0cfd5c2042f9ade08153f4669354fc
|
65b4522c04c2be071c2d42095956fe950fe1cebe
|
/inversions/inversion10/iter2/run5/analysis/pred_disp/create_predicted_disp_database.py
|
608cb3ba2bafea964917232a2b235b12007f7f0a
|
[] |
no_license
|
geodesy/viscojapan
|
ac0cd93f7a2134cd2651623b94879dcc21c0c46a
|
03e70265b56eb5994e73bcb6066f0be338e42f27
|
refs/heads/master
| 2021-03-03T18:19:07.779601 | 2015-07-16T03:50:49 | 2015-07-16T03:50:49 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 740 |
py
|
import sqlite3
import numpy as np
import viscojapan as vj
pred = vj.inv.DispPred(
file_G0 = '../../../green_function/G0_He50km_VisM6.3E18_Rake83.h5',
result_file = '../../outs/nrough_05_naslip_11.h5',
fault_file = '../../../fault_model/fault_bott80km.h5',
files_Gs = ['../../../green_function/G1_He50km_VisM1.0E19_Rake83.h5',
'../../../green_function/G2_He60km_VisM6.3E18_Rake83.h5',
'../../../green_function/G3_He50km_VisM6.3E18_Rake90.h5'
],
nlin_par_names = ['log10(visM)','log10(He)','rake'],
file_incr_slip0 = '../../slip0/v1/slip0.h5',
)
writer = vj.inv.PredDispToDatabaseWriter(
pred_disp = pred
)
writer.create_database()
writer.insert_all()
|
[
"[email protected]"
] | |
e254aa45d97a2f3ff329c8b06be41ad5a4e0aec5
|
3acb90a1e97a0e851c6e4b1b57dda78ec5e3e3b4
|
/problems/deep_copy_graph.py
|
0b0caab3f09a04fb8519c76e677dd80b5c5b183b
|
[] |
no_license
|
jhyang12345/algorithm-problems
|
fea3c6498cff790fc4932404b5bbab08a6d4a627
|
704355013de9965ec596d2e0115fd2ca9828d0cb
|
refs/heads/master
| 2023-05-15T10:26:52.685471 | 2021-06-01T14:57:38 | 2021-06-01T14:57:38 | 269,333,379 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,641 |
py
|
# Given a node in a connected directional graph, create a copy of it.
#
# Here's an example and some starter code.
class Node:
def __init__(self, value, adj=None):
self.value = value
self.adj = adj
# Variable to help print graph
self._print_visited = set()
if self.adj is None:
self.adj = []
# Able to print graph
def __repr__(self):
if self in self._print_visited:
return ''
else:
self._print_visited.add(self)
final_str = ''
for n in self.adj:
final_str += f'{n}\n'
self._print_visited.remove(self)
return final_str + f'({self.value}, ({[n.value for n in self.adj]}))'
def deep_copy_graph(graph_node, visited=None):
dummy_node = Node(0)
queue = [graph_node, dummy_node]
graph = {}
visited = [graph_node, dummy_node]
dummy_map = {}
while queue:
cur = queue.pop(0)
dummy = queue.pop(0)
dummy_map[cur] = dummy
dummy.value = cur.value
visited.append(cur)
for node in cur.adj:
if node not in visited:
# mark at enqueue time so a node reachable from two
# not-yet-processed parents is only copied once
visited.append(node)
new_dummy = Node(0)
dummy_map[node] = new_dummy
queue.append(node)
queue.append(new_dummy)
dummy.adj.append(new_dummy)
else:
dummy.adj.append(dummy_map[node])
return dummy_node
n5 = Node(5)
n4 = Node(4)
n3 = Node(3, [n4])
n2 = Node(2)
n1 = Node(1, [n5])
n5.adj = [n3]
n4.adj = [n3, n2]
n2.adj = [n4]
graph_copy = deep_copy_graph(n1)
print(graph_copy)
# (2, ([4]))
# (4, ([3, 2]))
# (3, ([4]))
# (5, ([3]))
# (1, ([5]))
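# Illustrative check (added here, not part of the original file): the copy
# should be structurally equal to, but share no nodes with, the original.
def assert_disjoint_copy(orig, copy, seen=None):
    seen = seen if seen is not None else set()
    if orig in seen:
        return
    seen.add(orig)
    assert copy is not orig and copy.value == orig.value
    for o, c in zip(orig.adj, copy.adj):
        assert_disjoint_copy(o, c, seen)

assert_disjoint_copy(n1, graph_copy)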
|
[
"[email protected]"
] | |
55bb3d82b80185533da7d4c85f2c8e6589933ab4
|
cd4eb25911d3e3b092aa97aaa7b8fbba6c3a0704
|
/lang/python/asyncio/chain.py
|
aa8d41fb55cb2b5319e1e90b9a6e8a96e55ad630
|
[
"MIT"
] |
permissive
|
liuyang1/test
|
29bb142982d2ef0d79b71e8fe5f5e0d51ec5258e
|
9a154e0161a1a33baad53f7223ee72e702532001
|
refs/heads/master
| 2023-08-05T08:56:50.526414 | 2023-07-21T05:49:53 | 2023-07-21T11:16:09 | 26,949,326 | 9 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 399 |
py
|
import asyncio
@asyncio.coroutine
def compute(x, y):
print("Compute %s + %s ..." % (x, y))
# yield from asyncio.sleep(1.0)
return x + y
@asyncio.coroutine
def print_sum(x, y):
result = yield from compute(x, y)
print("%s + %s = %s" % (x, y, result))
loop = asyncio.get_event_loop()
loop.run_until_complete(print_sum(1, 2))
loop.run_until_complete(print_sum(3, 2))
loop.close()
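# Note (added here): @asyncio.coroutine and `yield from` coroutines were
# deprecated in Python 3.8 and removed in 3.11. A modern equivalent:
#
#     import asyncio
#
#     async def compute(x, y):
#         print("Compute %s + %s ..." % (x, y))
#         await asyncio.sleep(1.0)
#         return x + y
#
#     async def print_sum(x, y):
#         result = await compute(x, y)
#         print("%s + %s = %s" % (x, y, result))
#
#     asyncio.run(print_sum(1, 2))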
|
[
"[email protected]"
] | |
6000dedcf91921ea9a5a6cba05ff8fe17f2ae918
|
221d1ad342677d2fac8aa3f8d5c60e059a6316c9
|
/pm4py/objects/log/util/dataframe_utils.py
|
e8318a1daaeaa367f7ae496fe27ab3a705aca2da
|
[] |
no_license
|
niklasadams/explainable_concept_drift_pm
|
06ff651fbdebece4adf96f94bfb4d1026da14c48
|
6bf84d727ab0bae76716a04ad28c7de73250c89d
|
refs/heads/main
| 2023-08-26T18:21:49.955080 | 2021-10-29T18:53:48 | 2021-10-29T18:53:48 | 314,514,571 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,643 |
py
|
from pm4py.util import constants
from pm4py.objects.log.log import EventStream
from pm4py.objects.conversion.log import converter as log_converter
import pandas as pd
from pm4py.util.vers_checker import check_pandas_ge_024
from enum import Enum
from pm4py.util import exec_utils
from pm4py.util import points_subset
from pm4py.util import xes_constants
LEGACY_PARQUET_TP_REPLACER = "AAA"
LEGACY_PARQUET_CASECONCEPTNAME = "caseAAAconceptAAAname"
class Parameters(Enum):
PARTITION_COLUMN = "partition_column"
CASE_ID_KEY = constants.PARAMETER_CONSTANT_CASEID_KEY
MANDATORY_ATTRIBUTES = "mandatory_attributes"
MAX_NO_CASES = "max_no_cases"
MIN_DIFFERENT_OCC_STR_ATTR = 5
MAX_DIFFERENT_OCC_STR_ATTR = 50
def insert_partitioning(df, num_partitions, parameters=None):
"""
Insert the partitioning in the specified dataframe
Parameters
-------------
df
Dataframe
num_partitions
Number of partitions
parameters
Parameters of the algorithm
Returns
-------------
df
Partitioned dataframe
"""
if parameters is None:
parameters = {}
case_id_key = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, constants.CASE_CONCEPT_NAME)
partition_column = exec_utils.get_param_value(Parameters.PARTITION_COLUMN, parameters, "@@partitioning")
df[partition_column] = df[case_id_key].rank(method='dense', ascending=False).astype(int) % num_partitions
return df
def legacy_parquet_support(df, parameters=None):
"""
For legacy support, Parquet files columns could not contain
a ":" that has been arbitrarily replaced by a replacer string.
This string substitutes the replacer to the :
Parameters
---------------
dataframe
Dataframe
parameters
Parameters of the algorithm
"""
if parameters is None:
parameters = {}
df.columns = [x.replace(LEGACY_PARQUET_TP_REPLACER, ":") for x in df.columns]
return df
def table_to_stream(table, parameters=None):
"""
Converts a Pyarrow table to an event stream
Parameters
------------
table
Pyarrow table
parameters
Possible parameters of the algorithm
"""
if parameters is None:
parameters = {}
dict0 = table.to_pydict()
keys = list(dict0.keys())
# for legacy format support
if LEGACY_PARQUET_CASECONCEPTNAME in keys:
for key in keys:
dict0[key.replace(LEGACY_PARQUET_TP_REPLACER, ":")] = dict0.pop(key)
stream = EventStream([dict(zip(dict0, i)) for i in zip(*dict0.values())])
return stream
def table_to_log(table, parameters=None):
"""
Converts a Pyarrow table to an event log
Parameters
------------
table
Pyarrow table
parameters
Possible parameters of the algorithm
"""
if parameters is None:
parameters = {}
stream = table_to_stream(table, parameters=parameters)
return log_converter.apply(stream, parameters=parameters)
def convert_timestamp_columns_in_df(df, timest_format=None, timest_columns=None):
"""
Convert the (object-typed) timestamp columns of a dataframe to datetime
Parameters
-----------
df
Dataframe
timest_format
(If provided) Format of the timestamp columns in the CSV file
timest_columns
Columns of the CSV that shall be converted into timestamp
Returns
------------
df
Dataframe with timestamp columns converted
"""
needs_conversion = check_pandas_ge_024()
for col in df.columns:
if timest_columns is None or col in timest_columns:
if df[col].dtype == 'object':
try:
if timest_format is None:
if needs_conversion:
df[col] = pd.to_datetime(df[col], utc=True)
else:
df[col] = pd.to_datetime(df[col])
else:
if needs_conversion:
df[col] = pd.to_datetime(df[col], utc=True, format=timest_format)
else:
df[col] = pd.to_datetime(df[col], format=timest_format)
except ValueError:
# print("exception converting column: "+str(col))
pass
return df
def sample_dataframe(df, parameters=None):
"""
Sample a dataframe on a given number of cases
Parameters
--------------
df
Dataframe
parameters
Parameters of the algorithm, including:
- Parameters.CASE_ID_KEY
- Parameters.MAX_NO_CASES
Returns
-------------
sampled_df
Sampled dataframe
"""
if parameters is None:
parameters = {}
case_id_key = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, constants.CASE_CONCEPT_NAME)
max_no_cases = exec_utils.get_param_value(Parameters.MAX_NO_CASES, parameters, 100)
case_ids = list(df[case_id_key].unique())
case_id_to_retain = points_subset.pick_chosen_points_list(min(max_no_cases, len(case_ids)), case_ids)
return df[df[case_id_key].isin(case_id_to_retain)]
def automatic_feature_selection_df(df, parameters=None):
"""
Performs an automatic feature selection on dataframes,
keeping the features useful for ML purposes
Parameters
---------------
df
Dataframe
parameters
Parameters of the algorithm
Returns
---------------
featured_df
Dataframe with only the features that have been selected
"""
if parameters is None:
parameters = {}
case_id_key = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, constants.CASE_CONCEPT_NAME)
mandatory_attributes = exec_utils.get_param_value(Parameters.MANDATORY_ATTRIBUTES, parameters,
set(df.columns).intersection(
{constants.CASE_CONCEPT_NAME, xes_constants.DEFAULT_NAME_KEY,
xes_constants.DEFAULT_TIMESTAMP_KEY}))
min_different_occ_str_attr = exec_utils.get_param_value(Parameters.MIN_DIFFERENT_OCC_STR_ATTR, parameters, 5)
max_different_occ_str_attr = exec_utils.get_param_value(Parameters.MAX_DIFFERENT_OCC_STR_ATTR, parameters, 50)
cols_dtypes = {x: str(df[x].dtype) for x in df.columns}
other_attributes_to_retain = set()
no_all_cases = df[case_id_key].nunique()
for x, y in cols_dtypes.items():
attr_df = df.dropna(subset=[x])
this_cases = attr_df[case_id_key].nunique()
# in any case, keep attributes that appears at least once per case
if this_cases == no_all_cases:
if "float" in y or "int" in y:
# (as in the classic log version) retain always float/int attributes
other_attributes_to_retain.add(x)
elif "object" in y:
# (as in the classic log version) keep string attributes if they have enough variability, but not too much
# (that would be hard to explain)
unique_val_count = df[x].nunique()
if min_different_occ_str_attr <= unique_val_count <= max_different_occ_str_attr:
other_attributes_to_retain.add(x)
else:
# not consider the attribute after this feature selection if it has other types (for example, date)
pass
attributes_to_retain = mandatory_attributes.union(other_attributes_to_retain)
return df[attributes_to_retain]
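# Illustrative usage of convert_timestamp_columns_in_df (added here; the
# column names and values are made up):
#
#     df = pd.DataFrame({'case:concept:name': ['c1', 'c2'],
#                        'time:timestamp': ['2021-01-01 08:00:00',
#                                           '2021-01-02 09:30:00']})
#     df = convert_timestamp_columns_in_df(df, timest_columns={'time:timestamp'})
#     print(df['time:timestamp'].dtype)  # datetime64[ns, UTC] on pandas >= 0.24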
|
[
"[email protected]"
] | |
40c34bd9c99fb2039166995b23dec6a86c82f436
|
b02c88bcad352811d22cadacd2aa573c426a2ca3
|
/scrapers/settings.py
|
06a6ae3c2620e357e3388b52d97f309471c0bf4b
|
[
"Apache-2.0"
] |
permissive
|
frankier/ties445
|
9e24c3e415b4a07584c41e0e4a3f241b133463b8
|
4ceee5390d81aedc0fb3904803797584dd5084b8
|
refs/heads/master
| 2020-12-31T07:18:42.938988 | 2016-05-17T18:45:14 | 2016-05-17T18:45:14 | 56,767,440 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,085 |
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for scrapers project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'scrapers'
SPIDER_MODULES = ['scrapers.spiders']
NEWSPIDER_MODULE = 'scrapers.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'scrapers (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
#COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'scrapers.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'scrapy.downloadermiddlewares.decompression.DecompressionMiddleware': 1,
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 1,
}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'scrapers.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"[email protected]"
] | |
8dc513728068e9e929518340bb44a7718efc33eb
|
3122ac39f1ce0a882b48293a77195476299c2a3b
|
/clients/python/generated/swaggyjenkins/models/extension_class_container_impl1links.py
|
71b82d350f0fa80dde4f8cb80c78f8b7b2e3c3c7
|
[
"MIT"
] |
permissive
|
miao1007/swaggy-jenkins
|
4e6fe28470eda2428cbc584dcd365a21caa606ef
|
af79438c120dd47702b50d51c42548b4db7fd109
|
refs/heads/master
| 2020-08-30T16:50:27.474383 | 2019-04-10T13:47:17 | 2019-04-10T13:47:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,766 |
py
|
# coding: utf-8
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
OpenAPI spec version: 1.1.1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ExtensionClassContainerImpl1links(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'_self': 'Link',
'_class': 'str'
}
attribute_map = {
'_self': 'self',
'_class': '_class'
}
def __init__(self, _self=None, _class=None): # noqa: E501
"""ExtensionClassContainerImpl1links - a model defined in OpenAPI""" # noqa: E501
self.__self = None
self.__class = None
self.discriminator = None
if _self is not None:
self._self = _self
if _class is not None:
self._class = _class
@property
def _self(self):
"""Gets the _self of this ExtensionClassContainerImpl1links. # noqa: E501
:return: The _self of this ExtensionClassContainerImpl1links. # noqa: E501
:rtype: Link
"""
return self.__self
@_self.setter
def _self(self, _self):
"""Sets the _self of this ExtensionClassContainerImpl1links.
:param _self: The _self of this ExtensionClassContainerImpl1links. # noqa: E501
:type: Link
"""
self.__self = _self
@property
def _class(self):
"""Gets the _class of this ExtensionClassContainerImpl1links. # noqa: E501
:return: The _class of this ExtensionClassContainerImpl1links. # noqa: E501
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class):
"""Sets the _class of this ExtensionClassContainerImpl1links.
:param _class: The _class of this ExtensionClassContainerImpl1links. # noqa: E501
:type: str
"""
self.__class = _class
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExtensionClassContainerImpl1links):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
e3e0ff71c09f66324bba160b6a4edccc40d93fff
|
ddc5aa77203bf76cd789c173dffbc382ed8ef004
|
/test/app_test/master.py
|
f1fe1995de473cf239f7fc143c31029ce2d5bca1
|
[] |
no_license
|
phroiland/FinBiotic
|
0b8183ce9f97c3fc4b1f7e20decc3472bffe8800
|
a30ef2e979b230e5424fd25ef7dd1fb49bbd5245
|
refs/heads/master
| 2023-08-18T15:26:15.948262 | 2023-08-15T15:13:23 | 2023-08-15T15:13:23 | 93,895,989 | 2 | 2 | null | 2023-03-01T20:08:37 | 2017-06-09T20:52:02 |
Python
|
UTF-8
|
Python
| false | false | 4,105 |
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon May 29 13:30:38 2017
@author: jonfroiland
"""
import sys
import argparse
import oandapyV20
import oandapyV20.endpoints.positions as openPos
# Data, Price, and Strategy Imports
import settings
import common.config
import common.args
from stream.streamingData import StreamingData
from stream.view import mid_string, heartbeat_to_string, instrument_string
from account.balance import Balance
from strategy.breakout import Breakout
from strategy.spreads import Spreads
from strategy.strategy import Strategy
from pivots.pivotImports import PivotImports
# from view import bid_string, ask_string, price_to_string
from datetime import datetime
import pandas as pd
pd.set_option('display.large_repr', 'truncate')
pd.set_option('display.max_columns', 0)
def main():
print "------ System online -------", datetime.now()
parser = argparse.ArgumentParser()
common.config.add_argument(parser)
parser.add_argument('--instrument', "-i", type=common.args.instrument,
required=True, action="append",
help="Instrument to get prices for")
parser.add_argument('--snapshot', action="store_true", default=True,
help="Request an initial snapshot")
parser.add_argument('--no-snapshot', dest="snapshot", action="store_false",
help="Do not request an initial snapshot")
parser.add_argument('--show-heartbeats', "-s", action='store_true',
default=False, help="display heartbeats")
args = parser.parse_args()
# print sys.argv[2]
account_id = args.config.active_account
api = args.config.create_streaming_context()
account_api = args.config.create_context()
response = api.pricing.stream(account_id, snapshot=args.snapshot,
instruments=",".join(args.instrument))
dfD = PivotImports(sys.argv[2]).daily()
# dfW = p.weekly()
balance = Balance(account_api, account_id).balance()
df = pd.DataFrame([])
for msg_type, msg in response.parts():
if msg_type == "pricing.Heartbeat" and args.show_heartbeats:
print heartbeat_to_string(msg)
if msg_type == "pricing.Price":
sd = StreamingData(datetime.now(), instrument_string(msg),
mid_string(msg), account_api, account_id, 's',
'5min', balance)
df = df.append(sd.df())
sd.resample(df)
print "df:", df.shape[0], "minuteData:", sd.minuteData().shape[0]
# print sd.minuteData(),'\n'
if sd.minuteData().shape[0] < 20:
continue
else:
client = oandapyV20.API(settings.ACCESS_TOKEN)
r = openPos.OpenPositions(accountID=account_id)
client.request(r)
openTrades = []
for i in r.response['positions']:
trades = i['instrument']
openTrades.append(trades)
print 'Open Trades', openTrades
if instrument_string(msg) in openTrades:
continue
else:
try:
b = Breakout(sd.minuteData())
breakout = b.breakout()
# print 'Breakout Units:',breakout
s = Spreads(dfD, mid_string(msg))
pivot, rl1, rl2, rl3, sl1, sl2, sl3 = s.spreads()
rate1, rate2 = s.spreads_out()
strat = Strategy(account_api, account_id,
instrument_string(msg), dfD,
mid_string(msg), breakout, pivot, rl1,
rl2, rl3, sl1, sl2, sl3, rate1, rate2)
strat.res_check()
strat.sup_check()
except Exception as e:
print e
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
026745467476e61080f1b8483e76fc80ed91ca82
|
8f337d7a1477eb9878bd252f45fadd967ba5dbbe
|
/run_galfit_disk_only.py
|
62c3df5903da86c2f2a4574520757cfb091c1fa8
|
[] |
no_license
|
bpRsh/b1_research_lib
|
bd4c293946329ea96d0fb37d8769aaa83d1ca15d
|
1de77f683b3ba18a1ab142b0fe86114c7a67791a
|
refs/heads/master
| 2021-01-15T19:04:32.177465 | 2020-11-23T19:55:34 | 2020-11-23T19:55:34 | 99,805,200 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,674 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-#
#
# Author : Bhishan Poudel; Physics Graduate Student, Ohio University
# Date : 26-Oct-2016 13:10
# Last update : Dec 15, 2016
# Est time : 3 min for one galaxy one filter.
# Main commands : rm -r imgblock.fits subcomps.fit ; galfit expdisk_devauc.sh
# galfit -o3 galfit.01 && rm -r galfit.01
# ds9 -multiframe imgblock.fits subcomps.fits &
# Imports
from __future__ import division, unicode_literals, print_function
import subprocess
import os
import time
from string import ascii_uppercase
import astropy.io
from astropy.io import fits
from astropy.io.fits import getdata
from astropy.io.fits import getheader
from astropy.io.fits import getval
paramfile = r'expdisk_devauc.sh'
def replace_galfit_param(name, value, object_num=1, fixed=True):
"""Replace input galfit parameter file with new configuration.
Arguments:
name : parameter name, e.g. A-P, 1-10, 'Z'
value: new value for the parameter in string form. e.g. '20.0'
object_num: For A-Z object_num is 1
For objects, object_num starts from 1.
fixed: True means parameter will be fixed (0) during fitting.
NOTE: Keep fixed = False while using this function to vary the parameter.
"""
name, value = str(name), str(value)
with open(paramfile) as f:
gf_file = f.readlines()
# Location of param.
# 3rd column is where one can hold the parameters fixed (0) or allow vary 1
loc = [i for i in range(len(gf_file)) if
gf_file[i].strip().startswith(name + ')')][object_num - 1]
param_str = gf_file[loc]
comment = param_str.find('#')
if name in ascii_uppercase:
fmt = '{}) {} {}'
param_str = fmt.format(name, value, param_str[comment:])
else:
fmt = '{}) {} {} {}'
param_str = fmt.format(name, value, '0' if fixed else '1',
param_str[comment:])
gf_file[loc] = param_str
with open(paramfile, 'w') as f:
f.writelines(gf_file)
def run_galfit(galaxy, outdir, count):
"""Run galfit on the input galaxy and create model and residual images.
Runs galfit on the given input galaxies and creates model
and residual images in the output directory
galaxy : base name of input galaxy, e.g f606w or f814w
outdir : output directory, e.g. galfit_outputs
count : count number of galaxy, e.g. 0 for f606w_gal0.fits
Needs : galfit_outputs/two_components/bulge/
galfit_outputs/two_components/disk/
galfit_outputs/two_components/residual/
    Note: 1. This program will also read the values of mag and rad from the
          input fits file header, and update those values in the
          galfit paramfile 'expdisk_devauc.sh'.
          2. It will also create the mask file using the ic command.
    """
# galaxy = f606w or f814w
# path = '/Users/poudel/jedisim/simdatabase/colors'
path = '/Users/poudel/jedisim/simdatabase/galaxies'
ingal = path + '/' + galaxy + '_gal' + str(count) + '.fits'
psf = galaxy + '_psf.fits' # psf in the script directory
# get the value of magnitude, radius and mag0 of input galaxy
    try:
        mag = getval(ingal, 'MAG')
    except Exception:
        mag = 20.0
    try:
        rad = getval(ingal, 'RADIUS')
    except Exception:
        rad = 10.0
mag0 = getval(ingal, 'MAG0')
# create galfit paramfile according to the input galaxy
# For A-Z object_num is 1
# fixed=True means it is fixed and not changed
print("\n\n\n")
print('+' * 80)
print('+' * 80)
print('+' * 80)
print('{} {} {}'.format('Current Galaxy : ', ingal, ''))
print('+' * 80)
print('+' * 80)
print('+' * 80)
replace_galfit_param('A', ingal, object_num=1, fixed=False)
replace_galfit_param('D', psf, object_num=1, fixed=False)
replace_galfit_param('J', mag0, object_num=1, fixed=False)
replace_galfit_param('3', mag, object_num=1, fixed=False)
replace_galfit_param('4', rad, object_num=1, fixed=False)
replace_galfit_param('3', mag, object_num=2, fixed=False)
replace_galfit_param('4', rad, object_num=2, fixed=False)
# create mask file according to the input galaxy
cmd = "ic '1 0 %1 0 == ?' " + ingal + " > mask.fits"
subprocess.call(cmd, shell=True)
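    # Sketch of the ic expression above (assuming 'ic' is the RPN image
    # calculator): "1 0 %1 0 == ?" writes 1 where the input pixel equals 0
    # and 0 elsewhere, i.e. pixels with no data are flagged in the mask.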
# For objects, object_num starts from 1
# 1 = expdisk, 2 = devauc
# run galfit
# rm -r imgblock.fits subcomps.fits galfit.01 # removes these files.
# galfit sim.feedme # gives galfit.01, imgblock.fits,if succeed.
# galfit -o3 galfit.01 # runs only when galfit.01 exists
# we can delete galfit.01 immediately after it it used.
cmd1 = 'rm -r imgblock.fits; galfit ' + paramfile
cmd2 = 'rm -r subcomps.fits; galfit -o3 galfit.01; rm -r galfit.01'
print("\n\n\n")
print('*' * 80)
print('Running: {}'.format(cmd1))
print('*' * 80)
subprocess.call(cmd1, shell=True) # gives galfit.01 if succeed
if os.path.exists('galfit.01'):
print("\n\n\n")
print('!' * 80)
print('Running: {}'.format(cmd2))
print('!' * 80)
subprocess.call(cmd2, shell=True)
# get residual map from imgblock.fits
# residual = outdir + '/residual/' + galaxy + '_res' + str(count) + '.fits'
# get devauc and expdisk models from subcomps.fits
# galaxy = f606w or f814w
# devauc = bulge and expdisk+residual = disk
# devauc = outdir + '/bulge/' + galaxy + '_bulge' + str(count) + '.fits'
expdisk = outdir + galaxy + '_disk' +\
str(count) + '.fits'
# extracting frames of imgblock.fits and subcomps.fits if they exists.
if os.path.isfile('subcomps.fits') and os.path.isfile('imgblock.fits'):
# for imgblock.fits : 0 is empty, 1 is input, 2 is model, 3 is residual
# dat_res, hdr_res = fits.getdata(r'imgblock.fits', ext=3, header=True)
# for subcomps.fits: 0 is input, 1 is expdisk, 2 is devauc etc.
dat_exp, hdr_exp = fits.getdata(r'subcomps.fits', ext=1, header=True)
# dat_dev, hdr_dev = fits.getdata(r'subcomps.fits', ext=2, header=True)
# fits.writeto(expdisk, dat_exp, hdr_exp, clobber=False)
# fits.writeto(residual, dat_res, hdr_res, clobber=True)
# fits.writeto(devauc, dat_dev, hdr_dev, clobber=True)
fits.writeto(expdisk, dat_exp, hdr_exp, clobber=True)
# print('{} {} {}'.format('Output file: ', expdisk, ''))
# print('{} {} {}'.format('Output file: ', residual, ''))
# print('{} {} {}'.format('Output file: ', devauc, ''))
print('{} {} {}'.format('Output file: ', expdisk, ''))
def main():
"""Main program."""
# output directory without '/' in the end
# range is from 0 to 101 and both f606w and f814w
galfit_outdir = 'disk_only_280_301/'
# there are 302 galaxies for each filter
# for count in list(range(101, 303)):
for count in range(280, 301):
run_galfit('f606w', galfit_outdir, count)
run_galfit('f814w', galfit_outdir, count)
if __name__ == '__main__':
# beginning time
program_begin_time = time.time()
begin_ctime = time.ctime()
# run main program
main()
# print the time taken
program_end_time = time.time()
end_ctime = time.ctime()
seconds = program_end_time - program_begin_time
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
print('\nBegin time: ', begin_ctime)
print('End time: ', end_ctime, '\n')
print("Time taken: {0:.0f} days, {1:.0f} hours, \
{2:.0f} minutes, {3:f} seconds.".format(d, h, m, s))
|
[
"[email protected]"
] | |
44f6551cecf87e0cc64db8a41ab7784033adc958
|
586e60b4bbf80e3da9c1051182a42cb81bb2ea1b
|
/scripts/generate-demo-users.py
|
787052a0fab94bece1059cc3565abb512a20e0bd
|
[
"Apache-2.0"
] |
permissive
|
DD-DeCaF/caffeine-bootstrap
|
daa0cb844fd694b87430451baee664d816e366a7
|
ec65cd5f135f86c7bf2faeb96930637e910c380f
|
refs/heads/master
| 2021-07-09T15:18:56.476754 | 2020-08-18T11:16:37 | 2020-08-18T11:16:37 | 161,489,310 | 1 | 0 |
Apache-2.0
| 2020-08-18T11:16:38 | 2018-12-12T13:03:41 |
Shell
|
UTF-8
|
Python
| false | false | 409 |
py
|
from iam.models import User, db
from iam.app import app, init_app
init_app(app, db)
app.app_context().push()
print("Adding user: demo@demo")
user = User(email="demo@demo")
user.set_password("demo")
db.session.add(user)
for i in range(40):
print(f"Adding user: demo{i}@demo (password demo)")
user = User(email=f"demo{i}@demo")
user.set_password("demo")
db.session.add(user)
db.session.commit()
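# Optional sanity check (sketch; assumes the same SQLAlchemy session):
# print(db.session.query(User).filter(User.email.like("demo%@demo")).count())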
|
[
"[email protected]"
] | |
a562ea5925bb853287c30692e331db3ad17821e2
|
8c42964a29af1d5a2f4541ab634b54e25a90b9f4
|
/Example2/configuration.py
|
5a64a7d9aada01e4a7e1e383119cbc7d566d617f
|
[] |
no_license
|
lenzip/CMSDataAnalysisSchoolPisa2019ScalarToWW
|
a21dc572ae2e152410a867ae5013703c886e4bbf
|
8cff1dea08887b78a9efc26a142609ba1b7ba296
|
refs/heads/master
| 2020-04-14T21:13:03.028961 | 2019-01-23T16:22:23 | 2019-01-23T16:22:23 | 164,121,564 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 803 |
py
|
# example of configuration file
tag = 'Inclusive'
# used by mkShape to define output directory for root files
outputDir = 'rootFile'
# file with list of variables
variablesFile = 'variables.py'
# file with list of cuts
cutsFile = 'cuts.py'
# file with list of samples
samplesFile = 'samples.py'
# file with list of plots
plotFile = 'plot.py'
# luminosity to normalize to (in 1/fb)
lumi = 35.867
# used by mkPlot to define output directory for plots
# different from "outputDir" to do things more tidy
outputDirPlots = 'plotsInclusive'
# used by mkDatacards to define output directory for datacards
outputDirDatacard = 'datacardsInclusive'
# structure file for datacard
structureFile = 'structure.py'
# nuisances file for mkDatacards and for mkShape
nuisancesFile = 'nuisances.py'
|
[
"[email protected]"
] | |
3c7c5139a5cd6dd8e33834de89b98fdd8bba4a33
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/length_20200529113854.py
|
76b776e2932e64a11975284ff9a772f9332ca676
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 431 |
py
|
def removeDuplicates(nums):
    # remove duplicates in-place from a sorted list and return the new length
    i = 0
    while i < len(nums) - 1:
        if nums[i] == nums[i+1]:
            nums.remove(nums[i])
        else:
            i += 1
    return len(nums)
print(removeDuplicates([1,1,2]))
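# Expected behaviour (sketch): the call above prints 2, and the list it was
# given is left as [1, 2].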
|
[
"[email protected]"
] | |
8097d71b8ebae32d7fdc01e7873b5ee6d6ad0fb4
|
c01ab71f681efdeb9f4e7d52ed083745b6d42590
|
/old/6th sem/cpp/TRIKA/test_modules/testCases.py
|
96b35814c7b3c3e9a1a25b8848bf226225f18b05
|
[] |
no_license
|
anant-pushkar/competetive_programming_codes
|
398a39c85a761c8d242f42f368933239a438ac06
|
127c67d7d4e2cef2d1f25189b6535606f4523af6
|
refs/heads/master
| 2021-01-20T11:57:07.528790 | 2014-11-14T08:29:21 | 2014-11-14T08:29:21 | 23,577,655 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 475 |
py
|
import testTemplate
'''number of test suites'''
nSuites=2
def getTests():
tests = []
suite=testTemplate.testSuite("Sample 1")
testcase = testTemplate.testInstance("4 4\n1 1\n100 55 10 2\n20 10 90 1\n60 20 22 4\n1 30 70 5" , "Y 23" , "")
suite.add(testcase)
tests.append(suite)
suite=testTemplate.testSuite("Sample 2")
testcase = testTemplate.testInstance("2 2\n1 1\n1 55 \n20 10 " , "N" , "")
suite.add(testcase)
tests.append(suite)
return tests
|
[
"[email protected]"
] | |
15fc22e8fd23bf75543afca8ce167e6017251fa0
|
fb1e852da0a026fb59c8cb24aeb40e62005501f1
|
/decoding/GAD/fairseq/dataclass/constants.py
|
93bc6d03cb81618c47a58009dc22f7953a106eb3
|
[
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"MIT"
] |
permissive
|
microsoft/unilm
|
134aa44867c5ed36222220d3f4fd9616d02db573
|
b60c741f746877293bb85eed6806736fc8fa0ffd
|
refs/heads/master
| 2023-08-31T04:09:05.779071 | 2023-08-29T14:07:57 | 2023-08-29T14:07:57 | 198,350,484 | 15,313 | 2,192 |
MIT
| 2023-08-19T11:33:20 | 2019-07-23T04:15:28 |
Python
|
UTF-8
|
Python
| false | false | 1,626 |
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum, EnumMeta
from typing import List
class StrEnumMeta(EnumMeta):
# this is workaround for submitit pickling leading to instance checks failing in hydra for StrEnum, see
# https://github.com/facebookresearch/hydra/issues/1156
@classmethod
def __instancecheck__(cls, other):
return "enum" in str(type(other))
class StrEnum(Enum, metaclass=StrEnumMeta):
def __str__(self):
return self.value
def __eq__(self, other: str):
return self.value == other
def __repr__(self):
return self.value
def __hash__(self):
return hash(str(self))
def ChoiceEnum(choices: List[str]):
"""return the Enum class used to enforce list of choices"""
return StrEnum("Choices", {k: k for k in choices})
LOG_FORMAT_CHOICES = ChoiceEnum(["json", "none", "simple", "tqdm"])
DDP_BACKEND_CHOICES = ChoiceEnum([
"c10d", # alias for pytorch_ddp
"legacy_ddp",
"no_c10d", # alias for legacy_ddp
"pytorch_ddp",
"slow_mo",
])
DATASET_IMPL_CHOICES = ChoiceEnum(["raw", "lazy", "cached", "mmap", "fasta"])
GENERATION_CONSTRAINTS_CHOICES = ChoiceEnum(["ordered", "unordered"])
GENERATION_DECODING_FORMAT_CHOICES = ChoiceEnum(
["unigram", "ensemble", "vote", "dp", "bs"]
)
ZERO_SHARDING_CHOICES = ChoiceEnum(["none", "os"])
PIPELINE_CHECKPOINT_CHOICES = ChoiceEnum(["always", "never", "except_last"])
PRINT_ALIGNMENT_CHOICES = ChoiceEnum(["hard", "soft"])
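# Usage sketch (illustrative, not part of the fairseq API itself): members of
# these enums compare equal to plain strings via StrEnum.__eq__, e.g.:
#   assert LOG_FORMAT_CHOICES.json == "json"
#   assert str(DATASET_IMPL_CHOICES.mmap) == "mmap"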
|
[
"[email protected]"
] | |
226980fdf20772f3a2d26e3b993584790ded886b
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/299/100769/submittedfiles/testes.py
|
b90b88a3a2fbbabb9a6af0cc8e965ec6c94201cb
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,871 |
py
|
from minha_bib import verificar_vitoria
from minha_bib import sorteio
from minha_bib import sorteio2
from minha_bib import maquinainteligente
import time
c=0
tabuleiro=[[1,2,3],[1,2,3],[1,2,3]]
for i in range(0,3,1):
    for j in range(0,3,1):
        tabuleiro[i][j]=" "
print('---------------------------------------')
print('TIC-TAC-TOE')
print('Hello\nWelcome to tic-tac-toe!')
# GAME BETWEEN TWO PLAYERS
nome1=str(input('What is your name (or nickname)? '))
'''nome2=str(input("What is the second player's name? "))'''
s1=str(input('Which symbol do you want to use, '+nome1+'? [X/O]'))
if s1=='X':
    s2='O'
    '''print('Ok, let us start, '+nome2+' will play "O"')'''
else:
    s2='X'
    '''print('Ok, let us start, '+nome2+' will play "X"')'''
print('This is our board \n',tabuleiro[0][0],'|',tabuleiro[0][1],'|',tabuleiro[0][2],'\n',tabuleiro[1][0],'|',tabuleiro[1][1],'|',tabuleiro[1][2],'\n',tabuleiro[2][0],'|',tabuleiro[2][1],'|',tabuleiro[2][2])
print('You will tell me which square you want to play using numbers.\nEach of these numbers represents one of the following squares:')
print('00 | 01 | 02\n10 | 11 | 12\n20 | 21 | 22')
print('Then I go and replace that square with your symbol. For example:\nyou give me the following move: 22')
print('And I go and...')
print('',tabuleiro[0][0],'|',tabuleiro[0][1],'|',tabuleiro[0][2],'\n',tabuleiro[1][0],'|',tabuleiro[1][1],'|',tabuleiro[1][2],'\n',tabuleiro[2][0],'|',tabuleiro[2][1],'|',s2)
print('----------------------------------------------')
# START OF THE GAME
inicio=sorteio(0,1)
if inicio==0:
    inicio=str('User')
else:
    inicio=str('Machine')
print('The winner of the draw for the first move was '+inicio)
if inicio=='User':
    print('Then you start')
    k=0
    while k<9:
        k+=1
        if k%2!=0:
            jogada=str(input('What is your move, '+nome1+'? '))
            i=jogada[0]
            j=jogada[1]
            i=int(i)
            j=int(j)
            while tabuleiro[i][j]!=" ":
                print('Invalid move')
                jogada=str(input('What is your move? '))
                i=jogada[0]
                j=jogada[1]
                i=int(i)
                j=int(j)
            tabuleiro[i][j]=s1
            print('',tabuleiro[0][0],'|',tabuleiro[0][1],'|',tabuleiro[0][2],'\n',tabuleiro[1][0],'|',tabuleiro[1][1],'|',tabuleiro[1][2],'\n',tabuleiro[2][0],'|',tabuleiro[2][1],'|',tabuleiro[2][2])
            if verificar_vitoria(tabuleiro)==True:
                print('CONGRATULATIONS, YOU WIN')
                break
            elif k==9 and verificar_vitoria(tabuleiro)==False:
                print("ihhhh, it's a draw")
        elif k%2==0:
            print('My turn')
            time.sleep(1)
            x=str(maquinainteligente(tabuleiro))
            i=int(x[0])
            j=int(x[1])
            while tabuleiro[i][j]!=' ':
                i=int(sorteio2(0,2))
                j=int(sorteio2(0,2))
            tabuleiro[i][j]=s2
            print('',tabuleiro[0][0],'|',tabuleiro[0][1],'|',tabuleiro[0][2],'\n',tabuleiro[1][0],'|',tabuleiro[1][1],'|',tabuleiro[1][2],'\n',tabuleiro[2][0],'|',tabuleiro[2][1],'|',tabuleiro[2][2])
            if verificar_vitoria(tabuleiro)==True:
                print('Ahh, not this time')
                break
elif inicio=='Machine':
    print('Then I start')
    for k in range(1,10,1):
        if k%2!=0:
            print('My turn')
            time.sleep(1)
            x=str(maquinainteligente(tabuleiro))
            i=int(x[0])
            j=int(x[1])
            while tabuleiro[i][j]!=' ':
                i=int(sorteio2(0,2))
                j=int(sorteio2(0,2))
            tabuleiro[i][j]=s2
            print('',tabuleiro[0][0],'|',tabuleiro[0][1],'|',tabuleiro[0][2],'\n',tabuleiro[1][0],'|',tabuleiro[1][1],'|',tabuleiro[1][2],'\n',tabuleiro[2][0],'|',tabuleiro[2][1],'|',tabuleiro[2][2])
            if verificar_vitoria(tabuleiro)==True:
                print('Ahh, not this time')
                break
            elif k==9 and verificar_vitoria(tabuleiro)==False:
                print("ihhhh, it's a draw")
        elif k%2==0:
            jogada=str(input('What is your move, '+nome1+'? '))
            i=jogada[0]
            j=jogada[1]
            i=int(i)
            j=int(j)
            while tabuleiro[i][j]!=" ":
                print('Invalid move')
                jogada=str(input('What is your move? '))
                i=jogada[0]
                j=jogada[1]
                i=int(i)
                j=int(j)
            tabuleiro[i][j]=s1
            print('',tabuleiro[0][0],'|',tabuleiro[0][1],'|',tabuleiro[0][2],'\n',tabuleiro[1][0],'|',tabuleiro[1][1],'|',tabuleiro[1][2],'\n',tabuleiro[2][0],'|',tabuleiro[2][1],'|',tabuleiro[2][2])
            if verificar_vitoria(tabuleiro)==True:
                print('CONGRATULATIONS, YOU WIN')
                break
|
[
"[email protected]"
] | |
891cf68c8f2e5a2d7b2c3c9baf3fd45f36ba1c93
|
3e3a835ee885eb9a71fd35ea58acd04361f72f47
|
/python基础/复习.py/石头剪刀布.py
|
df86dfa2ef1429a31cb3268c524f245a54ab4e82
|
[] |
no_license
|
hanfang302/py-
|
dbb259f24e06fbe1a900df53ae6867acb8cb54ea
|
dd3be494ccef5100c0f06ed936f9a540d8ca0995
|
refs/heads/master
| 2020-03-16T01:59:57.002135 | 2018-05-07T12:02:21 | 2018-05-07T12:02:21 | 132,454,341 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 337 |
py
|
player = int(input('Your move -- rock (1), scissors (2), paper (3): '))
computer = 2
if ((player == 1 and computer == 2) or
    (player == 2 and computer == 3) or
    (player == 3 and computer == 1)):
    print('The computer loses')
elif player == computer:
    print('Great minds think alike -- play again')
else:
    print('No way, I will fight you to the end')
|
[
"[email protected]"
] | |
5b86d1ba8124f7ae022306cd7979e8aa97754314
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/HdrqkdT4r9DeKPjCM_15.py
|
b8f9cf3d649052ff9b6b798b8d9e233d02626467
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 400 |
py
|
def is_polygonal(n):
    if n==1:
        return "0th of all"
    if n <= 3:
        return False
    result = []
    for k in range(3, n):
        i=1
        current=k+1
        while current < n:
            i+=1
            current += k*i
        if current == n:
            i = str(i)
            i += "th" if i[-2:-1]=="1" else {"1":"st","2":"nd","3":"rd"}.get(i[-1],"th")
            result.append("{ith} {k}-gonal number".format(ith=i,k=k))
    return result
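# Usage sketch: in this scheme 9 first appears as the opening term (k+1) of
# the k=8 sequence, so is_polygonal(9) returns ["1st 8-gonal number"].
# print(is_polygonal(9))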
|
[
"[email protected]"
] | |
a2a3823e6435408a754b473b37f7233309d5ef3f
|
4754d6b05b7eb255983f58474164d8690f4d8684
|
/figurines/tests/test_views.py
|
4ad1ab56cb491358a3a1c8c3bb9812ce62ef1085
|
[] |
no_license
|
pythonmentor/benjamin-p13
|
4f629be3cd9b2e8af6934fb69dfca63d6a294346
|
ada744761d3a3c6ecde1aec5db20770960cb2146
|
refs/heads/master
| 2023-01-24T17:10:30.235330 | 2020-11-30T17:29:09 | 2020-11-30T17:29:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,155 |
py
|
from django.test import TestCase
from django.urls import reverse
from figurines.models import Category, DidYouSee, Figurine
from users.models import User
class FigurineTestViews(TestCase):
def setUp(self):
self.user_test = User.objects.create_user(
username="UserTest", password="PaswordOfTheTest&120"
)
category_figurine = Category.objects.create(
name="super heroes"
)
figurine = Figurine.objects.create(
figurine_number="1",
category=category_figurine,
name="batman"
)
figurine.user.add(self.user_test)
return super().setUp()
def test_figurine_add_figurine(self):
self.client.login(username="UserTest", password="PaswordOfTheTest&120")
response = self.client.post(
"/figurines/add_figurine/",
{"figurine_number": "31", "category": "World of Warcraft", "name": "Thrall"},
)
self.assertEqual(response.status_code, 302)
self.assertTemplateUsed('figurines/collection.html')
def test_figurine_collection_user(self):
self.client.login(username="UserTest", password="PaswordOfTheTest&120")
response = self.client.get('/figurines/collection/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed('figurines/collection.html')
def test_figurine_search_with_all_figurines(self):
self.client.login(username="UserTest", password="PaswordOfTheTest&120")
user = User.objects.get(username="UserTest")
response = self.client.get('/figurines/search/?all=all')
user_figurine = user.figurine_set.all()
self.assertQuerysetEqual(
response.context['figurines_list'],
[repr(figurine) for figurine in user_figurine]
)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed('figurines/search.html')
def test_figurine_search_without_all_figurines(self):
self.client.login(username="UserTest", password="PaswordOfTheTest&120")
user = User.objects.get(username="UserTest")
user_figurine = user.figurine_set.all().delete()
response = self.client.get('/figurines/search/?all=all')
self.assertFalse(response.context['figurines_list'])
self.assertContains(response, 'Pas de résultat.')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed('figurines/search.html')
def test_figurine_search_with_figurines(self):
self.client.login(username="UserTest", password="PaswordOfTheTest&120")
user = User.objects.get(username="UserTest")
response = self.client.get('/figurines/search/?q=batman')
user_figurine = user.figurine_set.filter(name__icontains='batman')
self.assertQuerysetEqual(
response.context['figurines_list'],
[repr(figurine) for figurine in user_figurine]
)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed('figurines/search.html')
    def test_figurine_search_by_name_without_figurines(self):
self.client.login(username="UserTest", password="PaswordOfTheTest&120")
user = User.objects.get(username="UserTest")
user_figurine = user.figurine_set.filter(name__icontains='batman').delete()
response = self.client.get('/figurines/search/?q=batman')
self.assertFalse(response.context['figurines_list'])
self.assertContains(response, 'Pas de résultat.')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed('figurines/search.html')
def test_figurine_did_you_see(self):
self.client.login(username="UserTest", password="PaswordOfTheTest&120")
response = self.client.get("/figurines/did_you_see/")
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed("figurines/did_you_see.html")
def test_create_question(self):
self.client.login(username="UserTest", password="PaswordOfTheTest&120")
response = self.client.post(
"/figurines/create_question",
{
"title": "Je recherche batman",
"text": "Bonjour, je recherche Batman",
"date": "03/07/2020",
},
)
self.assertRedirects(response, '/figurines/did_you_see/')
response = self.client.get('/figurines/did_you_see/')
self.assertContains(response, 'Je recherche batman')
self.assertTemplateUsed('figurines/did_you_see.html')
def test_can_respond_to_question(self):
self.client.login(username="UserTest", password="PaswordOfTheTest&120")
response = self.client.post(
"/figurines/create_question",
{
"title": "Je recherche batman2",
"text": "Bonjour, je recherche Batman2",
"date": "03/07/2020",
},
)
post = DidYouSee.objects.get(title='Je recherche batman2')
response_second_message = self.client.post(
f"/figurines/create_question/{post.id}",
{
"title": "J'ai batman2",
"text": "j'ai batman",
"date": "20/07/2020",
}
)
response_detail = self.client.get(f'/figurines/post_detail/{post.id}/')
self.assertContains(response_detail, "j'ai batman")
self.assertTemplateUsed('figurines/post_detail.html')
def test_post_detail(self):
self.client.force_login(self.user_test)
user = User.objects.get(username="UserTest")
post = DidYouSee(
author=user,
title="Je recherche batman",
text="Bonjour, j'ai trouvé Batman",
)
post.save()
post.parent = post
post.save()
response = self.client.get(
f"/figurines/post_detail/{post.id}"
)
self.assertContains(response, "Je recherche batman")
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed('figurines/post_detail.html')
"""
def test_delete_figurine(self):
self.client.login(username="UserTest", password="PaswordOfTheTest&120")
response = self.client.post('/figurines/collection/?q=logan')
user = User.objects.get(username="UserTest")
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed('figurines/collection.html')
"""
# def test_report_post(self):
# self.client.login(username="UserTest", password="PaswordOfTheTest&120")
# response = self.client.post(
# "/figurines/post_detail/51/",
# {
# "title": "Je recherche batman",
# "text": "Bonjour, j'ai trouvé Batman",
# },
# )
# self.assertEqual(response.status_code, 200)
# self.assertTemplateUsed('figurines/report_post.html')
|
[
"[email protected]"
] | |
6ad1ec33ed60cb67164cba8e6c216bf23b7eff14
|
09592939eaf88d46f7d2d760d9587cb9fc22707e
|
/entity/cards/LETLT_083/LETLT_083.py
|
c575c2ef97600aa10d16c30ba708043ebfac001e
|
[
"MIT"
] |
permissive
|
fulln/lushi_script
|
5deb2fb99956988ee4884836443f74277b361939
|
f2c5250f6ce7e3ea2b8d3ba280d999ae8c7beb8b
|
refs/heads/main
| 2023-09-04T16:50:24.696142 | 2021-11-24T03:44:41 | 2021-11-24T03:44:41 | 431,565,901 | 0 | 0 |
MIT
| 2021-11-24T17:04:06 | 2021-11-24T17:04:05 | null |
UTF-8
|
Python
| false | false | 470 |
py
|
# -*- coding: utf-8 -*-
from hearthstone.entities import Entity
from entity.spell_entity import SpellEntity
class LETLT_083(SpellEntity):
"""
剧烈爆发
对本回合中已经行动过的敌人造成10点伤害。在下一场战斗开始时,重复此伤害。
"""
def __init__(self, entity: Entity):
super().__init__(entity)
self.damage = 0
self.range = 0
def play(self, game, hero, target):
pass
|
[
"[email protected]"
] | |
92d3f6d6dc1e477f6b89f1665b180bf5ab4360da
|
968913bda3879ef316100410cdb2b01333ac14a8
|
/004_Algorithm_Implementations_In_Python/data_structures/queue/queue_on_list.py
|
898ffac3a9c7c1fda92bb8b75af1826ee7ec17f0
|
[
"MIT"
] |
permissive
|
sm2774us/2021_Interview_Prep
|
02b6a81ee52f3cb14d9e060839a01aadd84e231f
|
c6689411a4334d53c88581a296e57c314b50f46c
|
refs/heads/main
| 2023-03-02T05:30:17.156821 | 2021-01-26T04:31:02 | 2021-01-26T04:31:02 | 332,603,676 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,213 |
py
|
"""Queue represented by a python list"""
class Queue():
def __init__(self):
self.entries = []
self.length = 0
self.front=0
def __str__(self):
printed = '<' + str(self.entries)[1:-1] + '>'
return printed
"""Enqueues {@code item}
@param item
item to enqueue"""
def put(self, item):
self.entries.append(item)
self.length = self.length + 1
"""Dequeues {@code item}
@requirement: |self.length| > 0
@return dequeued
item that was dequeued"""
def get(self):
self.length = self.length - 1
dequeued = self.entries[self.front]
#self.front-=1
#self.entries = self.entries[self.front:]
self.entries = self.entries[1:]
return dequeued
"""Rotates the queue {@code rotation} times
@param rotation
number of times to rotate queue"""
def rotate(self, rotation):
for i in range(rotation):
self.put(self.get())
"""Enqueues {@code item}
@return item at front of self.entries"""
def front(self):
return self.entries[0]
"""Returns the length of this.entries"""
def size(self):
return self.length
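# Minimal usage sketch:
#   q = Queue(); q.put(1); q.put(2); q.rotate(1)
#   q.get()  # -> 2, since 1 was rotated to the back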
|
[
"[email protected]"
] | |
09b392b45aef0ce2b082eaa210be15285a463e0c
|
45015c94a4376a4af66e4134f0552288cd15a2d8
|
/services/authentication_service.py
|
ee9f1e65813dcf31637b0a0974cb9c00e4c7b390
|
[] |
no_license
|
Anubhav722/trello
|
971111af8cbc1f6c344ace200e2741e809e9a1fa
|
600b5410cde7fd2a51720fa4ca7cc2ecfbff322e
|
refs/heads/master
| 2023-07-13T18:24:51.937539 | 2021-08-21T13:22:17 | 2021-08-21T13:22:17 | 398,563,384 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 280 |
py
|
class AuthenticationService:
    def __init__(self, ttl):
        self.ttl = ttl  # token time-to-live
        self.tokens = {}  # Map<token_id, user_obj>
def renew_token(self, token_id):
pass
def authenticate_request(self, token_id, timestamp):
pass
def register_user(self, ):
pass
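# Hypothetical sketch (not the original author's design): one in-memory way
# the skeleton above could be filled in, expiring each token ttl time units
# after it is issued or renewed.
class InMemoryAuthenticationService(AuthenticationService):
    def register_user(self, token_id, user, timestamp):
        self.tokens[token_id] = (user, timestamp + self.ttl)
    def renew_token(self, token_id, timestamp):
        if token_id in self.tokens:
            user, _ = self.tokens[token_id]
            self.tokens[token_id] = (user, timestamp + self.ttl)
    def authenticate_request(self, token_id, timestamp):
        entry = self.tokens.get(token_id)
        return entry is not None and timestamp < entry[1]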
|
[
"[email protected]"
] | |
b3afdc5ed5a2cd8de578e1fd31eb490f17a5db95
|
2455062787d67535da8be051ac5e361a097cf66f
|
/Producers/BSUB/TrigProd_amumu_a5_dR5/trigger_amumu_producer_cfg_TrigProd_amumu_a5_dR5_499.py
|
14a070c95d6dc5d7822dce37415383786cbf8e82
|
[] |
no_license
|
kmtos/BBA-RecoLevel
|
6e153c08d5ef579a42800f6c11995ee55eb54846
|
367adaa745fbdb43e875e5ce837c613d288738ab
|
refs/heads/master
| 2021-01-10T08:33:45.509687 | 2015-12-04T09:20:14 | 2015-12-04T09:20:14 | 43,355,189 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,360 |
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("PAT")
#process.load("BBA/Analyzer/bbaanalyzer_cfi")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.load('Configuration.EventContent.EventContent_cff')
process.load("Configuration.Geometry.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("PhysicsTools.PatAlgos.producersLayer1.patCandidates_cff")
process.load("PhysicsTools.PatAlgos.selectionLayer1.selectedPatCandidates_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'MCRUN2_71_V1::All', '')
process.load("Configuration.StandardSequences.MagneticField_cff")
####################
# Message Logger
####################
process.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(100)
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
## switch to uncheduled mode
process.options.allowUnscheduled = cms.untracked.bool(True)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(500)
)
####################
# Input File List
####################
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('root://eoscms//eos/cms/store/user/ktos/RECO_Step3_amumu_a5/RECO_Step3_amumu_a5_499.root'),
secondaryFileNames = cms.untracked.vstring()
)
############################################################
# Defining matching in DeltaR, sorting by best DeltaR
############################################################
process.mOniaTrigMatch = cms.EDProducer("PATTriggerMatcherDRLessByR",
src = cms.InputTag( 'slimmedMuons' ),
matched = cms.InputTag( 'patTrigger' ), # selections of trigger objects
matchedCuts = cms.string( 'type( "TriggerMuon" ) && path( "HLT_Mu16_TkMu0_dEta18_Onia*")' ), # input does not yet have the 'saveTags' parameter in HLT
maxDPtRel = cms.double( 0.5 ), # no effect here
maxDeltaR = cms.double( 0.3 ), #### selection of matches
maxDeltaEta = cms.double( 0.2 ), # no effect here
resolveAmbiguities = cms.bool( True ),# definition of matcher output
resolveByMatchQuality = cms.bool( True )# definition of matcher output
)
# talk to output module
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string("file:RECO_Step3_amumu_a5_TrigProd_499.root"),
outputCommands = process.MINIAODSIMEventContent.outputCommands
)
process.out.outputCommands += [ 'drop *_*_*_*',
'keep *_*slimmed*_*_*',
'keep *_pfTausEI_*_*',
'keep *_hpsPFTauProducer_*_*',
'keep *_hltTriggerSummaryAOD_*_*',
'keep *_TriggerResults_*_HLT',
'keep *_patTrigger*_*_*',
'keep *_prunedGenParticles_*_*',
'keep *_mOniaTrigMatch_*_*'
]
################################################################################
# Running the matching and setting the the trigger on
################################################################################
from PhysicsTools.PatAlgos.tools.trigTools import *
switchOnTrigger( process ) # This is optional and can be omitted.
switchOnTriggerMatching( process, triggerMatchers = [ 'mOniaTrigMatch'
])
process.outpath = cms.EndPath(process.out)
|
[
"[email protected]"
] | |
52cf3aac7e139b3a4d760b80cc223a9bd88e323d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03496/s023860422.py
|
3418e271fe6d39c5afd0834fa668eb6252fedf15
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 553 |
py
|
n = int(input())
a = list(map(int,input().split()))
mi = a[0]
mii = 1
ma = a[0]
mai = 1
for i in range(n):
if a[i] > ma:
ma = a[i]
mai = i+1
if a[i] < mi:
mi = a[i]
mii = i+1
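# Strategy sketch: each printed pair "i j" performs a[j] += a[i]. If every
# value is non-negative, accumulate prefix sums left-to-right; if every value
# is non-positive, accumulate right-to-left; otherwise first add the dominant
# extreme (the max if |max| >= |min|, else the min) to every element to make
# all signs uniform, then accumulate as above.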
if mi >= 0:
print(n-1)
for i in range(1,n):
print(i,i+1)
elif ma <= 0:
print(n-1)
for i in range(n,1,-1):
print(i,i-1)
elif abs(ma) >= abs(mi):
print(n*2-1)
for i in range(n):
print(mai,i+1)
for i in range(1,n):
print(i,i+1)
else:
print(n*2-1)
for i in range(n):
print(mii,i+1)
for i in range(n,1,-1):
print(i,i-1)
|
[
"[email protected]"
] | |
f86f1440c1dfce4772c26f8bd9d40aeb6c368956
|
27a066c48096e30e3cf4a795edf6e8387f63728b
|
/mysite/django_vises/runtimes/misc.py
|
dbb4cc342ce1012cbf1a9397f2dea0e09cf202d4
|
[] |
no_license
|
26huitailang/django-tutorial
|
2712317c3f7514743e90fb4135e5fe3fed5def90
|
28a0b04ee3b9ca7e2d6e84e522047c63b0d19c8f
|
refs/heads/master
| 2023-01-07T11:55:37.003245 | 2019-09-04T09:19:50 | 2019-09-04T09:19:50 | 113,199,279 | 1 | 0 | null | 2023-01-03T15:24:01 | 2017-12-05T15:27:52 |
Python
|
UTF-8
|
Python
| false | false | 2,885 |
py
|
#!/usr/bin/env python
# coding=utf-8
# import glob
import os
import operator
from django.utils.six import text_type
# copy from rest_framework
# Header encoding (see RFC5987)
HTTP_HEADER_ENCODING = 'iso-8859-1'
def get_request_client_ip_address(request):
"""获取 request 请求来源 ip address, 支持 nginx 使用 X-Real-IP/X-FORWARDED-FOR 传递来源 ip 地址
"""
ip = request.META.get('X-Real-IP') or request.META.get('HTTP_X_FORWARDED_FOR')
if ip:
ip = ip.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def get_authorization_header(request):
"""
Return request's 'Authorization:' header, as a bytestring.
Hide some test client ickyness where the header can be unicode.
"""
auth = request.META.get('HTTP_AUTHORIZATION', b'')
if isinstance(auth, text_type):
# Work around django test client oddness
auth = auth.encode(HTTP_HEADER_ENCODING)
return auth
def get_authorization_token_from_header(request):
"""
Return request's 'Authorization:' token
"""
keyword = 'Token'
auth = get_authorization_header(request).split()
if not auth or auth[0].lower() != keyword.lower().encode():
return None
# if len(auth) == 1:
# msg = _('Invalid token header. No credentials provided.')
# raise exceptions.AuthenticationFailed(msg)
# elif len(auth) > 2:
# msg = _('Invalid token header. Token string should not contain spaces.')
# raise exceptions.AuthenticationFailed(msg)
#
# try:
# token = auth[1].decode()
# except UnicodeError:
# msg = _('Invalid token header. Token string should not contain invalid characters.')
# raise exceptions.AuthenticationFailed(msg)
if len(auth) != 2:
return None
try:
token = auth[1].decode()
except UnicodeError:
return None
return token
def str_to_boolean(text):
"""将字符转为布尔值,if条件可以扩展"""
if text.lower() in ['false']:
return False
elif text.lower() in ['true']:
return True
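# Note (sketch): any input other than "true"/"false" (case-insensitive) falls
# through both branches and the function implicitly returns None.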
def sort_dict_list(dict_to_sort: list = None, sort_key='', reverse=False) -> list:
    """Sort a list of dicts by the given sort_key."""
sorted_list = sorted(dict_to_sort, key=operator.itemgetter(sort_key), reverse=reverse)
return sorted_list
def get_local_suite_img_list(suite_path: str = None, format='jpg') -> list:
"""获取本地suite的图片列表"""
if suite_path is None:
return []
    # glob could not match the more complex paths here
# img_file_list = glob.glob('{}/*.{}'.format(suite_path, format))
files_list = os.listdir(suite_path)
img_file_list = list(filter(lambda x: x.endswith(format), files_list))
return img_file_list
def get_local_suite_count(suite_path: str = None) -> int:
"""本地suite图片数量"""
return len(get_local_suite_img_list(suite_path))
|
[
"[email protected]"
] | |
9c7d677d074b5d250abc200c103cff8fb806b269
|
df94f543424f47f87bd6d546cca23d1c5a7b024c
|
/easy/easy922.py
|
f8409367478a0930ddc49d9bb4bc49ab8b62ce17
|
[] |
no_license
|
wangpeibao/leetcode-python
|
c13cb63304e91dcd55ffacee541d9197cafd01ff
|
392a272a799decdd77c2410a89787ea8e1aa76d3
|
refs/heads/master
| 2023-01-31T05:09:34.850459 | 2020-12-04T03:25:21 | 2020-12-04T03:25:21 | 257,457,585 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,263 |
py
|
'''
922. Sort Array By Parity II
Given an array A of non-negative integers, half of the integers in A are odd, and half are even.
Sort the array so that whenever A[i] is odd, i is odd; and whenever A[i] is even, i is even.
You may return any answer array that satisfies this condition.
Example:
Input: [4,2,5,7]
Output: [4,5,2,7]
Explanation: [4,7,2,5], [2,5,4,7], and [2,7,4,5] would also be accepted.
Constraints:
2 <= A.length <= 20000
A.length % 2 == 0
0 <= A[i] <= 1000
'''
from typing import List
class Solution:
def sortArrayByParityII(self, A: List[int]) -> List[int]:
        # two pointers
start = 0
length = len(A)
while start < length:
if (start % 2 == 0 and A[start] % 2 == 0) or (start % 2 == 1 and A[start] % 2 == 1):
start += 1
continue
            # handle the misplaced element: find the next misplaced slot and swap
end = start + 1
while end < length:
if (end % 2 == 0 and A[end] % 2 == 0) or (end % 2 == 1 and A[end] % 2 == 1):
end += 2
else:
A[start], A[end] = A[end], A[start]
start = start + 2
return A
so = Solution()
print(so.sortArrayByParityII([4,2,5,7]) == [4,5,2,7])
|
[
"[email protected]"
] | |
61ee902f9aec9bdeff25f6e72569396187f62aff
|
01afa0be1c3acbf562fd87bd8fec8b4101c1e461
|
/Mining-Massive-Dataset/week5/advanced_quiz3.py
|
8c96a6d7d682c7d9d8f2ec6fe73c3b09bf879b97
|
[] |
no_license
|
listiani13/coursera
|
e4f1116cc619b62336c5bb4d2e714e7051ae775c
|
5c84cf7171a440261de639b53558e9767b1cd85e
|
refs/heads/master
| 2021-01-22T03:54:31.657656 | 2016-04-04T11:07:25 | 2016-04-04T11:07:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 424 |
py
|
from math import sqrt
def euclidean(x, y):
return sqrt((x[0] - y[0])**2 + (x[1] - y[1])**2)
points = [(1, 6), (3, 7), (4, 3), (7, 7), (8, 2), (9, 5)]
chosen = [(0, 0), (10, 10)]
for _ in range(5):
pos, mx = -1, -1
for i, p in enumerate(points):
distance = min([euclidean(p, pc) for pc in chosen])
if distance > mx:
mx, pos = distance, i
print 'choose:', points[pos]
chosen.append(points[pos])
del points[pos]
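# Note (sketch): this is a farthest-first traversal -- each round picks the
# remaining point whose distance to its nearest already-chosen point is
# largest, then removes it from the candidate pool.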
|
[
"[email protected]"
] | |
f32b08a5dadf9bf4dbc0b238e4cb160e93b689f5
|
3a01d6f6e9f7db7428ae5dc286d6bc267c4ca13e
|
/pylith/meshio/OutputMatElastic.py
|
75bd619e57bb719fa4f7cc5e470df1ff774171da
|
[
"MIT"
] |
permissive
|
youngsolar/pylith
|
1ee9f03c2b01560706b44b4ccae99c3fb6b9fdf4
|
62c07b91fa7581641c7b2a0f658bde288fa003de
|
refs/heads/master
| 2020-12-26T04:04:21.884785 | 2014-10-06T21:42:42 | 2014-10-06T21:42:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,792 |
py
|
#!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2014 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file pyre/meshio/OutputMatElastic.py
##
## @brief Python object for managing output of finite-element
## information for material state variables.
##
## Factory: output_manager
from OutputManager import OutputManager
# OutputMatElastic class
class OutputMatElastic(OutputManager):
"""
Python object for managing output of finite-element information for
material state variables.
Factory: output_manager
"""
# INVENTORY //////////////////////////////////////////////////////////
class Inventory(OutputManager.Inventory):
"""
Python object for managing OutputMatElastic facilities and properties.
"""
## @class Inventory
## Python object for managing OutputMatElastic facilities and properties.
##
## \b Properties
## @li \b cell_info_fields Names of cell info fields to output.
## @li \b cell_data_fields Names of cell data fields to output.
##
## \b Facilities
## @li None
import pyre.inventory
cellInfoFields = pyre.inventory.list("cell_info_fields",
default=["mu",
"lambda",
"density"])
cellInfoFields.meta['tip'] = "Names of cell info fields to output."
cellDataFields = pyre.inventory.list("cell_data_fields",
default=["total_strain", "stress"])
cellDataFields.meta['tip'] = "Names of cell data fields to output."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="outputmatelastic"):
"""
Constructor.
"""
OutputManager.__init__(self, name)
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _configure(self):
"""
Set members based using inventory.
"""
OutputManager._configure(self)
self.vertexInfoFields = []
self.vertexDataFields = []
self.cellInfoFields = self.inventory.cellInfoFields
self.cellDataFields = self.inventory.cellDataFields
return
# FACTORIES ////////////////////////////////////////////////////////////
def output_manager():
"""
Factory associated with OutputManager.
"""
return OutputMatElastic()
# End of file
|
[
"[email protected]"
] | |
5bd234d032a1cef724c7d19f94ecdca75497c3b5
|
803bab6f782099d995bcdb99d163486f4fff8c50
|
/test/test_pointnav_resnet_policy.py
|
f58a4a45e857196c0ab6b215a39c3fce54de9832
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-NC-SA-3.0"
] |
permissive
|
facebookresearch/habitat-lab
|
7088506509f64da6d682f5dc69427589f71a58a9
|
f5b29e62df0788d70ba3618fc738fa4e947428ba
|
refs/heads/main
| 2023-08-24T14:00:02.707343 | 2023-08-23T04:53:48 | 2023-08-23T04:53:48 | 169,164,391 | 792 | 298 |
MIT
| 2023-09-14T15:20:03 | 2019-02-04T23:12:51 |
Python
|
UTF-8
|
Python
| false | false | 4,432 |
py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os.path
import shlex
import subprocess
import numpy as np
import pytest
import torch
from gym import spaces
from habitat import read_write
from habitat_baselines.config.default import get_config
from habitat_baselines.rl.ddppo.policy import PointNavResNetPolicy
ACTION_SPACE = spaces.Discrete(4)
OBSERVATION_SPACES = {
"depth_model": spaces.Dict(
{
"depth": spaces.Box(
low=0,
high=1,
shape=(256, 256, 1),
dtype=np.float32,
),
"pointgoal_with_gps_compass": spaces.Box(
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
shape=(2,),
dtype=np.float32,
),
}
),
"rgb_model": spaces.Dict(
{
"rgb": spaces.Box(
low=0,
high=255,
shape=(256, 256, 3),
dtype=np.uint8,
),
"pointgoal_with_gps_compass": spaces.Box(
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
shape=(2,),
dtype=np.float32,
),
}
),
"blind_model": spaces.Dict(
{
"pointgoal_with_gps_compass": spaces.Box(
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
shape=(2,),
dtype=np.float32,
),
}
),
}
MODELS_DEST_DIR = "data/ddppo-models"
MODELS_BASE_URL = "https://dl.fbaipublicfiles.com/habitat/data/baselines/v1/ddppo/ddppo-models"
MODELS_TO_TEST = {
"gibson-2plus-resnet50.pth": {
"backbone": "resnet50",
"observation_space": OBSERVATION_SPACES["depth_model"],
"action_space": ACTION_SPACE,
},
"gibson-2plus-mp3d-train-val-test-se-resneXt50-rgb.pth": {
"backbone": "se_resneXt50",
"observation_space": OBSERVATION_SPACES["rgb_model"],
"action_space": ACTION_SPACE,
},
"gibson-0plus-mp3d-train-val-test-blind.pth": {
"backbone": None,
"observation_space": OBSERVATION_SPACES["blind_model"],
"action_space": ACTION_SPACE,
},
}
def _get_model_url(model_name):
return f"{MODELS_BASE_URL}/{model_name}"
def _get_model_path(model_name):
return f"{MODELS_DEST_DIR}/{model_name}"
@pytest.fixture(scope="module", autouse=True)
def download_data():
for model_name in MODELS_TO_TEST:
model_url = _get_model_url(model_name)
model_path = _get_model_path(model_name)
if not os.path.exists(model_path):
print(f"Downloading {model_name}.")
download_command = (
"wget --continue " + model_url + " -P " + MODELS_DEST_DIR
)
subprocess.check_call(shlex.split(download_command))
assert os.path.exists(
model_path
), "Download failed, no package found."
@pytest.mark.parametrize(
"pretrained_weights_path,backbone,observation_space,action_space",
[
(
_get_model_path(model_name),
params["backbone"],
params["observation_space"],
params["action_space"],
)
for model_name, params in MODELS_TO_TEST.items()
],
)
def test_pretrained_models(
pretrained_weights_path, backbone, observation_space, action_space
):
config = get_config(
"test/config/habitat_baselines/ddppo_pointnav_test.yaml"
)
with read_write(config):
ddppo_config = config.habitat_baselines.rl.ddppo
ddppo_config.pretrained = True
ddppo_config.pretrained_weights = pretrained_weights_path
if backbone is not None:
ddppo_config.backbone = backbone
policy = PointNavResNetPolicy.from_config(
config=config,
observation_space=observation_space,
action_space=action_space,
)
pretrained_state = torch.load(pretrained_weights_path, map_location="cpu")
prefix = "actor_critic."
policy.load_state_dict(
{ # type: ignore
k[len(prefix) :]: v
for k, v in pretrained_state["state_dict"].items()
}
)
|
[
"[email protected]"
] | |
8562913d19df6e29366246a74cfb3818c2b42ba8
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/pygame/pygameweb/pygameweb/config.py
|
93d8fc8e758f4623cd6c55d2070b53f047f96a2d
|
[
"BSD-2-Clause"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null |
UTF-8
|
Python
| false | false | 129 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:3ead591f9a215ef79ce67657f0809e549584a72ef37757eb3272ca4fbba1ab78
size 2948
|
[
"[email protected]"
] | |
fefc253d22ba5bb0ef9b94bef1230f18761a0a2b
|
afa456bb3792e433d84684260cdce1dbc6302cde
|
/authors/apps/tests/test_validation.py
|
d124f479a99ca4cf8c7e3e77f3b359a31f4e9213
|
[
"BSD-3-Clause"
] |
permissive
|
andela/ah-backend-poseidon
|
23ac16e9fcdce49f78df04126f9f486b8c39ebd4
|
d2b561e83ed1e9a585853f4a4e2e37805e86c35c
|
refs/heads/develop
| 2022-12-09T07:38:04.843476 | 2019-07-19T13:44:13 | 2019-07-19T13:44:13 | 158,799,017 | 1 | 4 |
BSD-3-Clause
| 2022-12-08T01:19:16 | 2018-11-23T07:55:00 |
Python
|
UTF-8
|
Python
| false | false | 3,979 |
py
|
from .base import BaseTestCase
from rest_framework import status
from authors.apps.authentication.models import User
from . import (new_user, data2, invalid_email, invalid_password,
short_password, dup_username, user_login)
class AccountTests(BaseTestCase):
"""handles user registration tests"""
def test_new_user_registration(self):
"""check if new user can be registered"""
response = self.register_user(new_user)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertIn("token", response.data)
def test_user_login(self):
"""new user can be logged in\
and token returned on successful login"""
self.verify_user(new_user)
response = self.login_user(user_login)
#raise Exception(response.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn("token", response.data)
def test_wrong_token_header_prefix(self):
"""invalid prefix header provided"""
self.client.credentials(HTTP_AUTHORIZATION='hgfds ' + 'poiuytfd')
response = self.client.get("/api/user/", format="json")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_for_invalid_token(self):
"""validates token"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + 'yyuug')
response = self.client.get("/api/user/", format="json")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_no_token_in_header(self):
"""no token in header"""
self.add_credentials(response='')
response = self.client.get("/api/user/", format="json")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_create_super_user(self):
"""checks for registration of a super user in the User model"""
user = User.objects.create_superuser(
username='ayebare',
password='sampletestcase')
self.assertIn(str(user), str(user.username))
def test_create_non_user(self):
"""check for registration of a client user in the User model"""
user = User.objects.create_user(
email='[email protected]',
username='ayebare',
password='sampletestcase')
self.assertIn(str(user), str(user.email))
def test_get_user_details(self):
"""get user details"""
self.user_access()
response = self.client.get('/api/user/', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_update_user_details(self):
"""assert update route for user details is accessed"""
self.user_access()
response = self.client.put('/api/user/', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_invalid_email_message(self):
"""test invalid email provided."""
response = self.register_user(invalid_email)
self.assertIn(response.data["errors"]["email"][0],
'Please enter a valid email in the format [email protected]')
def test_invalid_password(self):
"""asserts invalid password provided."""
response = self.register_user(invalid_password)
self.assertIn(response.data["errors"]["password"][0],
'Password should be alphanuemric (a-z,A_Z,0-9).')
def test_short_password(self):
"""test short password provided."""
response = self.register_user(short_password)
self.assertIn(response.data["errors"]["password"][0],
'Password should not be less than 8 characters.')
def test_duplicate_username(self):
"user with same username provided exists"""
self.register_user(new_user)
response = self.register_user(dup_username)
self.assertIn(response.data["errors"]["username"][0],
'user with this username already exists.')
|
[
"[email protected]"
] | |
d31cbc5e81c667f85f43dbf60c55f2703673fc8c
|
5e66a11717a4760646c0e02bf9ffff2f82f66d18
|
/chemistry/qchem_make_opt_input_from_opt.py
|
ca09c35ad793d6cf3c29ac90a3ae9a121f288104
|
[] |
no_license
|
berquist/personal_scripts
|
4517678fa57e524e9765dc71f05594e34bdd9c72
|
d6c40ba6e5a607d26ffabf809cfdfdf3ce29bfb3
|
refs/heads/master
| 2023-07-21T08:44:36.401893 | 2023-07-07T19:55:55 | 2023-07-07T19:55:55 | 37,238,106 | 7 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,017 |
py
|
#!/usr/bin/env python
"""qchem_make_opt_input_from_opt.py: Make an input file for a Q-Chem
geometry optimization based on the last possible geometry from a
Q-Chem geometry optimization; this effectively 'restarts' the optimization
under a new filename.
The script assumes the output file being read from is called
'*opt(\d*).out', where 'opt' might be followed by a number. The script
will write an input file called '*opt(\d*)+1.in', with the previous
number incremented by one.
"""
import os.path
import re
from collections import OrderedDict
import cclib
from cclib.parser.utils import PeriodicTable
def make_file_iterator(filename):
"""Return an iterator over the contents of the given file name."""
# pylint: disable=C0103
with open(filename) as f:
contents = f.read()
return iter(contents.splitlines())
def getargs():
"""Get command-line arguments."""
import argparse
# pylint: disable=C0103
parser = argparse.ArgumentParser()
parser.add_argument("outputfilename", nargs="+")
parser.add_argument("--fragment", action="store_true")
args = parser.parse_args()
return args
def parse_user_input(outputfilename):
"""Parse the $rem section in the repeated 'User input:' section of the
output.
The reason we do it this way rather than with shell tools is to
handle any $section more easily and in a case-insensitive manner.
"""
user_input = dict()
outputfile = make_file_iterator(outputfilename)
line = ""
while "User input:" not in line:
line = next(outputfile)
line = next(outputfile)
assert "----" in line
line = next(outputfile)
while "--------------------------------------------------------------" not in line:
if line.strip() == "":
pass
elif line[0] == "$" and line.strip().lower() != "$end":
section_header = line[1:].lower()
user_input[section_header] = []
elif line.strip().lower() == "$end":
user_input[section_header] = "\n".join(user_input[section_header])
else:
user_input[section_header].append(line)
line = next(outputfile)
return user_input
def parse_fragments_from_molecule(molecule):
"""Given a $molecule section (without the $ lines), identify the
charges and multiplicities of each fragment and the zero-based indices
for the starting atom of each fragment.
"""
charges = []
multiplicities = []
start_indices = []
it = iter(molecule.splitlines())
line = next(it)
# sys_charge, sys_multiplicity = line.split()
counter = 0
# Gather the charges, spin multiplicities, and starting positions
# of each fragment.
for line in it:
if "--" in line:
line = next(it)
charge, multiplicity = line.split()
charges.append(charge)
multiplicities.append(multiplicity)
start_indices.append(counter)
else:
counter += 1
assert len(charges) == len(multiplicities) == len(start_indices)
return charges, multiplicities, start_indices
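# Illustrative sketch: for a $molecule body such as
#   0 1
#   --
#   0 1
#   O 0.0 0.0 0.0
#   --
#   1 2
#   H 0.0 0.0 1.0
# parse_fragments_from_molecule returns charges=['0', '1'],
# multiplicities=['1', '2'], and start_indices=[0, 1].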
def form_molecule_section_from_fragments(
elements, geometry, charges, multiplicities, start_indices
):
"""Form the Q-Chem $molecule section containing the charge,
multiplicity, and atomic symbols and coordinates for multiple
fragments.
Returns a list that will need to be joined with newlines.
"""
assert len(charges) == len(multiplicities) == (len(start_indices) + 1)
s = "{:3s} {:15.10f} {:15.10f} {:15.10f}"
# The first elements of the charge and multiplicity lists are for
# the supersystem (whole molecule).
molecule_section = ["{} {}".format(charges[0], multiplicities[0])]
from itertools import count
for (charge, multiplicity, idx_iter) in zip(charges[1:], multiplicities[1:], count(0)):
molecule_section.append("--")
molecule_section.append("{} {}".format(charge, multiplicity))
idx_start = start_indices[idx_iter]
try:
idx_end = start_indices[idx_iter + 1]
except IndexError:
idx_end = len(elements)
for element, coords in zip(elements[idx_start:idx_end], geometry[idx_start:idx_end]):
molecule_section.append(s.format(element, *coords))
return molecule_section
def form_molecule_section(elements, geometry, charge, multiplicity):
"""Form the Q-Chem $molecule section containing the charge,
multiplicity, and atomic symbols and coordinates.
Returns a list that will need to be joined with newlines.
"""
s = "{:3s} {:15.10f} {:15.10f} {:15.10f}"
molecule_section = ["{} {}".format(charge, multiplicity)]
for (
element,
coords,
) in zip(elements, geometry):
molecule_section.append(s.format(element, *coords))
return molecule_section
if __name__ == "__main__":
args = getargs()
pt = PeriodicTable()
for outputfilename in args.outputfilename:
job = cclib.io.ccopen(outputfilename)
assert isinstance(job, cclib.parser.qchemparser.QChem)
try:
data = job.parse()
# this is to deal with the Q-Chem parser not handling
# incomplete SCF cycles properly
except StopIteration:
print("no output made: StopIteration in {}".format(outputfilename))
continue
# Determine the name of the file we're writing.
assert outputfilename.endswith(".out")
numstr = re.search(r"opt(\d*)", outputfilename).groups()[0]
if numstr == "":
optnum = 2
else:
optnum = int(numstr) + 1
inputfilename = re.sub(r"opt\d*", "opt{}".format(optnum), outputfilename)
inputfilename = inputfilename.replace(".out", ".in")
inputfilename = os.path.basename(inputfilename)
user_input = parse_user_input(outputfilename)
# Form the atomic symbols and coordinates for each atom in
# $molecule.
element_list = [pt.element[Z] for Z in data.atomnos]
last_geometry = data.atomcoords[-1]
if args.fragment:
charges, multiplicities, start_indices = parse_fragments_from_molecule(
user_input["molecule"]
)
charges.insert(0, data.charge)
multiplicities.insert(0, data.mult)
molecule_section = form_molecule_section_from_fragments(
element_list, last_geometry, charges, multiplicities, start_indices
)
else:
molecule_section = form_molecule_section(
element_list, last_geometry, data.charge, data.mult
)
user_input["molecule"] = "\n".join(molecule_section)
with open(inputfilename, "w") as fh:
for section_header in user_input:
fh.write("${}\n".format(section_header))
fh.write(user_input[section_header])
fh.write("\n$end\n\n")
print(inputfilename)
|
[
"[email protected]"
] | |
baf6d43bb76cf966f9aafce6ee12d8dd8e818f72
|
a74cabbe1b11fc8ef575ea86f2543cd95db78ec9
|
/python_program/q783_Minimum_Distance_Between_BST_Nodes.py
|
4e4fe499e62126c9b084fde6bd89e951b18accbf
|
[] |
no_license
|
tszandy/leetcode
|
87e3ccf291b2879637d2d8238935a455b401a78a
|
f1f4361541dcffbb291285663c8820d7ffb37d2f
|
refs/heads/master
| 2023-04-06T15:34:04.847875 | 2023-03-26T12:22:42 | 2023-03-26T12:22:42 | 204,069,234 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,628 |
py
|
from typing import List, Optional
from collections import Counter,defaultdict
from math import *
from functools import reduce,lru_cache,total_ordering
import numpy as np
from heapq import *
from bisect import bisect_left,bisect_right
from itertools import count
import queue
# Definition for a binary tree node (uncommented so the file runs standalone).
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
def minDiffInBST(self, root: Optional[TreeNode]) -> int:
return self.min_difference(root)
def min_difference(self,node):
if node == None:
return float("inf")
node_left_min = float("inf")
if node.left!=None:
node_left_min = node.val-self.max_left(node.left)
node_right_min = float("inf")
if node.right!=None:
node_right_min = self.max_right(node.right)-node.val
left_min = self.min_difference(node.left)
right_min = self.min_difference(node.right)
return min(node_left_min,node_right_min,left_min,right_min)
def max_left(self,node):
if node.right == None:
return node.val
else:
return self.max_left(node.right)
def max_right(self,node):
if node.left == None:
return node.val
else:
return self.max_right(node.left)
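# Alternative sketch (not the submitted solution): in-order traversal of a
# BST visits values in sorted order, so the answer is the minimum gap between
# consecutive visited values.
def min_diff_inorder(root):
    prev = None
    best = float("inf")
    def visit(node):
        nonlocal prev, best
        if node is None:
            return
        visit(node.left)
        if prev is not None:
            best = min(best, node.val - prev)
        prev = node.val
        visit(node.right)
    visit(root)
    return best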
sol = Solution()
# input (raw LeetCode test cases, kept as comments -- `null` is not Python)
# [4,2,6,1,3]
# [1,0,48,null,null,12,49]
# [1,0]
# [2,0,5]
# [2,0,6]
# [5,0,13]
# output (requires building `root` as a TreeNode from one of the cases above)
root = TreeNode(4, TreeNode(2, TreeNode(1), TreeNode(3)), TreeNode(6))  # [4,2,6,1,3]
output = sol.minDiffInBST(root)
print(output)  # -> 1 for this tree
|
[
"[email protected]"
] | |
2c4815d72b5155adfdf7058fe4a14ff7f245285f
|
6497bc5638453877744c900f7accef0203f36e89
|
/leedcode1_twosum.py
|
e4bfcfdfe9201a15782286e8a9d575f229c34ec0
|
[] |
no_license
|
budaLi/leetcode-python-
|
82e9affb3317f63a82d89d7e82650de3c804a5ac
|
4221172b46d286ab6bf4c74f4d015ee9ef3bda8d
|
refs/heads/master
| 2022-01-30T00:55:26.209864 | 2022-01-05T01:01:47 | 2022-01-05T01:01:47 | 148,323,318 | 46 | 23 | null | null | null | null |
UTF-8
|
Python
| false | false | 868 |
py
|
#-*-coding:utf8-*-
#author : Lenovo
#date: 2018/7/23
class Solution(object):
    def twoSum(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        d = {}  # maps a value already seen in nums to its index
        for size in range(len(nums)):
            # look up the complement before inserting the current value,
            # so an element never pairs with itself
            if target - nums[size] in d:
                return [d[target - nums[size]], size]
            if nums[size] not in d:
                d[nums[size]] = size
ex=Solution()
e=ex.twoSum([1,2,5,7,8],15)  # -> [3, 4], since 7 + 8 == 15
print(e)
|
[
"[email protected]"
] | |
d76e46afa9347a3212afc1f391dab391766e7696
|
a36501f44a09ca03dd1167e1d7965f782e159097
|
/app/extensions/mongobeat/models.py
|
27451e0eefe1a01350156a088481e408b9a33cd9
|
[
"Apache-2.0"
] |
permissive
|
ssfdust/full-stack-flask-smorest
|
9429a2cdcaa3ff3538875cc74cff802765678d4b
|
4f866b2264e224389c99bbbdb4521f4b0799b2a3
|
refs/heads/master
| 2023-08-05T08:48:03.474042 | 2023-05-07T01:08:20 | 2023-05-07T01:08:20 | 205,528,296 | 39 | 10 |
Apache-2.0
| 2023-08-31T00:18:42 | 2019-08-31T10:12:25 |
Python
|
UTF-8
|
Python
| false | false | 7,149 |
py
|
# Copyright 2019 RedLotus <[email protected]>
# Author: RedLotus <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2018 Regents of the University of Michigan
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
"""
app.extensions.mongobeat
~~~~~~~~~~~~~~~~~~~~~~~~~
The ORM module for MongoBeat.
"""
import datetime
from ast import literal_eval
import celery.schedules
from celery import current_app
from mongoengine import (
BooleanField,
DateTimeField,
DictField,
DynamicDocument,
DynamicField,
EmbeddedDocument,
EmbeddedDocumentField,
IntField,
ListField,
StringField,
)
def get_periodic_task_collection():
"""获取表名"""
if (
hasattr(current_app.conf, "CELERY_MONGODB_SCHEDULER_COLLECTION")
and current_app.conf.CELERY_MONGODB_SCHEDULER_COLLECTION
):
return current_app.conf.CELERY_MONGODB_SCHEDULER_COLLECTION # pragma: no cover
return "schedules"
#: Authorized values for PeriodicTask.Interval.period
PERIODS = ("days", "hours", "minutes", "seconds", "microseconds")
class PeriodicTask(DynamicDocument):
"""
    ORM for periodic tasks.
    :attr name: schedule name
    :attr task: task name
    :attr interval: interval schedule
    :attr crontab: crontab schedule
    :attr args: positional arguments
    :attr kwargs: keyword arguments
    :attr queue: queue
    :attr no_changes: no changes
    :attr exchange: AMQP exchange
    :attr routing_key: AMQP routing key
    :attr soft_time_limit: soft time limit
    :attr expires: expiry time
    :attr start_after: run only after this time
    :attr enabled: enabled
    :attr last_run_at: time of the last run
    :attr total_run_count: total run count
    :attr max_run_count: maximum run count
    :attr date_changed: date of last change
    :attr description: description
    :attr run_immediately: run immediately
"""
meta = {"collection": get_periodic_task_collection(), "allow_inheritance": True}
class Interval(EmbeddedDocument):
"""
        :attr every: every N period units
        :attr period: unit of the period
"""
meta = {"allow_inheritance": True}
every = IntField(min_value=0, default=0, required=True, verbose_name="周期")
period = StringField(choices=PERIODS, verbose_name="每")
@property
def schedule(self):
return celery.schedules.schedule(
datetime.timedelta(**{self.period: self.every})
)
@property
def period_singular(self):
return self.period[:-1]
def __str__(self):
if self.every == 1:
return "every {0.period_singular}".format(self)
return "every {0.every} {0.period}".format(self)
class Crontab(EmbeddedDocument):
"""
        :attr minute: minute
        :attr hour: hour
        :attr day_of_week: day of week
        :attr day_of_month: day of month
        :attr month_of_year: month of year
"""
meta = {"allow_inheritance": True}
minute = StringField(default="*", required=True, verbose_name="分钟")
hour = StringField(default="*", required=True, verbose_name="小时")
day_of_week = StringField(default="*", required=True, verbose_name="周")
day_of_month = StringField(default="*", required=True, verbose_name="日")
month_of_year = StringField(default="*", required=True, verbose_name="月")
@property
def schedule(self):
return celery.schedules.crontab(
minute=self.minute,
hour=self.hour,
day_of_week=self.day_of_week,
day_of_month=self.day_of_month,
month_of_year=self.month_of_year,
)
def __str__(self):
def rfield(f):
return f and str(f).replace(" ", "") or "*"
return "{0} {1} {2} {3} {4} (分/时/周/日/月)".format(
rfield(self.minute),
rfield(self.hour),
rfield(self.day_of_week),
rfield(self.day_of_month),
rfield(self.month_of_year),
)
name = StringField(unique=True, verbose_name="定时名称")
task = StringField(required=True, verbose_name="任务名称")
args = ListField(DynamicField(), verbose_name="参数")
kwargs = DictField(verbose_name="键值参数")
queue = StringField(verbose_name="队列")
exchange = StringField(verbose_name="AMPQ的交换器")
routing_key = StringField(verbose_name="AMPQ路由")
soft_time_limit = IntField(verbose_name="软时间限制")
expires = DateTimeField(verbose_name="过期时间")
start_after = DateTimeField(verbose_name="在某时间后运行")
enabled = BooleanField(default=False, verbose_name="启用")
last_run_at = DateTimeField(verbose_name="最后运行时间")
total_run_count = IntField(min_value=0, default=0, verbose_name="总计运行次数")
max_run_count = IntField(min_value=0, default=0, verbose_name="最大运行次数")
date_changed = DateTimeField(verbose_name="改变日期")
description = StringField(verbose_name="描述")
run_immediately = BooleanField(verbose_name="立刻运行")
type = StringField(
required=True, verbose_name="类型", choices=["crontab", "interval"]
)
interval = EmbeddedDocumentField(Interval, verbose_name="定时")
crontab = EmbeddedDocumentField(Crontab, verbose_name="周期")
# objects = managers.PeriodicTaskManager()
no_changes = False
def clean(self):
"""透过MongoEngine验证interval和crontab不是同时存在"""
if self.type == "crontab":
self.interval = None
else:
self.crontab = None
if isinstance(self.args, str):
self.args = literal_eval(self.args)
if isinstance(self.kwargs, str):
self.kwargs = literal_eval(self.kwargs)
@property
def schedule(self):
if self.interval:
return self.interval.schedule
elif self.crontab:
return self.crontab.schedule
else:
raise Exception("must define interval or crontab schedule")
def __str__(self):
fmt = "{0.name}: {{no schedule}}"
if self.interval:
fmt = "{0.name}: {0.interval}"
elif self.crontab:
fmt = "{0.name}: {0.crontab}"
else:
raise Exception("must define interval or crontab schedule")
return fmt.format(self)
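# A minimal usage sketch (assumes a configured mongoengine connection and a
# running MongoDB; the URI and task path below are illustrative, not part of
# this module):
if __name__ == "__main__":
    from mongoengine import connect

    connect(host="mongodb://localhost:27017/celerybeat")
    task = PeriodicTask(
        name="cleanup-every-5-minutes",
        task="app.tasks.cleanup",   # hypothetical task path
        type="interval",
        interval=PeriodicTask.Interval(every=5, period="minutes"),
        enabled=True,
    )
    task.save()
    print(task)  # -> "cleanup-every-5-minutes: every 5 minutes"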
|
[
"[email protected]"
] | |
f3287e42a48321132242a2d84b76e9deee52f5db
|
7834e7a48399b156401ea62c0c6d2de80ad421f5
|
/pysparkling/fileio/codec/codec.py
|
c057cfaa4b9cab5df56f5d5f9ac4badb66914438
|
[
"MIT"
] |
permissive
|
vojnovski/pysparkling
|
b9758942aba0d068f6c51797c8fb491cf59c3401
|
21b36464371f121dc7963dac09d300e7235f587e
|
refs/heads/master
| 2020-04-08T18:33:55.707209 | 2016-07-27T15:12:59 | 2016-07-27T15:12:59 | 62,555,929 | 0 | 0 | null | 2016-07-04T11:06:18 | 2016-07-04T11:06:18 | null |
UTF-8
|
Python
| false | false | 222 |
py
|
import logging
log = logging.getLogger(__name__)
class Codec(object):
def __init__(self):
pass
def compress(self, stream):
return stream
def decompress(self, stream):
return stream
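# A minimal sketch of a concrete codec built on the identity base class above;
# GzipCodec is illustrative only (pysparkling's real codecs live in sibling
# modules of this package).
import gzip
from io import BytesIO

class GzipCodec(Codec):
    def compress(self, stream):
        # Read the whole stream and hand back a gzip-compressed stream.
        return BytesIO(gzip.compress(stream.read()))

    def decompress(self, stream):
        return BytesIO(gzip.decompress(stream.read()))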
|
[
"[email protected]"
] | |
354cd069b9195ce2cabedf5b537fbef6f1713e6b
|
8c7b03f24517e86f6159e4d74c8528bfbcbf31af
|
/test/python_api/lldbutil/frame/TestFrameUtils.py
|
04d398bc5fa1b95d457aa1aaae5bd15ded01ab94
|
[
"NCSA"
] |
permissive
|
markpeek/lldb
|
f849567fbd7791be10aacd41be44ee15f1a4fdc4
|
58c8d5af715a3da6cbb7e0efc6905e9d07410038
|
refs/heads/master
| 2021-01-15T17:01:57.014568 | 2011-12-24T01:08:58 | 2011-12-24T01:08:58 | 3,042,888 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,103 |
py
|
"""
Test utility functions for the frame object.
"""
import os
import unittest2
import lldb
from lldbtest import *
class FrameUtilsTestCase(TestBase):
mydir = os.path.join("python_api", "lldbutil", "frame")
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.line = line_number('main.c',
"// Find the line number here.")
@python_api_test
def test_frame_utils(self):
"""Test utility functions for the frame object."""
self.buildDefault()
self.frame_utils()
def frame_utils(self):
exe = os.path.join(os.getcwd(), "a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.c", self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(None, None, os.getcwd())
if not process:
self.fail("SBTarget.LaunchProcess() failed")
self.assertTrue(process.GetState() == lldb.eStateStopped,
PROCESS_STOPPED)
import lldbutil
thread = lldbutil.get_stopped_thread(process, lldb.eStopReasonBreakpoint)
frame0 = thread.GetFrameAtIndex(0)
frame1 = thread.GetFrameAtIndex(1)
parent = lldbutil.get_parent_frame(frame0)
self.assertTrue(parent and parent.GetFrameID() == frame1.GetFrameID())
frame0_args = lldbutil.get_args_as_string(frame0)
parent_args = lldbutil.get_args_as_string(parent)
self.assertTrue(frame0_args and parent_args and "(int)val=1" in frame0_args)
if self.TraceOn():
lldbutil.print_stacktrace(thread)
print "Current frame: %s" % frame0_args
print "Parent frame: %s" % parent_args
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
[
"[email protected]"
] | |
44ca2e8649630c0f338c6636d11ae3d772d89710
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03418/s842655187.py
|
e812523bc9e5891268bd0c4350311e175da8ddc3
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 161 |
py
|
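# For each modulus i in (K, N], count x in [1, N] with x % i >= K and sum
# over i: the t = N // i complete residue blocks contribute (i - K) values
# each, and the partial block of length n = N % i adds max(0, n - K + 1)
# when K > 0, or all n values when K == 0.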
N,K=map(int,input().split())
a=0
for i in range(K+1,N+1):
t=N//i
n=N-t*i
a+=t*(i-K)
if K:
a+=max(0,n-K+1)
else:
a+=n
print(a)
|
[
"[email protected]"
] | |
1c3d4834bbc46156f17c4081635c11a08d327dd1
|
710e96b1435bc43cc260512df75af5dd3b2afd13
|
/code/1044.py
|
6b217c61734025d6ab42ff1303588769ee0ced7d
|
[
"MIT"
] |
permissive
|
minssoj/Learning_Algorithm_Up
|
94ca8166c9a5d87917cf033ad8415871684241c4
|
45ec4e2eb4c07c9ec907a74dbd31370e1645c50b
|
refs/heads/main
| 2023-01-08T20:52:32.983756 | 2020-11-05T17:49:45 | 2020-11-05T17:49:45 | 301,926,859 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 196 |
py
|
# [Basics - Arithmetic] Read one integer, add 1, and print it (with explanation)
# [email protected]
'''
Problem link : https://www.codeup.kr/problem.php?id=1044
'''
n = int(input())
print(n + 1)
|
[
"[email protected]"
] | |
0afbaee6a3d11e935314a77c986fca4852eeb54e
|
d326cd8d4ca98e89b32e6a6bf6ecb26310cebdc1
|
/rosalind/bioinformatics/stronghold/tran/main.py
|
131ebf08d6ecc2fe6278e7b4127c11468845a825
|
[] |
no_license
|
dswisher/rosalind
|
d6af5195cdbe03adb5a19ed60fcbf8c05beac784
|
4519740350e47202f7a45ce70e434f7ee15c6afc
|
refs/heads/master
| 2021-08-09T02:58:17.131164 | 2017-11-12T01:26:26 | 2017-11-12T01:26:26 | 100,122,283 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 923 |
py
|
import sys
from rosalind.common import util
from rosalind.bioinformatics.common import fasta
def compute_ratio(seq1, seq2):
transitions = set(['AG', 'GA', 'CT', 'TC'])
transversions = set(['AC', 'CA', 'GT', 'TG', 'AT', 'TA', 'CG', 'GC'])
numTransitions = 0
numTransversions = 0
for i in xrange(len(seq1)):
x = seq1[i] + seq2[i]
if x in transitions:
numTransitions += 1
elif x in transversions:
numTransversions += 1
return float(numTransitions) / numTransversions
def main(fname):
seqs, _ = fasta.read(util.find_file(fname))
if len(seqs[0]) != len(seqs[1]):
print "Sequences have different lengths!"
sys.exit(1)
print compute_ratio(seqs[0], seqs[1])
if __name__ == '__main__':
if len(sys.argv) != 2:
print ("You must specify the name of the data file to load!")
sys.exit(1)
main(sys.argv[1])
|
[
"[email protected]"
] | |
2150af8db3f4f64b86685075d6ed96e3845861c3
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_chauffeur.py
|
97f43e805d5aba06eb05a5fd2bd9c150fd38b7be
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 354 |
py
|
# class header
class _CHAUFFEUR():
def __init__(self,):
self.name = "CHAUFFEUR"
self.definitions = [u'someone whose job is to drive a car for a rich or important person: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
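# A minimal usage sketch of the generated vocabulary class above:
if __name__ == '__main__':
    word = _CHAUFFEUR()
    print(word.name, '-', word.definitions[0])
    print(word.run())  # no behaviour yet: returns the empty jsondata dict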
|
[
"[email protected]"
] | |
7b1dfacee784f7b05375188302ab051e0b005603
|
ad28a59209239be285d1127a87bc08893fb62cb9
|
/python/aad/test_concept_drift_classifier.py
|
76ebbe63d04d5f1a6934a18ec97cdc667445b71c
|
[
"MIT"
] |
permissive
|
Karl-Wu/ad_examples
|
9e6f894c2414640b23ccdeb39db9e9b8352ef077
|
6fb0a2a72db51d82645e377945327eb9e1ecf8b8
|
refs/heads/master
| 2020-03-26T19:33:45.128414 | 2018-08-17T21:42:15 | 2018-08-17T21:42:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,961 |
py
|
from aad.data_stream import *
from common.gen_samples import read_anomaly_dataset
from aad.anomaly_dataset_support import *
from aad.classifier_trees import RandomForestAadWrapper
"""
Check data drift with a Random Forest classifier.
NOTE: The classifier is trained only once in this example with the
first window of data. The drift is tested for the rest of the
windows *without* updating the model.
To run:
pythonw -m aad.test_concept_drift_classifier --debug --plot --log_file=temp/test_concept_drift_classifier.log --dataset=weather
"""
def test_kl_data_drift_classifier():
logger = logging.getLogger(__name__)
args = get_command_args(debug=False)
configure_logger(args)
dataset_config = dataset_configs[args.dataset]
stream_window = dataset_config[2]
alpha = 0.05
n_trees = 100
X_full, y_full = read_anomaly_dataset(args.dataset)
logger.debug("dataset: %s (%d, %d), stream_window: %d, alpha: %0.3f" %
(args.dataset, X_full.shape[0], X_full.shape[1], stream_window, alpha))
stream = DataStream(X_full, y_full, IdServer(initial=0))
# get first window of data
training_set = stream.read_next_from_stream(stream_window)
x, y, ids = training_set.x, training_set.y, training_set.ids
logger.debug("First window loaded (%s): %d" % (args.dataset, x.shape[0]))
# train classifier with the window of data
rf = RFClassifier.fit(x, y, n_estimators=n_trees)
logger.debug("Random Forest classifier created with %d trees" % rf.clf.n_estimators)
# prepare wrapper over the classifier which will compute KL-divergences
# NOTE: rf.clf is the scikit-learn Random Forest classifier instance
model = RandomForestAadWrapper(x=x, y=y, clf=rf.clf)
logger.debug("Wrapper model created with %d nodes" % len(model.w))
# compute KL replacement threshold *without* p
ref_kls, kl_q_alpha = model.get_KL_divergence_distribution(x, p=None, alpha=alpha)
# now initialize reference p
p = model.get_node_sample_distributions(x)
window = 0
while not stream.empty():
window += 1
# get next window of data and check KL-divergence
training_set = stream.read_next_from_stream(n=stream_window)
x, y = training_set.x, training_set.y
logger.debug("window %d loaded: %d" % (window, x.shape[0]))
# compare KL-divergence of current data dist against reference dist p
comp_kls, _ = model.get_KL_divergence_distribution(x, p=p)
# find which trees exceed alpha-level threshold
trees_exceeding_kl_q_alpha = model.get_trees_to_replace(comp_kls, kl_q_alpha)
n_threshold = int(2 * alpha * n_trees)
logger.debug("[%d] #trees_exceeding_kl_q_alpha: %d, threshold number of trees: %d\n%s" %
(window, len(trees_exceeding_kl_q_alpha), n_threshold, str(list(trees_exceeding_kl_q_alpha))))
if __name__ == "__main__":
test_kl_data_drift_classifier()
|
[
"[email protected]"
] | |
ff22a34e9a956fa4c76ccb221f9d964d39375203
|
6cd3de9d6aa0c52602010aa857966d5dc4d57442
|
/mlprodict/onnxrt/ops_cpu/op_lp_normalization.py
|
b8f5b8b7274ac89c8fb4bd972bdfda60a72b2e4c
|
[
"MIT"
] |
permissive
|
xadupre/mlprodict
|
2307ca96eafeeafff08d5322184399bb5dc1c37e
|
f82c8a26a60104948c67849b1c4af95ca812c153
|
refs/heads/master
| 2022-12-10T18:50:36.953032 | 2020-09-03T08:53:58 | 2020-09-03T08:53:58 | 292,824,744 | 1 | 0 |
NOASSERTION
| 2020-09-04T10:56:45 | 2020-09-04T10:56:44 | null |
UTF-8
|
Python
| false | false | 837 |
py
|
# -*- encoding: utf-8 -*-
# pylint: disable=E0203,E1101,C0111
"""
@file
@brief Runtime operator.
"""
import numpy
from ._op import OpRunUnaryNum
class LpNormalization(OpRunUnaryNum):
atts = {'axis': -1, 'p': 2}
def __init__(self, onnx_node, desc=None, **options):
OpRunUnaryNum.__init__(self, onnx_node, desc=desc,
expected_attributes=LpNormalization.atts,
**options)
def _run(self, x): # pylint: disable=W0221
norm = numpy.power(numpy.power(x, self.p).sum(
axis=self.axis), 1. / self.p)
norm = numpy.expand_dims(norm, self.axis)
if self.inplaces.get(0, False):
return self._run_inplace(x, norm)
return (x / norm, )
def _run_inplace(self, x, norm):
x /= norm
return (x, )
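# A minimal numeric sketch of the operator above, outside the ONNX runtime
# (the 2x2 input is illustrative): L2-normalize each row, as p=2 / axis=-1 does.
if __name__ == "__main__":
    x = numpy.array([[3.0, 4.0], [1.0, 0.0]])
    norm = numpy.expand_dims(numpy.power(numpy.power(x, 2).sum(axis=-1), 1.0 / 2), -1)
    print(x / norm)  # rows become unit vectors: [0.6, 0.8] and [1.0, 0.0]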
|
[
"[email protected]"
] | |
64ced12d14e6ef07689ff4230e0e91e5529ae4b7
|
44849991f507933ebc7ed4e8e37819a529be539e
|
/steps/step09.py
|
f4696c0a213f1cce610937e96f56827da22d84d5
|
[
"MIT"
] |
permissive
|
NukeA/deep-learning-from-scratch-3
|
4ff60e8ac5b157a05079fc3b8a2ea69acec9ece5
|
e48a7b8788827a16cc9f81adc135a3a14989bea5
|
refs/heads/master
| 2022-11-02T00:58:56.621011 | 2020-06-16T04:36:23 | 2020-06-16T04:36:23 | 273,873,741 | 1 | 0 |
MIT
| 2020-06-21T09:34:25 | 2020-06-21T09:34:24 | null |
UTF-8
|
Python
| false | false | 1,780 |
py
|
import numpy as np
class Variable:
def __init__(self, data):
if data is not None:
if not isinstance(data, np.ndarray):
raise TypeError('{} is not supported'.format(type(data)))
self.data = data
self.grad = None
self.creator = None
def set_creator(self, func):
self.creator = func
def backward(self):
if self.grad is None:
self.grad = np.ones_like(self.data)
funcs = [self.creator]
while funcs:
f = funcs.pop()
x, y = f.input, f.output
x.grad = f.backward(y.grad)
if x.creator is not None:
funcs.append(x.creator)
def as_array(x):
if np.isscalar(x):
return np.array(x)
return x
class Function:
def __call__(self, input):
x = input.data
y = self.forward(x)
output = Variable(as_array(y))
output.set_creator(self)
self.input = input
self.output = output
return output
def forward(self, x):
raise NotImplementedError()
def backward(self, gy):
raise NotImplementedError()
class Square(Function):
def forward(self, x):
y = x ** 2
return y
def backward(self, gy):
x = self.input.data
gx = 2 * x * gy
return gx
class Exp(Function):
def forward(self, x):
y = np.exp(x)
return y
def backward(self, gy):
x = self.input.data
gx = np.exp(x) * gy
return gx
def square(x):
return Square()(x)
def exp(x):
return Exp()(x)
x = Variable(np.array(0.5))
y = square(exp(square(x)))
y.backward()
print(x.grad)
x = Variable(np.array(1.0)) # OK
x = Variable(None) # OK
x = Variable(1.0) # NG
|
[
"[email protected]"
] | |
5dd63b2b9df8c5af5403c212e5f8fa25f11a8055
|
49536aafb22a77a6caf249c7fadef46d63d24dfe
|
/tensorflow/tensorflow/contrib/learn/python/learn/learn_io/generator_io.py
|
784781d7368490a10d5dbc9cd5842f4bed98eda3
|
[
"Apache-2.0"
] |
permissive
|
wangzhi01/deeplearning-1
|
4e5ad93f0d9ecd302b74352f80fe1fa6ae70bf0d
|
46ab82253d956953b8aa98e97ceb6cd290e82288
|
refs/heads/master
| 2020-05-28T03:14:55.687567 | 2018-09-12T16:52:09 | 2018-09-12T16:52:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,499 |
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow generator of dict with numpy arrays."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Container
from types import FunctionType
from types import GeneratorType
from tensorflow.python.estimator.inputs.queues.feeding_functions import _enqueue_data as enqueue_data
def generator_input_fn(x,
target_key=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
pad_value=None):
"""Returns input function that returns dicts of numpy arrays
yielded from a generator.
It is assumed that every dict of numpy arrays yielded from the dictionary
represents a single sample. The generator should consume a single epoch of the
data.
This returns a function outputting `features` and `target` based on the dict
of numpy arrays. The dict `features` has the same keys as an element yielded
from x.
Example:
```python
def generator():
for index in range(10):
yield {'height': np.random.randint(32,36),
'age': np.random.randint(18, 80),
'label': np.ones(1)}
with tf.Session() as session:
input_fn = generator_io.generator_input_fn(
generator, target_key="label", batch_size=2, shuffle=False,
num_epochs=1)
```
Args:
x: Generator Function, returns a `Generator` that will yield the data
in `dict` of numpy arrays
target_key: String or Container of Strings, the key or Container of keys of
the numpy arrays in x dictionaries to use as target.
batch_size: Integer, size of batches to return.
num_epochs: Integer, number of epochs to iterate over data. If `None` will
run forever.
shuffle: Boolean, if True shuffles the queue. Avoid shuffle at prediction
time.
queue_capacity: Integer, size of queue to accumulate.
num_threads: Integer, number of threads used for reading and enqueueing.
pad_value: default value for dynamic padding of data samples, if provided.
Returns:
Function, that returns a feature `dict` with `Tensors` and an optional
label `dict` with `Tensors`, or if target_key is `str` label is a `Tensor`
Raises:
TypeError: `x` is not `FunctionType`.
TypeError: `x()` is not `GeneratorType`.
TypeError: `next(x())` is not `dict`.
TypeError: `target_key` is not `str` or `target_key` is not `Container`
of `str`.
KeyError: `target_key` not a key or `target_key[index]` not in next(`x()`).
KeyError: `key` mismatch between dicts emitted from `x()`
"""
if not isinstance(x, FunctionType):
raise TypeError(
'x must be generator function; got {}'.format(type(x).__name__))
generator = x()
if not isinstance(generator, GeneratorType):
raise TypeError(
'x() must be generator; got {}'.format(type(generator).__name__))
data = next(generator)
if not isinstance(data, dict):
raise TypeError('x() must yield dict; got {}'.format(type(data).__name__))
input_keys = sorted(next(x()).keys())
if target_key is not None:
if isinstance(target_key, str):
target_key = [target_key]
elif isinstance(target_key, Container):
for item in target_key:
if not isinstance(item, str):
raise TypeError('target_key must be str or Container of str; got {}'.
format(type(item).__name__))
if item not in input_keys:
raise KeyError(
'target_key not in yielded dict. Expected {} keys; got {}'.format(
input_keys, item))
else:
raise TypeError('target_key must be str or Container of str; got {}'.
format(type(target_key).__name__))
def _generator_input_fn():
"""generator input function."""
queue = enqueue_data(
x,
queue_capacity,
shuffle=shuffle,
num_threads=num_threads,
enqueue_size=batch_size,
num_epochs=num_epochs,
pad_value=pad_value)
features = (queue.dequeue_many(batch_size)
if num_epochs is None else queue.dequeue_up_to(batch_size))
if not isinstance(features, list):
features = [features]
features = dict(zip(input_keys, features))
if target_key is not None:
if len(target_key) > 1:
target = {key: features.pop(key) for key in target_key}
else:
target = features.pop(target_key[0])
return features, target
return features
return _generator_input_fn
|
[
"[email protected]"
] | |
bbbb760b22d3a07d2b3d10445c267f72ed9fcfbd
|
e0b6f5bd451aa8af3273fbc948799637681342e1
|
/scripts/wm_representation/functions/IEM_conditions/IEM_condition.py
|
244e5b35232d3da6732fe524c6e5c3d6790c863a
|
[] |
no_license
|
davidbestue/encoding
|
6b304f6e7429f94f97bd562c7544d1fdccf7bdc1
|
c27319aa3bb652b3bfc6b7340044c0fda057bc62
|
refs/heads/master
| 2022-05-05T23:41:42.419252 | 2022-04-27T08:34:52 | 2022-04-27T08:34:52 | 144,248,690 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,024 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 1 18:24:32 2019
@author: David Bestue
"""
## Import functions prom the previous path
import sys
import os
previous_path = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
sys.path.insert(1, previous_path)
from model_functions import *
from fake_data_generator import *
from Weights_matrixs import *
from Representation import *
from process_encoding import *
from process_wm import *
from data_to_use import *
from bootstrap_functions import *
from joblib import Parallel, delayed
import multiprocessing
import time
import random
from sklearn.model_selection import KFold
import multiprocessing
multiprocessing.cpu_count()
### use the cores so we do not run out of memory
numcores = multiprocessing.cpu_count()
if numcores>20:
numcores=numcores-10
if numcores<10:
numcores=numcores-3
##paths to save the files
path_save_signal ='/home/david/Desktop/Reconstructions/IEM/IEM_target_far_delay.xlsx' #cross_b001_target_mix_octave_1_7_far.xlsx'
path_save_shuffle = '/home/david/Desktop/Reconstructions/IEM/shuff_IEM_target_far_delay.xlsx'
## options (check the filename too!)
decoding_thing = 'Target' #'Distractor' #'Target'
Distance_to_use = 'far' #'close' 'far'
training_time= 'delay' #'stim_p' 'delay' 'respo'
## depending on the options, I will use one condition or the other
if decoding_thing=='Distractor':
cond_t = '2_7'
elif decoding_thing=='Target': ##at some point we can go for the response, though it should be similar
cond_t = '1_7'
# depending on the options, the TRs used for the training will be different
if training_time=='stim_p':
tr_st=3
tr_end=4
elif training_time=='delay':
tr_st=4
tr_end=6
elif training_time=='respo':
if decoding_thing=='Target':
tr_st=8
tr_end=9
elif decoding_thing=='Distractor':
tr_st=11
tr_end=12
## dictionary and list to save the files
Reconstructions={}
Reconstructions_shuff=[]
## elements for the loop
Conditions=['1_0.2', '1_7', '2_0.2', '2_7'] # '1_0.2', '1_7', '2_0.2', '2_7'
Subjects=['d001', 'n001', 'b001', 'r001', 's001', 'l001'] #'d001', 'n001', 'b001', 'r001', 's001', 'l001'
brain_regions = ['visual', 'ips', 'pfc'] # 'visual', 'ips', 'pfc'
ref_angle=180
num_shuffles = 10 #00
for Subject in Subjects:
for Brain_region in brain_regions:
#plt.figure()
### Data to use
enc_fmri_paths, enc_beh_paths, wm_fmri_paths, wm_beh_paths, masks = data_to_use( Subject, 'together', Brain_region)
##### Process training data
training_activity, training_behaviour = preprocess_wm_files(wm_fmri_paths, masks, wm_beh_paths, condition=cond_t,
distance=Distance_to_use, sys_use='unix', nscans_wm=nscans_wm, TR=2.335)
#
#training activity
if training_time=='stim_p':
delay_TR_cond = training_activity[:, tr_st, :]
if training_time=='delay':
delay_TR_cond = np.mean(training_activity[:, tr_st:tr_end, :], axis=1) ## training_activity[:, 8, :]
if training_time=='respo':
delay_TR_cond = training_activity[:, tr_st, :]
#
if decoding_thing=='Distractor':
training_thing = training_behaviour['Dist']
elif decoding_thing=='Target':
training_thing = training_behaviour['T']
##### Train your weigths
WM, Inter = Weights_matrix_LM( delay_TR_cond, training_thing )
WM_t = WM.transpose()
for idx_c, Condition in enumerate(Conditions):
if Condition == cond_t:
training_activity, training_behaviour = delay_TR_cond, training_thing
enc_fmri_paths, enc_beh_paths, wm_fmri_paths, wm_beh_paths, masks = data_to_use( Subject, 'together', Brain_region)
testing_activity, testing_behaviour = preprocess_wm_files(wm_fmri_paths, masks, wm_beh_paths,
condition=Condition, distance=Distance_to_use, sys_use='unix', nscans_wm=nscans_wm, TR=2.335)
#
Reconstruction = IEM_cross_condition_kfold(testing_activity= testing_activity, testing_behaviour=testing_behaviour,
decode_item= decoding_thing, WM=WM, WM_t=WM_t, Inter=Inter, tr_st=tr_st, tr_end=tr_end, n_slpits=10)
Reconstructions[Subject + '_' + Brain_region + '_' + Condition]=Reconstruction
shuff = IEM_cross_condition_kfold_shuff(testing_activity=testing_activity, testing_behaviour=testing_behaviour,
decode_item=decoding_thing, WM=WM, WM_t=WM_t, Inter=Inter, condition=Condition, subject=Subject, region=Brain_region,
iterations=num_shuffles, tr_st=tr_st, tr_end=tr_end, ref_angle=180, n_slpits=10)
Reconstructions_shuff.append(shuff)
###Reconstructions_shuff.append(shuff)
else:
Reconstruction, shuff = all_process_condition_shuff( Subject=Subject, Brain_Region=Brain_region, WM=WM, WM_t=WM_t,
distance=Distance_to_use, decode_item= decoding_thing, iterations=num_shuffles, Inter=Inter, Condition=Condition,
method='together', heatmap=False) #100
Reconstructions[Subject + '_' + Brain_region + '_' + Condition]=Reconstruction
Reconstructions_shuff.append(shuff)
### Save signal
### Get signal from the reconstructions (get the signal before; not done in the function in case you want to save the whole)
### If you want to save the whole reconstruction, uncomment the following lines
### Save Reconstructions
# path_save_reconstructions = #
# writer = pd.ExcelWriter(path_save_reconstructions)
# for i in range(len(Reconstructions.keys())):
# Reconstructions[Reconstructions.keys()[i]].to_excel(writer, sheet_name=Reconstructions.keys()[i]) #each dataframe in a excel sheet
# writer.save() #save reconstructions (heatmaps)
#Save just the signal (around the decoding thing)
Decoding_df =[]
for dataframes in Reconstructions.keys():
df = Reconstructions[dataframes]
a = pd.DataFrame(df.iloc[ref_angle*2,:]) ##*2 because there are 720
a = a.reset_index()
a.columns = ['times', 'decoding'] # column names
a['decoding'] = [sum(df.iloc[:,i] * f2(ref_angle)) for i in range(len(a))] #"population vector method" scalar product
a['times']=a['times'].astype(float)
a['region'] = dataframes.split('_')[1]
a['subject'] = dataframes.split('_')[0]
a['condition'] = dataframes.split('_')[-2] + '_' + dataframes.split('_')[-1]
Decoding_df.append(a)
Df = pd.concat(Decoding_df)
Df['label'] = 'signal' # add the label of signal (you will concatenate this df with the shuffling one)
Df.to_excel( path_save_signal ) #save signal
### Save Shuffle (in shuffles you do not need to get the *2 thing because it is done inside the function)
Df_shuffs = pd.concat(Reconstructions_shuff)
Df_shuffs['label'] = 'shuffle' ## add the label of shuffle
Df_shuffs.to_excel(path_save_shuffle) #save shuffle
|
[
"[email protected]"
] | |
ba41f7aef79a7bcb7d8f12a8308d4d45eacd1ce8
|
6f4e925bf4538d104f1e3e9754d4297c5504ab80
|
/python/recall/app/domain/__init__.py
|
55879d938b278a1a92324ce2cb752388ae90ff9d
|
[
"MIT"
] |
permissive
|
kingreatwill/openself
|
7f02282da3e0b1f328c3627d83ba2b5ed4563dc8
|
8517d24e665b39371835ecd2ed0cd3509a5d9d62
|
refs/heads/master
| 2023-01-23T13:15:49.491402 | 2020-11-19T02:39:52 | 2020-11-19T02:39:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 596 |
py
|
import models
class Domain:
def __init__(self, model: models.db.Document):
self.model = model
    # list (paginated query);
def list(self, size=10, index=1, **kwargs):
size = int(size)
index = int(index)
return self.model.objects(**kwargs).skip((index - 1) * size).limit(size)
    # detail (single record);
def get(self, id):
return self.model.objects(**{self.model.key(): id}).first()
def update(self, id, **kwargs):
model = self.model.objects(**{self.model.key(): id}).first()
if model:
return model.update(**kwargs)
return True
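# A minimal usage sketch; ``models.User`` is a hypothetical Document of the
# host app (this module only assumes ``models.db.Document`` subclasses that
# expose a ``key()`` method naming the lookup field).
if __name__ == "__main__":
    users = Domain(models.User)              # hypothetical model class
    print(list(users.list(size=10, index=1)))
    print(users.get("some-user-id"))         # illustrative key value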
|
[
"[email protected]"
] | |
f8d8ee4061dbff936f37094f60a8e6e5b2dbd040
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p2DJ/New/program/qiskit/noisy/startQiskit_noisy241.py
|
d59a29805cadc73e57d6ef26c5940d08ffc753ef
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,185 |
py
|
# qubit number=2
# total number=11
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
#for i in range(n):
# prog.measure(input_qubit[i], classicals[i])
prog.swap(input_qubit[1],input_qubit[0]) # number=2
prog.swap(input_qubit[1],input_qubit[0]) # number=3
prog.cx(input_qubit[0],input_qubit[1]) # number=8
prog.x(input_qubit[1]) # number=9
prog.cx(input_qubit[0],input_qubit[1]) # number=10
prog.cx(input_qubit[0],input_qubit[1]) # number=7
prog.rx(-2.73004401596953,input_qubit[1]) # number=6
prog.z(input_qubit[1]) # number=4
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
backend = FakeVigo()
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit_noisy241.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
[
"[email protected]"
] | |
5a0c7f2232c9b5b9b6aebd0299f3b756198fbcab
|
a1488a281e582373b7270d85059f08330c0b685d
|
/dueros/directive/Display/tag/NewTag.py
|
86f19cc6e51950cde6ea45a11b1821aef4bfab0f
|
[
"Apache-2.0"
] |
permissive
|
xuchengzhi/bot-sdk-python
|
473fb8e7df629a6168983e26de74546bbca32768
|
966d103d55f9f1220c00d806ac13d0754015a31c
|
refs/heads/master
| 2020-12-06T17:00:55.587643 | 2019-10-18T02:54:45 | 2019-10-18T02:54:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 409 |
py
|
#!/usr/bin/env python3
# -*- encoding=utf-8 -*-
# description:
# author:jack
# create_time: 2018/9/17
"""
desc:pass
"""
from dueros.directive.Display.tag.TagTypeEnum import TagTypeEnum
from dueros.directive.Display.tag.BaseTag import BaseTag
class NewTag(BaseTag):
def __init__(self):
super(NewTag, self).__init__(TagTypeEnum.TAG_TYPE_NEW, '最新')
if __name__ == '__main__':
pass
|
[
"[email protected]"
] | |
71de969b7a916bf5eab2c78a574d3186cf9d290b
|
365186abceefc51e811706ad325a2f53a63a25f8
|
/tests/scripts/comparisonPlots.py
|
e0fbf4cda1982e9d1ea7945d69f32ca47e3b51b7
|
[
"MIT"
] |
permissive
|
tboudreaux/pytopsscrape
|
a857bdca8558bf86f7afd5f8f3e6d2e5ca90fa64
|
c9f95e6a6419debb0b6a22f22d1574a8bbf73bd0
|
refs/heads/master
| 2023-04-06T17:48:08.812425 | 2023-04-04T00:01:13 | 2023-04-04T00:01:13 | 532,559,997 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,499 |
py
|
import matplotlib.pyplot as plt
from pyTOPSScrape.parse import load_opal
import os
import datetime
def make_comparison_plot():
TargetPath = "./GS98Target.opac"
TestPath = "./GS98TestResult.opac"
OPALPath = "./GS98OPAL.opac"
targetTime = datetime.datetime.fromtimestamp(os.path.getmtime(TargetPath))
testTime = datetime.datetime.fromtimestamp(os.path.getmtime(TestPath))
OPALTime = datetime.datetime.fromtimestamp(os.path.getmtime(OPALPath))
print(f"Target File Last Modified at: {targetTime}")
print(f"Test File Last Modified at: {testTime}")
print(f"OPAL Comp File Last Modified at: {OPALTime}")
Target = load_opal(TargetPath)
Test = load_opal(TestPath)
OPAL = load_opal(OPALPath)
fig, ax = plt.subplots(1,1,figsize=(10,7))
ax.plot(Target[0], Target[2][75, :, 13], label="Current Test Target")
ax.plot(Test[0], Test[2][75, :, 13], label="Test Result")
ax.plot(OPAL[0], OPAL[2][75, :, 13], label="OPAL")
ax.legend()
ax.set_xlabel("Log T")
ax.set_ylabel("Opacity")
ax.set_title("Comparision made at log(R)=-1.5")
plt.savefig("comparison.pdf", bbox_inches='tight')
fig, ax = plt.subplots(1,1,figsize=(10,7))
ax.plot(Target[0], Target[2][75, :, 13] - Test[2][75, :, 13])
ax.set_xlabel("Log T")
ax.set_ylabel("Opacity")
ax.set_title("Target - Result Residuals made at log(R)=-1.5")
plt.savefig("TRResid.pdf", bbox_inches='tight')
if __name__ == "__main__":
    make_comparison_plot()
|
[
"[email protected]"
] | |
6c7175ef0bf5c454553094b3f009ebac86114775
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/violin/_metasrc.py
|
466d497ebca4399340d12c5b16b62c2cd713264a
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 |
MIT
| 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null |
UTF-8
|
Python
| false | false | 392 |
py
|
import _plotly_utils.basevalidators
class MetasrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="metasrc", parent_name="violin", **kwargs):
super(MetasrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
[
"[email protected]"
] | |
c3e40fb6c21a8b78a1912a6dddd65973f62ce9b6
|
a82dfb61b17fa66b9c75fe871401cff77aa77f56
|
/libmcell/definition/doc.py
|
48cdb0ffba9e2e4089d530fbc690a17b0601d563
|
[
"MIT"
] |
permissive
|
mcellteam/mcell
|
49ca84048a091de8933adccc083d31b7bcb1529e
|
3920aec22c55013b78f7d6483b81f70a0d564d22
|
refs/heads/master
| 2022-12-23T15:01:51.931150 | 2021-09-29T16:49:14 | 2021-09-29T16:49:14 | 10,253,341 | 29 | 12 |
NOASSERTION
| 2021-07-08T01:56:40 | 2013-05-23T20:59:54 |
C++
|
UTF-8
|
Python
| false | false | 7,463 |
py
|
"""
Copyright (C) 2021 by
The Salk Institute for Biological Studies
Use of this source code is governed by an MIT-style
license that can be found in the LICENSE file or at
https://opensource.org/licenses/MIT.
"""
import sys
import os
import yaml
from constants import *
from gen import indent_and_fix_rst_chars, yaml_type_to_py_type, get_default_or_unset_value_py
def cat_to_title(cat):
if cat == CATEGORY_CONSTANTS:
return 'Enums and Constants'
else:
return cat.replace('_', ' ').capitalize()
def write_cat_label(f, cat):
f.write('.. _api-' + cat + ':\n\n')
def gen_example_links(base_links):
split_links = base_links.strip().split()
n = len(split_links)
if n == 0:
return ''
res = 'Example' + ('' if n == 1 else 's') + ': '
for l in split_links:
name = os.path.basename(os.path.dirname(l)) + '/' + os.path.basename(l)
res += '`' + name + ' <' + EXAMPLES_BASE_URL + l + '>`_ '
return res
def write_h4(f, text, name, class_name):
f.write('.. _' + class_name + '__' + name + ':\n\n')
f.write(text + '\n')
f.write('-' * len(text) + '\n\n')
def get_method_declaration(method):
res = method[KEY_NAME] + ' ('
if KEY_PARAMS in method:
num_params = len(method[KEY_PARAMS])
for i in range(num_params):
param = method[KEY_PARAMS][i]
t = yaml_type_to_py_type(param[KEY_TYPE])
res += param[KEY_NAME] + ': ' + t
if KEY_DEFAULT in param:
res += '=' + get_default_or_unset_value_py(param)
if i != num_params - 1:
res += ', '
res += ')'
if KEY_RETURN_TYPE in method:
res += ' -> ' + yaml_type_to_py_type(method[KEY_RETURN_TYPE])
return res
def generate_class_documentation(f, class_name, class_def):
f.write(class_name + '\n' + '='*len(class_name) + '\n\n')
if KEY_DOC in class_def:
f.write(class_def[KEY_DOC].strip() + '\n\n')
if KEY_EXAMPLES in class_def:
f.write(gen_example_links(class_def[KEY_EXAMPLES]) + '\n\n')
if KEY_ITEMS in class_def and class_def[KEY_ITEMS]:
f.write('Attributes:\n' + '*'*len('Attributes:') + '\n')
num_items = len(class_def[KEY_ITEMS])
for item in class_def[KEY_ITEMS]:
t = yaml_type_to_py_type(item[KEY_TYPE])
header = item[KEY_NAME] + ': ' + t
write_h4(f, header, item[KEY_NAME], class_name)
if KEY_DOC in item and item[KEY_DOC]:
f.write(' | ' + indent_and_fix_rst_chars(item[KEY_DOC].strip(), ' | ') + '\n')
if KEY_DEFAULT in item:
f.write(' | - default argument value in constructor: ' + get_default_or_unset_value_py(item))
f.write('\n')
if KEY_EXAMPLES in item:
f.write('\n | ' + gen_example_links(item[KEY_EXAMPLES]) + '\n\n')
f.write('\n')
if KEY_METHODS in class_def and class_def[KEY_METHODS]:
        f.write('\nMethods:\n' + '*'*len('Methods:') + '\n')
for method in class_def[KEY_METHODS]:
method_name = method[KEY_NAME]
header = get_method_declaration(method)
write_h4(f, header, method_name, class_name)
if KEY_DOC in method:
f.write('\n | ' + indent_and_fix_rst_chars(method[KEY_DOC].strip(), ' | ') + '\n\n')
if KEY_PARAMS in method:
num_params = len(method[KEY_PARAMS])
for param in method[KEY_PARAMS]:
t = yaml_type_to_py_type(param[KEY_TYPE])
f.write('* | ' + param[KEY_NAME] + ': ' + t)
if KEY_DEFAULT in param:
f.write(' = ' + get_default_or_unset_value_py(param))
if KEY_DOC in param:
f.write('\n | ' + indent_and_fix_rst_chars(param[KEY_DOC].strip(), ' | ') + '\n\n')
else:
f.write('\n')
if KEY_EXAMPLES in method:
f.write(' | ' + gen_example_links(method[KEY_EXAMPLES]) + '\n\n')
f.write('\n')
f.write('\n')
def generate_documentation(data_classes):
# generate constants
with open(os.path.join(DOC_DIRECTORY, CATEGORY_CONSTANTS + EXT_RST), 'w') as f:
write_cat_label(f, CATEGORY_CONSTANTS)
f.write(
'*******************\n' +
cat_to_title(CATEGORY_CONSTANTS) + '\n' +
'*******************\n\n'
)
# generate enums first, then constants
enums = data_classes[KEY_ENUMS]
for enum in enums:
enum_name = enum[KEY_NAME]
f.write(enum_name + '\n' + '='*len(enum_name) + '\n\n')
if KEY_DOC in enum:
f.write('\n | ' + indent_and_fix_rst_chars(enum[KEY_DOC].strip(), ' | ') + '\n\n')
for value in enum[KEY_VALUES]:
f.write('* | **' + value[KEY_NAME] + '** = ' + str(value[KEY_VALUE]) + '\n')
if KEY_DOC in value:
f.write(' | ' + indent_and_fix_rst_chars(value[KEY_DOC].strip(), ' | ') + '\n\n')
f.write('\n')
f.write('\n\n')
c = 'Constants'
f.write(c + '\n' + '='*len(c) + '\n\n')
constants = data_classes[KEY_CONSTANTS]
for const in constants:
const_name = const[KEY_NAME]
f.write('* | **' + const_name + '**: ' + yaml_type_to_py_type(const[KEY_TYPE]) + \
' = ' + str(const[KEY_VALUE]) +'\n')
if KEY_DOC in const:
f.write(' | ' + indent_and_fix_rst_chars(const[KEY_DOC].strip(), ' | ') + '\n\n')
f.write('\n\n')
# then generate classes into files by category
for cat in CATEGORIES:
if cat == CATEGORY_CONSTANTS:
continue
input_file = cat + EXT_RST
with open(os.path.join(DOC_DIRECTORY, input_file), 'w') as f:
write_cat_label(f, cat)
cat_name = cat_to_title(cat)
f.write('*'*len(cat_name) + '\n' + cat_name + '\n' + '*'*len(cat_name) + '\n')
for key, value in sorted(data_classes.items()):
if key != KEY_CONSTANTS and key != KEY_ENUMS and value[KEY_CATEGORY] == cat:
generate_class_documentation(f, key, value)
# and generate api.rst file
with open(os.path.join(DOC_DIRECTORY, API_RST), 'w') as f:
title = 'Python API Reference'
f.write(
title + '\n' +
'='*len(title) + '\n\n'
)
f.write(
'.. toctree::\n'
' :maxdepth: 2\n'
' :hidden:\n'
' :caption: Contents\n\n'
)
for cat in CATEGORIES:
f.write(' ' + cat + '\n')
f.write('\nThis section contains automatically generated documentation on Python classes, enums, '
'and constants provided by MCell.\n\n')
for cat in CATEGORIES:
f.write('- :ref:`api-' + cat + '`\n')
|
[
"[email protected]"
] | |
23458d70bd4f9ae696d8d81fa5c01f56971f7da7
|
3b7474148c07df7f4755106a3d0ada9b2de5efdc
|
/training/c25_flask/examples/world_api/original/tools.py
|
1dc1684dc799c4417844116947e284621a8d0bee
|
[] |
no_license
|
juancsosap/pythontraining
|
7f67466846138f32d55361d64de81e74a946b484
|
1441d6fc9544042bc404d5c7efffd119fce33aa7
|
refs/heads/master
| 2021-08-26T05:37:15.851025 | 2021-08-11T22:35:23 | 2021-08-11T22:35:23 | 129,974,006 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,890 |
py
|
import pymysql
from flask import request, jsonify, render_template, make_response, abort
def xmlify(template, value):
text = render_template(template, value=value)
response = make_response(text)
response.headers['Content-Type'] = 'application/xml'
return response
def prepare_response(template, info):
if len(info) > 0:
formats = ['application/json', 'application/xml']
accept = request.accept_mimetypes.best_match(formats)
if accept == 'application/json':
return jsonify(info)
elif accept == 'application/xml':
return xmlify(template, info)
else:
abort(406)
return make_response(jsonify({}), 204)
class MySQLDBManager:
def __init__(self, **kwargs):
self.host = kwargs['host'] if 'host' in kwargs else 'localhost'
self.port = kwargs['port'] if 'port' in kwargs else 3306
self.user = kwargs['user'] if 'user' in kwargs else 'root'
self.password = kwargs['password']
self.db = kwargs['db']
def connect(self):
self.conn = pymysql.connect(host=self.host,
port=self.port,
db=self.db,
user=self.user,
password=self.password)
self.cursor = self.conn.cursor()
def disconnect(self):
if self.conn:
self.conn.close()
def execute(self, sql, *args):
if len(args) > 0:
self.cursor.execute(sql, args)
else:
self.cursor.execute(sql)
result = self.cursor.fetchall()
return result
dbman = MySQLDBManager(password='roottoor', db='world')
module_name = 'tools.tools'
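def demo_query():
    """A minimal usage sketch; assumes the stock MySQL 'world' sample schema
    is reachable with the credentials configured above (table and column
    names come from that schema)."""
    dbman.connect()
    rows = dbman.execute("SELECT Name FROM city WHERE CountryCode = %s", "NLD")
    dbman.disconnect()
    return rows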
if __name__ == '__main__':
print('Loading {} module'.format(module_name))
else:
print('Importing {} module'.format(module_name))
|
[
"[email protected]"
] | |
d37148f04674bda7b996896bf1686b7c6b7db8c4
|
a8b0599af76b5393039431f876be00d628a1fe43
|
/backend/kangas/server/__init__.py
|
4f348efc3c7c9d70831cec668eae3966b7210d79
|
[
"Apache-2.0"
] |
permissive
|
comet-ml/kangas
|
c951f648d890dca5a66cbab405d3437be2f3e9e3
|
df0c1a495032cc4f1c367c74fcb0ef6e5a2063be
|
refs/heads/main
| 2023-06-12T23:38:43.068259 | 2023-06-05T18:38:34 | 2023-06-05T19:28:33 | 550,324,241 | 944 | 41 |
Apache-2.0
| 2023-06-05T19:28:35 | 2022-10-12T15:10:04 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 2,379 |
py
|
# -*- coding: utf-8 -*-
######################################################
# _____ _____ _ _ #
# (____ \ _ | ___) (_) | | #
# _ \ \ ____| |_ ____| | ___ ___ _ _ | | #
# | | | )/ _ | _)/ _ | |(_ / __) |/ || | #
# | |__/ ( ( | | | ( ( | | |__| | | | ( (_| | #
# |_____/ \_||_|___)\_||_|_____/|_| |_|\____| #
# #
# Copyright (c) 2023 Kangas Development Team #
# All rights reserved #
######################################################
import os
from .queries import KANGAS_ROOT # noqa
def start_tornado_server(port, debug_level=None, max_workers=None):
"""
Args:
port: (int) the port to start the frontend server
debug_level: (str) None means suppress output from servers
"""
import asyncio
from concurrent.futures import ThreadPoolExecutor
import tornado
import tornado.log
import tornado.options
import tornado.web
from .tornado_server import datagrid_handlers
async def main():
if debug_level is not None:
tornado.options.options["logging"] = debug_level
tornado.log.enable_pretty_logging()
# set max_workers
executor = ThreadPoolExecutor(max_workers=max_workers)
print(
"Kangas tornado backend server starting with %s max workers"
% executor._max_workers
)
for handler in datagrid_handlers:
handler[1].executor = executor
app = tornado.web.Application(datagrid_handlers)
app.listen(port)
await asyncio.Event().wait()
try:
asyncio.run(main())
except KeyboardInterrupt:
print()
print("Exiting Kangas tornado backend server")
def start_flask_server(host, port, debug_level=None, max_workers=None):
from .flask_server import run
if max_workers is None:
max_workers = min(32, os.cpu_count() + 4)
print("Kangas flask backend server starting with %s max workers" % max_workers)
try:
run(
host=host,
port=port,
debug_level=debug_level,
max_workers=max_workers,
)
except KeyboardInterrupt:
print()
print("Exiting Kangas flask backend server")
|
[
"[email protected]"
] | |
1e1ae4854016d822cbf704f310b243729c7e3e4a
|
21acc25dd3969318dd8476e364fe2fb4eabfe4f4
|
/podcastninja/migrations/0005_auto_20150423_1005.py
|
5c94549f4db338ebc255bd4a0a32cb7727b5426a
|
[] |
no_license
|
monty5811/podcastninja
|
72dc98375974b1714a8457b09126981a76166b9a
|
94a55536270f3e1c4e4f2160e0a24e79c9f40b7f
|
refs/heads/master
| 2020-05-17T01:24:57.312486 | 2015-05-25T15:18:03 | 2015-05-25T15:18:03 | 35,883,288 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 526 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('podcastninja', '0004_podcastitem_s3_url'),
]
operations = [
migrations.AlterField(
model_name='podcastitem',
name='s3_url',
field=models.TextField(blank=True, null=True, verbose_name=b's3 url', validators=[django.core.validators.URLValidator()]),
),
]
|
[
"[email protected]"
] | |
dc9bc77e75ec86cb2ad265207209d03d37bf69a4
|
7950c4faf15ec1dc217391d839ddc21efd174ede
|
/leetcode-cn/1929.0_Concatenation_of_Array.py
|
d8ab060fd5948df008b621e9dca0f8d6bf0d9362
|
[] |
no_license
|
lixiang2017/leetcode
|
f462ecd269c7157aa4f5854f8c1da97ca5375e39
|
f93380721b8383817fe2b0d728deca1321c9ef45
|
refs/heads/master
| 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 528 |
py
|
from typing import List

'''
Runtime: 36 ms, beating 38.78% of all Python3 submissions
Memory usage: 15.1 MB, beating 51.26% of all Python3 submissions
'''
class Solution:
def getConcatenation(self, nums: List[int]) -> List[int]:
return nums + nums
'''
Runtime: 36 ms, beating 38.78% of all Python3 submissions
Memory usage: 15.1 MB, beating 47.15% of all Python3 submissions
'''
class Solution:
def getConcatenation(self, nums: List[int]) -> List[int]:
return nums * 2
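if __name__ == "__main__":
    # Quick check of the variant above (input/output from the problem's example).
    print(Solution().getConcatenation([1, 2, 1]))  # -> [1, 2, 1, 1, 2, 1]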
|
[
"[email protected]"
] | |
3668163b33ba19dd7eff00d702f7712c5fd93349
|
8a41a7f9340cfa784cb36d35dca1ecb1630e4097
|
/Programming/Python/Databases/mongodb_practice/mongodb_with_docker_container_class_based.py
|
2b5256a980b7d9de036f2423af2cae13cf65bfc6
|
[] |
no_license
|
anishst/Learn
|
02e6b6cce43cf21621d328ef0fc25168267a9a3d
|
a1aed8b78b19acdb23e20be57b67fb242e0aefc5
|
refs/heads/master
| 2022-05-13T10:17:40.293640 | 2022-03-30T12:44:21 | 2022-03-30T12:44:21 | 173,595,812 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,266 |
py
|
# UNDER DEV NOT FULLY WORKING
import uuid
import pymongo
class Database(object):
URI = "mongodb://192.168.1.50:27017"
DATABASE = None
@staticmethod
def initialize():
client = pymongo.MongoClient(Database.URI)
Database.DATABASE = client['items_test']
@staticmethod
def insert(collection, data):
Database.DATABASE[collection].insert(data)
@staticmethod
def find(collection, query):
return Database.DATABASE[collection].find(query)
@staticmethod
def find_one(collection, query):
return Database.DATABASE[collection].find_one(query)
@staticmethod
def update(collection, query, data):
Database.DATABASE[collection].update(query, data, upsert=True)
@staticmethod
def remove(collection, query):
return Database.DATABASE[collection].remove(query)
class Items:
def __init__(self, store, url, desc, target_price, _id=None):
self._id = uuid.uuid4().hex if _id is None else _id
self.store = store
self.url = url
self.desc = desc
self.target_price = target_price
def __repr__(self):
return "<Item {} with URL {}>".format(self.store, self.url)
def save_to_mongo(self):
Database.update("items_test", {'_id': self._id}, self.json())
def json(self):
return {
"_id": self._id,
"name": self.store,
"url": self.url,
"desc": self.desc,
"target_price": self.target_price
}
def delete(self):
Database.remove('items_test', {'_id': self._id})
@staticmethod
def get_all_items():
return [elem for elem in Database.find('items_test', {})]
@staticmethod
def get_by_id(id):
return Database.find_one('items_test', {"_id": id})
Database.initialize()
# add new item
# new_item = Items('amazon', 'url', 'desc1', '30')
# new_item.save_to_mongo()
# print(len(new_item.get_all_items()))
all_items = Database.find('items_test',{})
for item in all_items:
print(item["_id"])
print(item["name"])
print(item["url"])
# get by id
print(Items.get_by_id('67913520e1af4ca2b0ed7f9abb5b5019'))
# delete item (delete() is an instance method, so rebuild the item from its stored document first)
doc = Items.get_by_id('67913520e1af4ca2b0ed7f9abb5b5019')
if doc:
    Items(doc['name'], doc['url'], doc['desc'], doc['target_price'], _id=doc['_id']).delete()
# total count
print(len(Items.get_all_items()))
|
[
"[email protected]"
] | |
c389303e3146bc35ff821cb1d46e512bb30de237
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AnttechMorseMarketingSrtaConsultModel.py
|
ed15c1198c9962d4b975315a6f97b110b2a9d905
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 |
Apache-2.0
| 2023-04-25T04:54:02 | 2018-05-14T09:40:54 |
Python
|
UTF-8
|
Python
| false | false | 3,452 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AnttechMorseMarketingSrtaConsultModel(object):
def __init__(self):
self._anonymous_mobile_sha_256_list = None
self._blind_mobile_sha_256 = None
self._extend_params = None
self._order_amount = None
self._resource_id = None
@property
def anonymous_mobile_sha_256_list(self):
return self._anonymous_mobile_sha_256_list
@anonymous_mobile_sha_256_list.setter
def anonymous_mobile_sha_256_list(self, value):
self._anonymous_mobile_sha_256_list = value
@property
def blind_mobile_sha_256(self):
return self._blind_mobile_sha_256
@blind_mobile_sha_256.setter
def blind_mobile_sha_256(self, value):
self._blind_mobile_sha_256 = value
@property
def extend_params(self):
return self._extend_params
@extend_params.setter
def extend_params(self, value):
self._extend_params = value
@property
def order_amount(self):
return self._order_amount
@order_amount.setter
def order_amount(self, value):
self._order_amount = value
@property
def resource_id(self):
return self._resource_id
@resource_id.setter
def resource_id(self, value):
self._resource_id = value
def to_alipay_dict(self):
params = dict()
if self.anonymous_mobile_sha_256_list:
if hasattr(self.anonymous_mobile_sha_256_list, 'to_alipay_dict'):
params['anonymous_mobile_sha_256_list'] = self.anonymous_mobile_sha_256_list.to_alipay_dict()
else:
params['anonymous_mobile_sha_256_list'] = self.anonymous_mobile_sha_256_list
if self.blind_mobile_sha_256:
if hasattr(self.blind_mobile_sha_256, 'to_alipay_dict'):
params['blind_mobile_sha_256'] = self.blind_mobile_sha_256.to_alipay_dict()
else:
params['blind_mobile_sha_256'] = self.blind_mobile_sha_256
if self.extend_params:
if hasattr(self.extend_params, 'to_alipay_dict'):
params['extend_params'] = self.extend_params.to_alipay_dict()
else:
params['extend_params'] = self.extend_params
if self.order_amount:
if hasattr(self.order_amount, 'to_alipay_dict'):
params['order_amount'] = self.order_amount.to_alipay_dict()
else:
params['order_amount'] = self.order_amount
if self.resource_id:
if hasattr(self.resource_id, 'to_alipay_dict'):
params['resource_id'] = self.resource_id.to_alipay_dict()
else:
params['resource_id'] = self.resource_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AnttechMorseMarketingSrtaConsultModel()
if 'anonymous_mobile_sha_256_list' in d:
o.anonymous_mobile_sha_256_list = d['anonymous_mobile_sha_256_list']
if 'blind_mobile_sha_256' in d:
o.blind_mobile_sha_256 = d['blind_mobile_sha_256']
if 'extend_params' in d:
o.extend_params = d['extend_params']
if 'order_amount' in d:
o.order_amount = d['order_amount']
if 'resource_id' in d:
o.resource_id = d['resource_id']
return o
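# --- Illustrative addition (not in the original file): a minimal round-trip
# through to_alipay_dict()/from_alipay_dict(); all field values below are
# made-up examples.
if __name__ == '__main__':
    model = AnttechMorseMarketingSrtaConsultModel()
    model.blind_mobile_sha_256 = '9f86d081884c7d65'  # hypothetical digest fragment
    model.order_amount = '100.00'
    d = model.to_alipay_dict()
    restored = AnttechMorseMarketingSrtaConsultModel.from_alipay_dict(d)
    assert restored.order_amount == '100.00'
    print(json.dumps(d))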
|
[
"[email protected]"
] | |
c89f1e925348210ada55438f3e47f2b3572cbe03
|
0412893529999de784ab9cb914f385ba788a3684
|
/test/test_ack_collector_down.py
|
af6a6ac2ccc5999ccfadc2c84f1e1ec9cacdf9c9
|
[
"Apache-2.0"
] |
permissive
|
JeremyTangCD/lm-sdk-python
|
0326bf034c16b022b760600dc18fe7aaad42fa26
|
2a15e055e5a3f72d2f2e4fb43bdbed203c5a9983
|
refs/heads/master
| 2020-04-15T15:39:59.276224 | 2019-01-09T09:55:36 | 2019-01-09T09:55:36 | 164,803,314 | 0 | 0 |
Apache-2.0
| 2019-01-09T09:58:55 | 2019-01-09T06:33:40 |
Python
|
UTF-8
|
Python
| false | false | 1,154 |
py
|
# coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import logicmonitor_sdk
from logicmonitor_sdk.models.ack_collector_down import AckCollectorDown # noqa: E501
from logicmonitor_sdk.rest import ApiException
class TestAckCollectorDown(unittest.TestCase):
"""AckCollectorDown unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAckCollectorDown(self):
"""Test AckCollectorDown"""
# FIXME: construct object with mandatory attributes with example values
# model = logicmonitor_sdk.models.ack_collector_down.AckCollectorDown() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
8860fd14e571f6895267fbdf6e37de2a1b996050
|
dfab6798ece135946aebb08f93f162c37dd51791
|
/timber/luban.timber/__init__.py
|
a1439a83d3cd277a7cb77b0454e6d6e5598f66c6
|
[] |
no_license
|
yxqd/luban
|
405f5f7dcf09015d214079fe7e23d644332be069
|
00f699d15c572c8bf160516d582fa37f84ac2023
|
refs/heads/master
| 2020-03-20T23:08:45.153471 | 2012-05-18T14:52:43 | 2012-05-18T14:52:43 | 137,831,650 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 840 |
py
|
# -*- Python -*-
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2006-2011 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# ************************************************************
# bad bad
import luban
luban.__doc__ += """* timber: default extension of luban core
"""
# ************************************************************
# activate extensions
from . import elements, actions
from . import luban_ext
from . import controller # replace the core controllers with timber controllers. see eg .controllers.CherrypyController
from .controller import setUploadPath
# End of file
|
[
"[email protected]"
] | |
87477ba53d15435cb55aa99b65ce10afdee5a360
|
e52501eb4db862d90ae5541bd512a50df30e0726
|
/Chapter 2+3 Intro + Variables + Strings/Chapter3-7 ShrinkingGuestList.py
|
47c9f766034b09d285472e34a1f448ce0ac89821
|
[] |
no_license
|
ericnwin/Python-Crash-Course-Lessons
|
b2b12c221f545c961a47f2343b2aa3dac901927b
|
7b755c0b4ce65528f4880b3583aca3be9547b33b
|
refs/heads/master
| 2022-12-19T10:55:35.987330 | 2020-09-13T23:23:54 | 2020-09-13T23:23:54 | 295,255,228 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,190 |
py
|
# You just found out that your new dinner table won’t
# arrive in time for the dinner, and you have space for only two guests.
# • Start with your program from Exercise 3-6. Add a new line that prints a
# message saying that you can invite only two people for dinner.
# • Use pop() to remove guests from your list one at a time until only two
# names remain in your list. Each time you pop a name from your list, print
# a message to that person letting them know you’re sorry you can’t invite
# them to dinner.
# • Print a message to each of the two people still on your list, letting them
# know they’re still invited.
# • Use del to remove the last two names from your list, so you have an empty
# list. Print your list to make sure you actually have an empty list at the end
# of your program.
dinner_guests = ['Joeji', 'Elon Musk', 'OpenAI']
print(
f"Hey {dinner_guests[0]} I'm a huge fan of your music! Please join me for dinner. ")
print(f"Hey {dinner_guests[1]} can I get a free car? We can talk over dinner.")
print(f"Hey {dinner_guests[2]} teach me AI. I gib food as payment.")
# Declare who can't make it
declined_invitations = "OpenAI"
dinner_guests.remove(declined_invitations)
print(f"Unfortunately {declined_invitations} can't make it.\n")
# Adding new person to invite list
new_person_invite = "Kanye West"
dinner_guests.append(new_person_invite)
print(dinner_guests)
# Making 2nd set of invitations
print(
'\n' f"Hey {dinner_guests[0]} I'm a huge fan of your music! Please join me for dinner. ")
print(f"Hey {dinner_guests[1]} can I get a free car? We can talk over dinner.")
print(f"Hey {dinner_guests[2]} I loved you in Titanic. Please eat with me.\n")
# shrinking down to 2 people and sending msg to those who are invited
print(f"Hey sorry we only have room for two... I'm uninviting one of you sorry.\n")
uninvited = dinner_guests.pop()
print(f"Hey sorry {uninvited} you've been uninvited :( \n")
print(f"Hey {dinner_guests[0]} you're still invited.")
print(f"Hey {dinner_guests[1]} you're still invited.")
# Remove last 2 names from list and printing out an empty list
del dinner_guests[0]
del dinner_guests[0]
print(dinner_guests)
|
[
"[email protected]"
] | |
644f53da5330e99d42a57e2457baa4815d3cc52f
|
d0a54a3faa1891b647f8c621521cd26c13bd2926
|
/backend/mytts.py
|
b5bd2373d11ec245d0b144f5f903e259d2fd903f
|
[
"MIT"
] |
permissive
|
ishine/PTTS-WebAPP
|
166318593d3247c88d458c9d4fe39dca27ef408f
|
dcc07a79d8dd695ca15e4dd5a69811b3ddd91709
|
refs/heads/main
| 2023-04-02T06:03:41.237351 | 2021-04-14T02:37:16 | 2021-04-14T02:37:16 | 357,388,655 | 0 | 0 |
MIT
| 2021-04-14T02:37:17 | 2021-04-13T01:31:11 | null |
UTF-8
|
Python
| false | false | 2,595 |
py
|
#!/usr/bin/env python
import os.path as osp
import librosa
import torch
from .hparams import HParam
from .transform import StandardNorm, TextProcessor
from .models import MelGenerator, ParallelText2Mel
from .synthesizer import Synthesizer
try:
from .manager import GPUManager
except ImportError as err:
print(err); gm = None
else:
gm = GPUManager()
def select_device(device):
cpu_request = device.lower() == 'cpu'
# if device requested other than 'cpu'
if device and not cpu_request:
c = 1024 ** 2 # bytes to MB
x = torch.cuda.get_device_properties(int(device))
s = f'Using torch {torch.__version__} '
print("%sCUDA:%s (%s, %dMB)" % (s, device, x.name, x.total_memory / c))
return torch.device(f'cuda:{device}')
else:
print(f'Using torch {torch.__version__} CPU')
return torch.device('cpu')
class MyTTS:
def __init__(self, config=None, device=None):
if torch.cuda.is_available():
index = device if device else str(0 if gm is None else gm.auto_choice())
else:
index = 'cpu'
self.device = device = select_device(index)
self.hparams = hparams = HParam(config) \
if config else HParam(osp.join(osp.dirname(osp.abspath(__file__)), "config", "default.yaml"))
checkpoint = osp.join(osp.dirname(osp.abspath(__file__)), "pretrained", hparams.parallel.checkpoint)
vocoder_checkpoint = osp.join(osp.dirname(osp.abspath(__file__)), "pretrained", hparams.vocoder.checkpoint)
normalizer = StandardNorm(hparams.audio.spec_mean, hparams.audio.spec_std)
processor = TextProcessor(hparams.text)
text2mel = ParallelText2Mel(hparams.parallel)
text2mel.eval()
vocoder = MelGenerator(hparams.audio.n_mel_channels).to(device)
vocoder.eval(inference=True)
self.synthesizer = Synthesizer(
model=text2mel,
checkpoint=checkpoint,
vocoder=vocoder,
vocoder_checkpoint=vocoder_checkpoint,
processor=processor,
normalizer=normalizer,
device=device
)
def __call__(self, texts, speed, volume, tone):
rate = int(tone) / 3
alpha = (4 / int(speed)) * rate
beta = int(volume) / 3
wave = self.synthesizer.inference(texts, alpha=alpha, beta=beta)
wave = wave.cpu().detach().numpy()
sr = self.hparams.audio.sampling_rate
# use TSM + resample to change tone
wave = librosa.core.resample(wave, int(sr*rate), sr)
return wave, sr
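# --- Illustrative usage sketch (not in the original file), assuming the
# default config and the pretrained checkpoints referenced above are present:
#
#     tts = MyTTS()  # picks a free GPU when one is available
#     wave, sr = tts('Hello world.', speed='3', volume='3', tone='3')
#     # e.g. write it out with the (third-party) soundfile package:
#     # soundfile.write('out.wav', wave, sr)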
|
[
"[email protected]"
] | |
b2fcc624e79ef9ef10c62818cb0c7d2d93c0d250
|
080bbe77da955b3917435c25fc63b90b0f3c724e
|
/botorch/utils/multi_objective/box_decomposition.py
|
e566f0c69e493acd4370a0a28582374334f572aa
|
[
"MIT"
] |
permissive
|
irinaespejo/botorch
|
3d15d962ff0f5bb34fbd11b2eb7549db755af705
|
e4dcf603fdaf83f0e5f8b9b392f943c89dfff7eb
|
refs/heads/master
| 2023-07-11T18:02:11.853790 | 2021-08-19T15:57:21 | 2021-08-19T15:58:12 | 316,017,084 | 0 | 0 |
MIT
| 2020-11-25T18:02:11 | 2020-11-25T18:02:09 | null |
UTF-8
|
Python
| false | false | 744 |
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
DEPRECATED - Box decomposition algorithms.
Use the botorch.utils.multi_objective.box_decompositions instead.
"""
import warnings
from botorch.utils.multi_objective.box_decompositions.non_dominated import ( # noqa F401
NondominatedPartitioning,
)
warnings.warn(
"The botorch.utils.multi_objective.box_decomposition module has "
"been renamed to botorch.utils.multi_objective.box_decompositions. "
"botorch.utils.multi_objective.box_decomposition will be removed in "
"the next release.",
DeprecationWarning,
)
|
[
"[email protected]"
] | |
a8569f82ed1a73ffbd59f8b49866754ec53e411d
|
9dfb3372a1e4516d970a6e9d0a9fd8360580eae7
|
/python pySerial/maping_data.py
|
feb9a76200b26899373a1eeba25711e6b4835877
|
[] |
no_license
|
clambering-goat/cameron_pyton
|
d1cd0e7b04da14e7ba4f89dcb4d973f297a4626c
|
df0b0365b86e75cfcfc2c1fc21608f1536a3b79f
|
refs/heads/master
| 2021-07-14T20:37:37.021401 | 2019-02-28T07:52:11 | 2019-02-28T07:52:11 | 137,251,669 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 418 |
py
|
import serial
y_points=[]
with serial.Serial('COM4', 9600, timeout=1) as ser:
for q in range(20000):
line =ser.readline()
x=line.decode("utf-8")
#print(x)
y_points.append(int(x))
import matplotlib.pyplot as plt
x_points=[]
for q in range(len(y_points)):
x_points.append(q)
plt.plot(x_points,y_points)
plt.ylabel('serial reading')
plt.xlabel('sample index')
plt.show()
|
[
"[email protected]"
] | |
664fef8dbbee5f880d4f0a0866edc6ccd5676737
|
0facb323be8a76bb4c168641309972fa77cbecf2
|
/Configurations/HWWSemiLepHighMass/Full_v6Production/template_seed/templates_jhchoi/MassPoints2018/List_MX.py
|
ca93c1c06e444ba9cee292c6bdab834fd117111f
|
[] |
no_license
|
bhoh/SNuAnalytics
|
ef0a1ba9fa0d682834672a831739dfcfa1e7486b
|
34d1fc062e212da152faa83be50561600819df0e
|
refs/heads/master
| 2023-07-06T03:23:45.343449 | 2023-06-26T12:18:28 | 2023-06-26T12:18:28 | 242,880,298 | 0 | 1 | null | 2020-02-25T01:17:50 | 2020-02-25T01:17:49 | null |
UTF-8
|
Python
| false | false | 396 |
py
|
List_MX=[
115 ,
120 ,
124 ,
125 ,
126 ,
130 ,
135 ,
140 ,
145 ,
150 ,
155 ,
160 ,
165 ,
170 ,
175 ,
180 ,
190 ,
200 ,
210 ,
230 ,
250 ,
270 ,
300 ,
350 ,
400 ,
450 ,
500 ,
550 ,
600 ,
650 ,
700 ,
750 ,
800 ,
900 ,
1000 ,
1500 ,
2000 ,
2500 ,
3000 ,
4000 ,
5000 ,
]
if __name__ == '__main__':
#print('( '+" ".join(str(MX) for MX in List_MX)+' )')
print " ".join(str(MX) for MX in List_MX)
|
[
"[email protected]"
] | |
145a90c675971039d677b9e3411c7b6f30d2cde6
|
59be93c710d9e1750d2767f1c98f347ed3dc635c
|
/elements/when.py
|
308f8c8d7c8fbbc825abfaf9cd8a8914f92fd203
|
[
"MIT"
] |
permissive
|
artemZholus/elements
|
802d14eb574be0c3f18a50fdbc87ee262fbcd01a
|
21b4f27e854d91a65619e8fc81b3916386c5ef66
|
refs/heads/main
| 2023-07-10T05:21:28.947510 | 2021-08-18T18:02:05 | 2021-08-18T18:02:05 | 397,594,638 | 0 | 0 |
MIT
| 2021-08-18T12:35:49 | 2021-08-18T12:35:49 | null |
UTF-8
|
Python
| false | false | 711 |
py
|
class Every:
def __init__(self, every):
self._every = every
self._last = None
def __call__(self, step):
step = int(step)
if not self._every:
return False
if self._last is None:
self._last = step
return True
if step >= self._last + self._every:
self._last += self._every
return True
return False
class Once:
def __init__(self):
self._once = True
def __call__(self):
if self._once:
self._once = False
return True
return False
class Until:
def __init__(self, until):
self._until = until
def __call__(self, step):
step = int(step)
if not self._until:
return True
return step < self._until
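# --- Illustrative usage sketch (added; not part of the original module):
# these helpers gate periodic work inside a training loop. The step values
# below are arbitrary examples.
if __name__ == '__main__':
    log_every = Every(100)    # fires on the first call, then every 100 steps
    train_until = Until(300)  # True while step < 300
    setup_once = Once()       # True exactly once
    for step in (0, 50, 100, 250, 300):
        print(step, log_every(step), train_until(step))
    print('setup:', setup_once(), setup_once())  # True, then False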
|
[
"[email protected]"
] | |
c07aa82c886d791ed37e80ecf66b26fe3ba26449
|
f59860bb4d04007cf03258753aefcbf58e760db0
|
/music/migrations/0005_song_datetime.py
|
a64764e5215f82e94025a21d14a4720153be91ab
|
[] |
no_license
|
Arefeh902/station_49
|
fc306d7668d64c68df7dba35adbdc25d5600544a
|
3076e4ab616759f5aa0a973525c0436b603f942f
|
refs/heads/master
| 2023-07-01T10:25:39.820956 | 2021-08-10T18:47:28 | 2021-08-10T18:47:28 | 391,368,241 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 386 |
py
|
# Generated by Django 2.1.9 on 2021-08-07 08:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('music', '0004_auto_20210807_0806'),
]
operations = [
migrations.AddField(
model_name='song',
name='datetime',
field=models.DateTimeField(auto_now=True),
),
]
|
[
"[email protected]"
] | |
b563563bd985a3f9d737ea973f8314bd6fb8f40d
|
9d2b33eb85fca3a81ccb7272422c41a08467a8c4
|
/pdm/formats/requirements.py
|
37c230c8eab420dab9e877204074086b6fe605a6
|
[
"MIT"
] |
permissive
|
skyoo2003/pdm
|
9b9d20079f325c087855c7a37a4270d36fa71131
|
95a758ee259dff02f00f0a3eab79fa23e5d2aa97
|
refs/heads/master
| 2022-12-12T18:01:36.331560 | 2020-09-04T01:16:41 | 2020-09-04T01:16:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,729 |
py
|
import hashlib
import urllib.parse
from pip._internal.req.req_file import parse_requirements
from pdm.models.markers import Marker
from pdm.models.requirements import parse_requirement
from pdm.utils import get_finder
def _requirement_to_str_lowercase_name(requirement):
"""Formats a packaging.requirements.Requirement with a lowercase name."""
parts = [requirement.name.lower()]
if requirement.extras:
parts.append("[{0}]".format(",".join(sorted(requirement.extras))))
if requirement.specifier:
parts.append(str(requirement.specifier))
if requirement.url:
parts.append("@ {0}".format(requirement.url))
if requirement.marker:
parts.append("; {0}".format(requirement.marker))
return "".join(parts)
def requirement_from_ireq(ireq):
"""Formats an `InstallRequirement` instance as a
`pdm.models.requirement.Requirement`.
Generic formatter for pretty printing InstallRequirements to the terminal
in a less verbose way than using its `__str__` method.
:param :class:`InstallRequirement` ireq: A pip **InstallRequirement** instance.
:return: A formatted string for prettyprinting
:rtype: str
"""
if ireq.editable:
line = "{}".format(ireq.link)
else:
line = _requirement_to_str_lowercase_name(ireq.req)
if str(ireq.req.marker) != str(ireq.markers):
if not ireq.req.marker:
line = "{}; {}".format(line, ireq.markers)
else:
name, markers = line.split(";", 1)
markers = Marker(markers) & ireq.markers
line = "{}; {}".format(name, markers)
return parse_requirement(line, ireq.editable)
def parse_requirement_file(filename):
from pip._internal.req.constructors import install_req_from_parsed_requirement
finder = get_finder([])
ireqs = [
install_req_from_parsed_requirement(pr)
for pr in parse_requirements(filename, finder.session, finder)
]
return ireqs, finder
def check_fingerprint(project, filename):
import tomlkit
with open(filename, encoding="utf-8") as fp:
try:
tomlkit.parse(fp.read())
except ValueError:
# the file should be a requirements.txt if it is not a TOML document.
return True
else:
return False
def convert_url_to_source(url, name=None):
if not name:
name = hashlib.sha1(url.encode("utf-8")).hexdigest()[:6]
return {"name": name, "url": url, "verify_ssl": url.startswith("https://")}
def convert(project, filename):
ireqs, finder = parse_requirement_file(str(filename))
reqs = [requirement_from_ireq(ireq) for ireq in ireqs]
data = {"dependencies": dict(req.as_req_dict() for req in reqs)}
if finder.index_urls:
sources = [convert_url_to_source(finder.index_urls[0], "pypi")]
sources.extend(convert_url_to_source(url) for url in finder.index_urls[1:])
data["source"] = sources
return data
def export(project, candidates, options):
lines = []
for candidate in candidates:
req = candidate.req.as_line()
lines.append(req)
if options.hashes and candidate.hashes:
for item in candidate.hashes.values():
lines.append(f" \\\n --hash={item}")
lines.append("\n")
sources = project.tool_settings.get("source", [])
for source in sources:
url = source["url"]
prefix = "--index-url" if source["name"] == "pypi" else "--extra-index-url"
lines.append(f"{prefix} {url}\n")
if not source["verify_ssl"]:
host = urllib.parse.urlparse(url).hostname
lines.append(f"--trusted-host {host}\n")
return "".join(lines)
|
[
"[email protected]"
] | |
bef3b9ad03bdc33f7171cc9b588f198ce873e861
|
62922a76e40003f3d3a7d02282853f9a2b76c6fc
|
/cv2/ch22/test1.py
|
1172f8de48d2bc9bfba7168431a2727b16325054
|
[] |
no_license
|
cchangcs/ai_learning_record
|
a7d0d9c7fcdc1e97d8869aa7e63b535f8cf62df2
|
235a90ff5fe0205334376a927d462b8ae64e4e70
|
refs/heads/master
| 2020-04-01T16:59:31.203223 | 2018-11-21T11:12:34 | 2018-11-21T11:12:34 | 153,408,023 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,103 |
py
|
# encoding:utf-8
'''
Blob detection with SimpleBlobDetector().
By default the detector finds dark blobs; to detect light blobs, set
filterByColor to True and blobColor to 255.
A blob is a region whose colour or grey level differs from its surroundings.
Real images contain many such blobs -- a tree or a patch of grass can each be one.
Because a blob stands for a whole region rather than a single corner point, it is
more stable and more robust to noise, which makes it important for image registration.
Blobs can also be regions of interest in their own right: in medicine and biology,
for example, one may need the position or count of characteristic spots in X-ray
or cell-microscopy images.
'''
import cv2
import numpy as np
im = cv2.imread('blob.jpg', cv2.IMREAD_GRAYSCALE)
detector = cv2.SimpleBlobDetector_create()
keypoints = detector.detect(im)
im_with_keypoints = cv2.drawKeypoints(im, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow("Keypoints", im_with_keypoints)
cv2.waitKey(0)
cv2.destroyAllWindows()
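# --- Illustrative variant (added): as the note above says, the detector
# finds dark blobs by default; to find light blobs instead, pass params:
#
#     params = cv2.SimpleBlobDetector_Params()
#     params.filterByColor = True
#     params.blobColor = 255  # 255 = light blobs, 0 = dark blobs
#     detector = cv2.SimpleBlobDetector_create(params)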
|
[
"[email protected]"
] | |
b1d84ff6d8719c6d1cb346458bafaa88df886d86
|
0facb323be8a76bb4c168641309972fa77cbecf2
|
/Configurations/HWWSemiLepHighMass/nanoAODv5/v6_production/2017/NJET_biined_WJets/SKIM10/HMVar10_Full_ALL_var/MassPoints/structure_M1500.py
|
006d035cd83abd3e70ffc306361571ee477e383b
|
[] |
no_license
|
bhoh/SNuAnalytics
|
ef0a1ba9fa0d682834672a831739dfcfa1e7486b
|
34d1fc062e212da152faa83be50561600819df0e
|
refs/heads/master
| 2023-07-06T03:23:45.343449 | 2023-06-26T12:18:28 | 2023-06-26T12:18:28 | 242,880,298 | 0 | 1 | null | 2020-02-25T01:17:50 | 2020-02-25T01:17:49 | null |
UTF-8
|
Python
| false | false | 1,725 |
py
|
#['WW', 'ggHWWlnuqq_M1500', 'DY', 'DATA', 'WZ', 'ggHWWlnuqq_M125', 'ZZZ', 'ggHWWlnuqq_M900', 'vbfHWWlnuqq_M500', 'Wjets1j', 'QCD_MU', 'WZZ', 'vbfHWWlnuqq_M900', 'QCD_bcToE', 'Wjets2j', 'QCD_EM', 'ggHWWlnuqq_M500', 'ZZ', 'WWW', 'vbfHWWlnuqq_M1500', 'vbfHWWlnuqq_M125', 'WWZ', 'Wjets0j', 'top']
QCD_MU=['QCD_Pt-15to20_MuEnrichedPt5',
'QCD_Pt-20to30_MuEnrichedPt5',
'QCD_Pt-30to50_MuEnrichedPt5',
'QCD_Pt-50to80_MuEnrichedPt5',
'QCD_Pt-80to120_MuEnrichedPt5',
'QCD_Pt-120to170_MuEnrichedPt5',
'QCD_Pt-170to300_MuEnrichedPt5',
'QCD_Pt-300to470_MuEnrichedPt5',
'QCD_Pt-470to600_MuEnrichedPt5',
'QCD_Pt-600to800_MuEnrichedPt5',
'QCD_Pt-800to1000_MuEnrichedPt5',
'QCD_Pt-1000toInf_MuEnrichedPt5',
]
QCD_EM=[
'QCD_Pt-20to30_EMEnriched',
'QCD_Pt-30to50_EMEnriched',
'QCD_Pt-50to80_EMEnriched',
'QCD_Pt-80to120_EMEnriched',
'QCD_Pt-120to170_EMEnriched',
'QCD_Pt-170to300_EMEnriched',
'QCD_Pt-300toInf_EMEnriched'
]
QCD_bcToE=[
'QCD_Pt_20to30_bcToE',
'QCD_Pt_30to80_bcToE',
'QCD_Pt_80to170_bcToE',
'QCD_Pt_170to250_bcToE',
'QCD_Pt_250toInf_bcToE',
]
for name in [ 'DY', 'WZZ', 'WWZ','WWW','ZZZ', 'ZZ', 'WZ', 'WW', 'WpWmJJ_EWK_QCD_noHiggs', 'top', 'Wjets0j', 'Wjets1j', 'Wjets2j','vbfHWWlnuqq_M125','ggHWWlnuqq_M125'] + ['QCD_MU','QCD_EM','QCD_bcToE']:
structure[name] = {
'isSignal' : 0,
'isData' : 0
}
#ggHWWlnuqq_M1500_S_B_I
structure['ggHWWlnuqq_M1500'] = {
'isSignal' : 1,
'isData' : 0
}
structure['vbfHWWlnuqq_M1500'] = {
'isSignal' : 1,
'isData' : 0
}
structure['PseudoData'] = {
'isSignal' : 0,
'isData' : 1
}
|
[
"[email protected]"
] | |
622882398cd5c1e5077722f60c7aa9e77ef203af
|
0ad8fc76aebe7ce22abe771fbeadf227e5b471cb
|
/app/productdb/tasks.py
|
8cb6cd358ef178da1d7cd5290af4ab4a8389c040
|
[
"MIT"
] |
permissive
|
ppavlu/product-database
|
354c6a1a3e9ebfdc931f2aacf8751ed0f149401c
|
09610c09600c63eb91106c0b5a2fa995b134dbf4
|
refs/heads/master
| 2021-01-17T22:51:43.247027 | 2015-10-11T11:37:12 | 2015-10-11T11:37:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,251 |
py
|
from django_project.celery import app as app
from app.productdb.models import Settings
import app.productdb.crawler.cisco_eox_api_crawler as cisco_eox_api_crawler
import logging
logger = logging.getLogger(__name__)
@app.task(serializer='json', name="synchronize_with_cisco_eox_api")
def execute_task_to_synchronize_cisco_eox_states():
"""
This task automatically synchronizes the Cisco EoX states with the local database. It executes the
configured queries and saves the results locally. There are two modes of operation:
* cisco_eox_api_auto_sync_auto_create_elements set to true - creates any element that is not
  blacklisted and not yet in the database
* cisco_eox_api_auto_sync_auto_create_elements set to false - only updates entries that already
  exist in the database
:return:
"""
logger.info("execute synchronize Cisco EoX update task...")
# update based on the configured query settings
result = cisco_eox_api_crawler.synchronize_with_cisco_eox_api()
logger.info("result: %s" % str(result))
s = Settings.objects.get(id=0)
s.eox_api_sync_task_id = ""
s.save()
return result
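# --- Illustrative usage note (added): as a Celery task this normally runs
# from the scheduler, but it can be exercised in-process, e.g. in tests:
#
#     result = execute_task_to_synchronize_cisco_eox_states.apply().get()
#
# .apply() executes eagerly in the current process; .delay() would enqueue
# the task for a worker instead.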
|
[
"[email protected]"
] | |
89c2127cdb13c6c6248abfba21d3cdb0eba90d73
|
3b9fdd117bfcfa86b1e354b2c193727e7567cb76
|
/proxyuser17/proxyuser17/apps/myapp/models.py
|
9a06f4b7a626979e8ea1971891cbd06fbebd22c1
|
[
"BSD-3-Clause"
] |
permissive
|
marcofucci/django-ticket-24506
|
84beb913b2db2993034e199c3a63c2cde60fa9aa
|
88a36d792e77fb70c12224ea6a02774015ddbc84
|
refs/heads/master
| 2021-01-21T11:37:23.621884 | 2015-03-22T12:55:25 | 2015-03-22T12:55:25 | 32,672,309 | 0 | 1 | null | 2015-03-22T12:55:25 | 2015-03-22T11:11:24 |
Python
|
UTF-8
|
Python
| false | false | 314 |
py
|
from django.db import models
class FKUserModel(models.Model):
user = models.ForeignKey('core.User')
def __unicode__(self):
return u'%s' % self.user
class OneToOneUserModel(models.Model):
user = models.OneToOneField('core.User')
def __unicode__(self):
return u'%s' % self.user
|
[
"[email protected]"
] | |
c3ca98e5d8c7a2b7a60bee0667267c57f753f0a6
|
6bf97e57103b9ddd639a91a0c371f86b3bce60d8
|
/pure_ee/earth_engine_start.py
|
48154028788ff39dfead3649a9176c9cbc69a261
|
[
"Apache-2.0"
] |
permissive
|
geosconsulting/gee_wapor
|
655372722eed9e1c00ef7880870bd85c7953d64a
|
c3c451fcb21664172a74647fe5d9e56f312aa1df
|
refs/heads/master
| 2021-01-13T05:01:37.233979 | 2017-03-01T14:09:08 | 2017-03-01T14:09:08 | 81,427,303 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 327 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 3 04:59:03 2017
@author: fabio
"""
# Import the Earth Engine Python Package
import ee
# Initialize the Earth Engine object, using the authentication credentials.
ee.Initialize()
# Print the information for an image asset.
image = ee.Image('srtm90_v4')
print(image.getInfo())
|
[
"[email protected]"
] | |
b2516c9040789df5a0e98f754aab40508283b38c
|
c834c1b7ef5d0039a706f174ed3f7b0ab82fa2e5
|
/optOnMysql/data2mysql.py
|
5903606b3171c597649676ce4e1d13f00e79309e
|
[] |
no_license
|
yangze01/Laws-Search-Project
|
126ffc5ec1ad1c2e9d95c2490104e8e37e766ad4
|
d1fff57a9298aa0d883a1b988aa98804d0ab00c1
|
refs/heads/master
| 2021-08-14T15:26:27.455518 | 2017-11-16T03:59:58 | 2017-11-16T03:59:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,597 |
py
|
#coding=utf8
import sys
import time
reload(sys)
sys.setdefaultencoding('utf8')
from optOnMysql.DocumentsOnMysql import *
from optOnMysql.DocumentUnit import *
import json
BasePath = sys.path[0]
def is_valid_date(date_str):
    '''Check whether the string is a valid "%Y-%m-%d" date.'''
    try:
        time.strptime(date_str, "%Y-%m-%d")
        return True
    except (TypeError, ValueError):
        return False
def document_format(line, criminal):
line = json.loads(line.decode('utf8'))
document_unit = dict()
document_unit["title"] = line['title']
# print(len(document_unit['title']))
document_unit["court"] = line['court']
document_unit["url"] = line['url']
document_unit["content"] = '|'.join(line['content']).encode('utf8')
# print(len(document_unit["content"]))
document_unit["criminal"] = criminal
if(is_valid_date(line["date"])):
document_unit["date"] = line['date']
else:
document_unit["date"] = "0000-00-00"
return document_unit
def save_document2mysql(file_path, criminal):
opt = DocumentsOnMysql()
i = 0
for line in open(file_path):
print(i)
i = i + 1
document_unit = document_format(line, criminal)
opt.insertOneDocuments(document_unit)
opt.connClose()
print(u"finished")
if __name__ == "__main__":
opt = DocumentsOnMysql()
# opt.insertOneDocuments(document_unit)
# print(opt)
opt.findById("1")
a = opt.findall()
for i in a :
print(i)
opt.connClose()
# file_path = BasePath + "/../data/judgment_trafficking.txt"
# save_document2mysql(file_path,u"拐卖妇女儿童罪")
|
[
"[email protected]"
] | |
9ea362e7e23645d74ada3c82eae8bd8ed6962067
|
6f6b7e1a9837fb581cc5fed92b66b4ad12ea30f5
|
/19-05-161_STOCK_profit_AIC_BIC_L500_github/4plot_profit_nh6.py
|
4d358b74123f153c55f5113d35716f99896e3ac4
|
[
"MIT"
] |
permissive
|
danhtaihoang/stock
|
f7b4f4989ff0c2b267766761d402adc599fc893d
|
9c3a3e467839dda095a0152055e347254abaf271
|
refs/heads/master
| 2020-06-10T01:35:59.136032 | 2019-06-24T17:13:35 | 2019-06-24T17:13:35 | 193,546,744 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,052 |
py
|
import numpy as np
import matplotlib.pyplot as plt
#=========================================================================================
# average:
p1 = np.loadtxt('profit_cost_nhmax6.dat')
p2 = np.loadtxt('profit_AIC_nhmax6.dat')
p3 = np.loadtxt('profit_BIC_nhmax6.dat')
tmax = np.shape(p1)[0]
t = np.arange(0,tmax,1)
plt.figure(figsize=(20,16))
plt.subplot(2,2,1)
#plt.figure(figsize=(5,4))
plt.title('trade everyday')
plt.plot(t, p1[:,0],'k-',label='cost')
plt.plot(t, p2[:,0],'b-',label='AIC')
plt.plot(t, p3[:,0],'r-',label='BIC')
plt.legend()
plt.xlabel('time')
plt.ylabel('cumulative profit')
plt.ylim([-1,4])
plt.grid(linestyle='dotted')
plt.subplot(2,2,2)
plt.title('not trade everyday')
plt.plot(t, p1[:,1],'k-',label='cost')
plt.plot(t, p2[:,1],'b-',label='AIC')
plt.plot(t, p3[:,1],'r-',label='BIC')
plt.legend()
plt.xlabel('time')
plt.ylabel('cumulative profit')
plt.ylim([-1,4])
plt.grid(linestyle='dotted')
#plt.tight_layout(h_pad=0.8, w_pad=1.2)
plt.savefig('profit_cost_AIC_BIC_nhmax6.pdf', format='pdf', dpi=300)
|
[
"[email protected]"
] | |
5fe9b2191e2862a97b4b0500d3c4777c88eab68c
|
56e96acad654d7480d17d5cae7402a2bc6cbaa76
|
/share/py_module/dataset.py
|
fc4a162fa0c59a4f2c53f521c749910a52a91ef4
|
[] |
no_license
|
LitingLin/VehicleDC
|
641b1e25c22cac2ffb1dcba519b1af5ac7d9f2c8
|
2ac0b8ad708f033b59c0bc924ca7ec169e86b063
|
refs/heads/master
| 2020-05-17T19:30:00.556691 | 2019-07-12T16:21:12 | 2019-07-12T16:21:12 | 183,916,512 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,885 |
py
|
# coding: utf-8
import os
import re
import numpy as np
import torch
from torch.utils import data
from torchvision import transforms as T
from PIL import Image
color_attrs = ['Black', 'Blue', 'Brown',
'Gray', 'Green', 'Pink',
'Red', 'White', 'Yellow']
direction_attrs = ['Front', 'Rear']
type_attrs = ['passengerCar', 'saloonCar',
'shopTruck', 'suv', 'trailer', 'truck', 'van', 'waggon']
class Vehicle(data.Dataset):
"""
Multi-label attribute vectors, for use with a cross-entropy loss.
Expects pre-processed data with all 'unknown' samples removed.
"""
def __init__(self,
root,
transform=None,
is_train=True):
"""
:param root: dataset root directory, one sub-directory per class
:param transform: optional transform; a default pipeline is used when None
:param is_train: True for the training split (only affects log messages)
"""
if not os.path.exists(root):
print('=> [Err]: root not exists.')
return
if is_train:
print('=> train data root: ', root)
else:
print('=> test data root: ', root)
# collect the non-empty sub-directories and sort them by (class) name
self.img_dirs = [os.path.join(root, x) for x in os.listdir(root) \
if os.path.isdir(os.path.join(root, x))]
self.img_dirs = [x for x in self.img_dirs if len(os.listdir(x)) != 0]
if len(self.img_dirs) == 0:
print('=> [Err]: empty sub-dirs.')
return
self.img_dirs.sort()  # natural sort by default, ascending
# print('=> total {:d} classes for training'.format(len(self.img_dirs)))
# split the multi-label attributes into separate vocabularies
self.color_attrs = color_attrs
self.direction_attrs = direction_attrs
self.type_attrs = type_attrs
# file paths ordered by sub-directory (class name)
self.imgs_path = []
self.labels = []
for x in self.img_dirs:
match = re.match('([a-zA-Z]+)_([a-zA-Z]+)_([a-zA-Z]+)', os.path.split(x)[1])
color = match.group(1)  # body color
direction = match.group(2)  # body orientation
type = match.group(3)  # body type
# print('=> color: %s, direction: %s, type: %s' % (color, direction, type))
for y in os.listdir(x):
# record the file path
self.imgs_path.append(os.path.join(x, y))
# build and record the label
color_idx = int(np.where(self.color_attrs == np.array(color))[0])
direction_idx = int(np.where(self.direction_attrs == np.array(direction))[0])
type_idx = int(np.where(self.type_attrs == np.array(type))[0])
label = np.array([color_idx, direction_idx, type_idx], dtype=int)
label = torch.Tensor(label) # torch.from_numpy(label)
self.labels.append(label) # Tensor(label)
# print(label)
if is_train:
print('=> total {:d} samples for training.'.format(len(self.imgs_path)))
else:
print('=> total {:d} samples for testing.'.format(len(self.imgs_path)))
# set up the data transform
if transform is not None:
self.transform = transform
else: # default image transformation
self.transform = T.Compose([
T.Resize(448),
T.CenterCrop(448),
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
# --------------------- serialize imgs_path to disk
# root_parent = os.path.abspath(os.path.join(root, '..'))
# print('=> parent dir: ', root_parent)
# if is_train:
# imgs_path = os.path.join(root_parent, 'train_imgs_path.pkl')
# else:
# imgs_path = os.path.join(root_parent, 'test_imgs_path.pkl')
# print('=> dump imgs path: ', imgs_path)
# pickle.dump(self.imgs_path, open(imgs_path, 'wb'))
def __getitem__(self, idx):
"""
:param idx: sample index
:return: (image tensor, label tensor, relative file path)
"""
image = Image.open(self.imgs_path[idx])
# apply the transform; convert grayscale images to 'RGB' first
if image.mode == 'L' or image.mode == 'I':  # 8-bit or 32-bit grayscale
image = image.convert('RGB')
if self.transform is not None:
image = self.transform(image)
label = self.labels[idx]
f_path = os.path.split(self.imgs_path[idx])[0].split('/')[-2] + \
'/' + os.path.split(self.imgs_path[idx])[0].split('/')[-1] + \
'/' + os.path.split(self.imgs_path[idx])[1]
return image, label, f_path
def __len__(self):
"""os.path.split(self.imgs_path[idx])[0].split('/')[-2]
:return:
"""
return len(self.imgs_path)
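# --- Illustrative usage sketch (added; not in the original file): wiring the
# dataset into a standard PyTorch DataLoader. The root path is a placeholder.
if __name__ == '__main__':
    dataset = Vehicle(root='/path/to/vehicle/train', is_train=True)
    loader = data.DataLoader(dataset, batch_size=16, shuffle=True, num_workers=2)
    for images, labels, paths in loader:
        print(images.shape, labels.shape)  # e.g. [16, 3, 448, 448], [16, 3]
        break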
|
[
"[email protected]"
] | |
6321392e8ea506f89ad8138de98709a7d3aeef3a
|
e5f1befb7c7ca0072747b33086fc6569a6befd01
|
/old/flaskold/json/008.py
|
f4caad8db1b8d37bfc3909c746dc3bac66210760
|
[] |
no_license
|
nepomnyashchii/TestGit
|
ae08d8bb1b7d2ab9389a309fd1dc9e24729b019c
|
c7abf4ab08ee3c2f3ea1fb09a1938bff7a3e0e5c
|
refs/heads/master
| 2020-04-28T23:41:51.053547 | 2020-01-24T12:22:40 | 2020-01-24T12:22:40 | 175,666,093 | 0 | 1 | null | 2019-03-15T13:44:03 | 2019-03-14T17:08:58 | null |
UTF-8
|
Python
| false | false | 147 |
py
|
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
return "test"
if __name__ =="__main__":
app.run(debug=True)
|
[
"[email protected]"
] | |
f4a850cce56f0f8cf0a4527768d60ba75d2eb5df
|
e06ff08424324ac5d6c567ae9cd6954290ff9bd4
|
/Yudi TANG/axe/KNN/KNN_dating.py
|
ba4684b05e29ddc86468b5905cf90baf69208d11
|
[
"Apache-2.0"
] |
permissive
|
JKChang2015/Machine_Learning
|
b1bdfcf9ea43a98fc7efd5c0624bbaf5d9dbf495
|
f8b46bf23e4d1972de6bd652dd4286e9322ed62f
|
refs/heads/master
| 2021-06-06T19:18:16.596549 | 2020-05-03T22:28:18 | 2020-05-03T22:28:18 | 119,390,891 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,671 |
py
|
# KNN_dating
# Created by JKChang
# 29/01/2020, 10:20
# Tag:
# Description: dating people recommendation
# Feature: 1. Number of frequent flyer miles earned per year
# 2. Percentage of time spent playing video games
# 3. Liters of ice cream consumed per week
# classifies:1. doesn't like
# 2. small like
# 3. large like
import operator
import matplotlib.pyplot as plt
# from mpl_toolkits import mplot3d
import numpy as np
def viewMatrix(matrix, labels, arg1, arg2):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(matrix[:, arg1 - 1], matrix[:, arg2 - 1], 15.0 * np.array(labels), 15.0 * np.array(labels))
plt.show()
def view3DMatrix(matrix, labels):
fig = plt.figure()
ax = plt.axes(projection='3d')
# Data for a three-dimensional line
zline = np.linspace(0, 1, 1000)
xline = np.sin(zline)
yline = np.cos(zline)
ax.plot3D(xline, yline, zline, 'gray')
# Data for three-dimensional scattered points
zdata = matrix[:, 0]
xdata = matrix[:, 1]
ydata = matrix[:, 2]
ax.scatter3D(xdata, ydata, zdata, c=labels)
fig.show()
def kNNClassify(newInput, dataSet, labels, k):
numSamples = dataSet.shape[0] # shape[0] stands for the number of rows
# Step 1: calculate Euclidean distance
diff = np.tile(newInput, (numSamples, 1)) - dataSet
squareDiff = diff ** 2
squareSum = squareDiff.sum(axis=1)
distance = squareSum ** 0.5
# Step 2: Sort distance
# argsort() returns the indices that would sort an array in a ascending order
sortedDistIndicies = distance.argsort()
classCount = {} # key: label , value: laebl count
for i in range(k):
# Step 3: choose the min k distance
voteLabel = labels[sortedDistIndicies[i]]
# Step 4: count the label frequency
classCount[voteLabel] = classCount.get(voteLabel, 0) + 1
# Step 5: the max voted class label will return
# Sort the dictionary according to the values
sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
return sortedClassCount[0][0]
def file2matrix(filename):
with open(filename, 'r') as f:
resMatrix = np.zeros((1, 3))
labels = []
for line in f.readlines():
content = line.split('\t')
lineVector = np.asfarray([content[:3]])
resMatrix = np.r_[resMatrix, lineVector]
labels.append(int(content[-1]))
DataMatrix = np.delete(resMatrix, (0), axis=0)
return DataMatrix, labels
def autoNorm(dataSet):
# normalization:
# nor_value = (old_Value - minimum_value) / (max - min)
# get list of minimum value for each col
minValue = dataSet.min(0)
# get list of maximum value for each col
maxValue = dataSet.max(0)
normDataSet = np.zeros(np.shape(dataSet))
m = dataSet.shape[0]
# copy the minValue to size(m,1) matrix
normDataSet = dataSet - np.tile(minValue, (m, 1))
normDataSet = normDataSet / np.tile(maxValue - minValue, (m, 1))
return normDataSet, maxValue - minValue, minValue
def datingClassTest(filename):
hoRatio = 0.1
dataMatrix, labels = file2matrix(filename)
norm_matrix, ranges, min = autoNorm(dataMatrix)
# row number
m = norm_matrix.shape[0]
# number of test row
numTestVecs = int(m * hoRatio)
errorCount = 0.0
for i in range(numTestVecs):
res = kNNClassify(norm_matrix[i, :], norm_matrix[numTestVecs:m, :], labels[numTestVecs:m], 3)
print('The classifier came back with: %d, the real answer is %d' % (res, labels[i]))
if (res != labels[i]):
errorCount += 1.0
print('the total error rate is: %f' % (errorCount / float(numTestVecs)))
def classify_person(test_set_name):
    resultList = ['not at all', 'in small doses', 'in large doses']
    percentTats = float(input('percentage of time spent playing video games? '))
    ffMiles = float(input('frequent flier miles earned per year? '))
    iceCream = float(input('liters of ice cream consumed per week? '))
    datingDataMat, datingLabels = file2matrix(test_set_name)
    normMat, ranges, minVals = autoNorm(datingDataMat)
    inArr = np.array([ffMiles, percentTats, iceCream])
    # note the parentheses: normalize first, then classify
    classifierResult = kNNClassify((inArr - minVals) / ranges, normMat, datingLabels, 3)
    print('You will probably like this person: %s' % resultList[int(classifierResult) - 1])
filename = '../resource/dating/datingTestSet2.txt'
# matrix, labels = file2matrix(filename)
# norm_matrix, ranges, min = autoNorm(matrix)
# view3DMatrix(norm_matrix, labels)
# datingClassTest(filename)
classify_person(filename)
|
[
"[email protected]"
] | |
f8d2154649e59afa419b79b4777684cdda82eb5c
|
56b4d00870af18752b4414495b08e2ec3adf3ae4
|
/src/clims/api/endpoints/process_assignments.py
|
c5fd2f83c03d0928f0637275f0b82856ee822b26
|
[
"BSD-2-Clause"
] |
permissive
|
commonlims/commonlims
|
26c3f937eaa18e6935c5d3fcec823053ab7fefd9
|
36a02ed244c7b59ee1f2523e64e4749e404ab0f7
|
refs/heads/develop
| 2021-07-01T17:20:46.586630 | 2021-02-02T08:53:22 | 2021-02-02T08:53:22 | 185,200,241 | 4 | 1 |
NOASSERTION
| 2021-02-02T08:53:23 | 2019-05-06T13:16:37 |
Python
|
UTF-8
|
Python
| false | false | 1,028 |
py
|
from __future__ import absolute_import
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from sentry.api.base import Endpoint, SessionAuthentication
class ProcessAssignmentsEndpoint(Endpoint):
authentication_classes = (SessionAuthentication, )
permission_classes = (IsAuthenticated, )
def post(self, request, organization_slug):
"""
Assign one or more items to a workflow. The items are identified by global_id.
"""
# TODO-auth: Ensure that the user is only assigning samples that are under the organization
# Entities is a list of global ids (e.g. Substance-100)
entities = request.data["entities"]
definition = request.data["definitionId"]
variables = request.data["variables"]
assignments = list()
assignments += self.app.workflows.batch_assign(
entities, definition, request.user, variables)
return Response({"assignments": len(assignments)}, status=201)
|
[
"[email protected]"
] | |
1186de1cba914cdcc904a0e0a09520080aa16289
|
46492cc7429c83fe362b0ed566fc54982e52c46e
|
/pitches/main/forms.py
|
bb9c5b6a6c3f20f413c47970a696323c03307838
|
[
"MIT"
] |
permissive
|
jakhax/pitches
|
15c8d87825c879b56cd931d26d398e736636134f
|
e56358d00089bd46addd54192220bcca0478e0da
|
refs/heads/master
| 2020-03-18T00:36:09.254870 | 2018-05-20T14:48:14 | 2018-05-20T14:48:14 | 134,102,974 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,399 |
py
|
from flask import current_app, session
from flask_babel import lazy_gettext
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, BooleanField, SelectField, SubmitField, IntegerField
from wtforms import ValidationError
from wtforms.validators import DataRequired, InputRequired, Length, Email, Regexp
from ..models import Role, User, TopicGroup
class FormHelpersMixIn(object):
@property
def submit_fields(self):
return [getattr(self, field) for field, field_type in self._fields.items()
if isinstance(field_type, SubmitField)]
@staticmethod
def is_has_data(*fields):
return any([field.data for field in fields])
def get_flashed_errors(self):
errors = session.pop('_form_errors') if '_form_errors' in session else {}
self.errors.update(errors)
for field, errors in errors.items():
if hasattr(self, field):
form_field = getattr(self, field)
if form_field.errors:
form_field.errors.extend(errors)
else:
form_field.errors = errors
class EditProfileForm(FlaskForm):
name = StringField(lazy_gettext('Real name'), validators=[Length(0, 64)])
homeland = StringField(lazy_gettext('Homeland'), validators=[Length(0, 64)])
about = TextAreaField(lazy_gettext('About me'))
avatar = StringField(lazy_gettext('Link to avatar'), validators=[Length(0, 256)])
submit = SubmitField(lazy_gettext('Save'))
class EditProfileAdminForm(FlaskForm):
email = StringField(lazy_gettext('Email'), validators=[DataRequired(), Length(1, 64), Email()])
username = StringField(lazy_gettext('Username'), validators=[
DataRequired(), Length(1, 32), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0, lazy_gettext(
'Usernames must have only letters, numbers, dots or underscores'))])
confirmed = BooleanField(lazy_gettext('Confirmed'))
role = SelectField(lazy_gettext('Role'), coerce=int)
name = StringField(lazy_gettext('Real name'), validators=[Length(0, 64)])
homeland = StringField(lazy_gettext('Homeland'), validators=[Length(0, 64)])
about = TextAreaField(lazy_gettext('About me'))
avatar = StringField(lazy_gettext('Link to avatar'), validators=[Length(0, 256)])
submit = SubmitField(lazy_gettext('Save'))
def __init__(self, user, *args, **kwargs):
super(EditProfileAdminForm, self).__init__(*args, **kwargs)
self.role.choices = [(role.id, role.name) for role in Role.query.order_by(Role.name).all()]
self.user = user
def validate_email(self, field):
if (field.data.lower() != self.user.email
and User.query.filter_by(email=field.data.lower()).first()):
raise ValidationError(lazy_gettext('Email already registered.'))
def validate_username(self, field):
if (field.data.lower() != self.user.username_normalized
and User.query.filter_by(username_normalized=field.data.lower()).first()):
raise ValidationError(lazy_gettext('Username already in use.'))
class TopicForm(FlaskForm):
title = StringField(lazy_gettext('Title'), validators=[DataRequired(), Length(0, 128)])
group_id = IntegerField(lazy_gettext('Topic group ID'), validators=[InputRequired()])
body = TextAreaField(lazy_gettext('Text'), validators=[DataRequired()], render_kw={'rows': 20})
poll_question="Rank"
poll_answers="Upvote\n Downvote"
submit = SubmitField(lazy_gettext('Save'))
cancel = SubmitField(lazy_gettext('Cancel'))
delete = SubmitField(lazy_gettext('Delete'))
def remove_edit_fields(self):
del self.group_id
del self.delete
def validate_group_id(self, field):
if not TopicGroup.query.filter_by(id=field.data).first():
raise ValidationError(lazy_gettext('Topic group with this ID does not exist.'))
class TopicWithPollForm(FlaskForm):
title = StringField(lazy_gettext('Title'), validators=[DataRequired(), Length(0, 128)])
group_id = IntegerField(lazy_gettext('Topic group ID'), validators=[InputRequired()])
body = TextAreaField(lazy_gettext('Text'), validators=[DataRequired()], render_kw={'rows': 20})
poll_question = StringField(lazy_gettext('Poll question'), validators=[DataRequired(), Length(0, 256)])
poll_answers = TextAreaField(lazy_gettext('Poll answers'), validators=[DataRequired()], render_kw={'rows': 10})
submit = SubmitField(lazy_gettext('Save'))
cancel = SubmitField(lazy_gettext('Cancel'))
delete = SubmitField(lazy_gettext('Delete'))
def remove_edit_fields(self):
del self.group_id
del self.delete
def validate_group_id(self, field):
if not TopicGroup.query.filter_by(id=field.data).first():
raise ValidationError(lazy_gettext('Topic group with this ID does not exist.'))
class TopicGroupForm(FlaskForm):
title = StringField(lazy_gettext('Title'), validators=[DataRequired(), Length(0, 64)])
group_id = IntegerField(lazy_gettext('Parent topic group ID'), validators=[InputRequired()])
priority = SelectField(lazy_gettext('Priority'), coerce=int)
protected = BooleanField(lazy_gettext('Moderators only'))
submit = SubmitField(lazy_gettext('Save'))
cancel = SubmitField(lazy_gettext('Cancel'))
delete = SubmitField(lazy_gettext('Delete'))
def __init__(self, *args, **kwargs):
super(TopicGroupForm, self).__init__(*args, **kwargs)
self.priority.choices = [(p, p) for p in current_app.config['TOPIC_GROUP_PRIORITY']]
def remove_edit_fields(self):
del self.group_id
del self.delete
def validate_group_id(self, field):
if not TopicGroup.query.filter_by(id=field.data).first():
raise ValidationError(lazy_gettext('Topic group with this ID does not exist.'))
class CommentForm(FlaskForm):
body = TextAreaField(lazy_gettext('Leave your comment, {username}:'), validators=[DataRequired()],
render_kw={'rows': 4})
submit = SubmitField(lazy_gettext('Submit'))
def __init__(self, user, *args, **kwargs):
super(CommentForm, self).__init__(*args, **kwargs)
self.body.label.text = self.body.label.text.format(username=user.username)
class CommentEditForm(FlaskForm):
body = TextAreaField(lazy_gettext('Text'), validators=[DataRequired()], render_kw={'rows': 8})
submit = SubmitField(lazy_gettext('Save'))
cancel = SubmitField(lazy_gettext('Cancel'))
delete = SubmitField(lazy_gettext('Delete'))
class MessageReplyForm(FlaskForm):
title = StringField(lazy_gettext('Subject'), validators=[DataRequired(), Length(0, 128)])
body = TextAreaField(lazy_gettext('Text'), validators=[DataRequired()], render_kw={'rows': 4})
send = SubmitField(lazy_gettext('Send'))
close = SubmitField(lazy_gettext('Close'))
delete = SubmitField(lazy_gettext('Delete'))
class MessageSendForm(FlaskForm):
title = StringField(lazy_gettext('Subject'), validators=[DataRequired(), Length(0, 128)])
body = TextAreaField(lazy_gettext('Text'), validators=[DataRequired()], render_kw={'rows': 4})
send = SubmitField(lazy_gettext('Send'))
cancel = SubmitField(lazy_gettext('Cancel'))
class SearchForm(FlaskForm):
text = StringField('', validators=[DataRequired(), Length(1, 64)])
search = SubmitField(lazy_gettext('Search'))
|
[
"[email protected]"
] | |
eef750f84f81a27c35f5f451faf9e9a1b93c1cc4
|
4c117ea3617a576ddd07d8ea8aaab1a925fc402f
|
/bin/individualization/VennPlot.py
|
18f444e66a82a4f9f64427b83e18f591f031b0f6
|
[] |
no_license
|
452990729/Rep-seq
|
7be6058ba3284bea81282f2db7fd3bd7895173ba
|
e217b115791e0aba064b2426e4502a5c1b032a94
|
refs/heads/master
| 2021-12-11T14:27:46.912144 | 2019-06-04T03:49:40 | 2019-06-04T03:49:40 | 190,124,555 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,549 |
py
|
#!/usr/bin/env python
import os
import sys
import re
import matplotlib
matplotlib.use('Agg')
import venn
from matplotlib import pyplot as plt
def HandleFq(file_in):
base = '_'.join(re.split('_', os.path.basename(file_in))[:2])
list_tmp = []
m = 0
with open(file_in, 'r') as in1:
for line in in1:
m += 1
if m%4 == 2:
list_tmp.append(line.strip())
return set(list_tmp), base
def ReadTab(file_in):
list_tmp = []
label = '_'.join(re.split('_', os.path.basename(file_in))[:2])
with open(file_in, 'r') as in1:
for line in in1.readlines()[1:]:
list_tmp.append(re.split('\t', line.strip())[36])
return set(list_tmp), label
def main():
len_arg = len(sys.argv)
if sys.argv[1] == 'fastq':
func = HandleFq
elif sys.argv[1] == 'tab':
func = ReadTab
list_l = []
list_lb = []
for i in range(len_arg-2):
l, lb = func(sys.argv[i+2])
list_l.append(l)
list_lb.append(lb)
labels = venn.get_labels(list_l, fill=['number',])
if len_arg == 4:
fig, ax = venn.venn2(labels, names=list_lb)
elif len_arg == 5:
fig, ax = venn.venn3(labels, names=list_lb)
elif len_arg == 6:
fig, ax = venn.venn4(labels, names=list_lb)
elif len_arg == 7:
fig, ax = venn.venn5(labels, names=list_lb)
elif len_arg == 8:
fig, ax = venn.venn6(labels, names=list_lb)
plt.savefig('{}wayvenn.png'.format(str(len_arg-2)))
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
c423950c678b966d72c428c4dadd7d1045308bbb
|
c536c764aab4170c64f3f8b78bd91593dcb161a3
|
/vigenereb62.py
|
037292215097560084e9451db9c5655b7c2fb996
|
[] |
no_license
|
numberly/vigenere-b62
|
63bbc95c1f9390e9623a5882a9c2a14d110851b4
|
3dea3394ee557ba2e589af014cbc4454ebbbc874
|
refs/heads/master
| 2023-02-16T02:13:31.254670 | 2021-01-11T15:24:58 | 2021-01-11T15:24:58 | 328,698,862 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 535 |
py
|
def iter_reverse_digits(number, base):
while number != 0:
digit = number % base
yield digit
number -= digit
number //= base
def encode(alphabets, seed, size=6):
if len(alphabets) < size:
raise ValueError("There should be an alphabet per character you want")
secret = "".join(
alphabets[i][digit]
for i, digit in enumerate(iter_reverse_digits(seed, len(alphabets[0])))
)
secret += "".join(alphabets[i][0] for i in range(len(secret), size))
return secret
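# --- Illustrative usage sketch (added; not in the original module): real
# callers would supply six independently shuffled base-62 alphabets as the
# secret; an unshuffled alphabet is used here only to show the mechanics.
if __name__ == '__main__':
    import string
    base62 = string.digits + string.ascii_letters  # 62 symbols
    print(encode([base62] * 6, 12345))
    # 12345 = 3*62**2 + 13*62 + 7 -> '7d3' + '000' padding -> '7d3000'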
|
[
"[email protected]"
] | |
1bff440e67a7189665b42fe0833a0c9b007950e7
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_defenders.py
|
bb7548df4efbbe4fec4aeb39f3eec118e52a2ba7
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 252 |
py
|
from xai.brain.wordbase.nouns._defender import _DEFENDER
# class header
class _DEFENDERS(_DEFENDER, ):
def __init__(self,):
_DEFENDER.__init__(self)
self.name = "DEFENDERS"
self.specie = 'nouns'
self.basic = "defender"
self.jsondata = {}
|
[
"[email protected]"
] | |
8339c4b6670fe18b61771e18903739838373f58c
|
01ce2eec1fbad3fb2d98085ebfa9f27c7efb4152
|
/itertools/itertools-combinations.py
|
b32166fe2a76aece52bb636b0b8705a63f17c3ce
|
[
"MIT"
] |
permissive
|
anishLearnsToCode/hackerrank-python
|
4cfeaf85e33f05342df887896fa60dae5cc600a5
|
7d707c07af051e7b00471ebe547effd7e1d6d9d9
|
refs/heads/master
| 2023-01-01T23:39:01.143328 | 2020-11-01T07:31:15 | 2020-11-01T07:31:15 | 265,767,347 | 8 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 252 |
py
|
# https://www.hackerrank.com/challenges/itertools-combinations/problem
import itertools
line = input().split()
word = sorted(line[0])
k = int(line[1])
for i in range(1, k + 1):
for j in itertools.combinations(word, i):
print(''.join(j))
|
[
"[email protected]"
] | |
5722c5bd79ba59802f5e4174de590823f9b31f54
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5631989306621952_1/Python/Hotshot8325/Q2.py
|
c61b1a46284a8ff8a0e7daff7477923bbd7b7f0f
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 968 |
py
|
# CodeJam "Last Word" problem: greedily prepend or append each letter
import csv
import string
#import data from test file in the form [[[],[]],[[],[]].... with [[],[]] being one test case
with open('a-large.in') as csvfile:
testCase = csv.reader(csvfile, delimiter = ' ', quotechar='|')
rowNum = 0
inputText = []
#swapCount = []
for row in testCase:
#row = [str(i) for i in row]
if rowNum == 0:
numTestCases = int(row[0])
else:
inputText.append(row)
rowNum = rowNum + 1
for i in range(0,numTestCases):
letterInput = inputText[i][0]
lastWord = letterInput[0]
for j in range(1,len(letterInput)):
if string.uppercase.index(letterInput[j])>=string.uppercase.index(lastWord[0]):
lastWord = letterInput[j]+lastWord
else:
lastWord = lastWord +letterInput[j]
print "Case #"+str(i+1)+": "+lastWord
|
[
"[email protected]"
] | |
566302b568f0103bd3c6c2d54e6988ac6dd06f4b
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/JD9vSKZGrxQhLbA9r_11.py
|
8153c6d8cc99992256ea1d82f8771cd6328f44f3
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 255 |
py
|
def pile_of_cubes(m):
if m >= 10252519345963644753026: return None
x = m**0.5
if (x%1==0):
c = 1
while (x != c and x > 0):
x = x - c
c = c + 1
if (x == c):
return c
return None
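# --- Added note (illustrative): the check relies on the identity
# 1**3 + 2**3 + ... + n**3 == (n * (n + 1) // 2) ** 2, i.e. m must be the
# square of a triangular number. Example: 1035 == 45 * 46 // 2 and
# 1035**2 == 1071225, so pile_of_cubes(1071225) == 45.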
|
[
"[email protected]"
] | |
05f1c23936d977e70fdef1e44fc27ab9f069cadf
|
55647a80c8b412af9df0ba3f50595cc2f29c25e6
|
/res/scripts/common/Lib/encodings/gbk.py
|
4b4a46dcbfdea9c2f98724c76a52405e54febf9c
|
[] |
no_license
|
cnsuhao/WOT-0.9.17-CT
|
0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb
|
d1f932d8cabaf8aa21708622e87f83c8d24d6451
|
refs/heads/master
| 2021-06-08T18:11:07.039293 | 2016-11-19T19:12:37 | 2016-11-19T19:12:37 | null | 0 | 0 | null | null | null | null |
WINDOWS-1250
|
Python
| false | false | 1,136 |
py
|
# 2016.11.19 19:58:56 Central Europe (standard time)
# Embedded file name: scripts/common/Lib/encodings/gbk.py
import _codecs_cn, codecs
import _multibytecodec as mbc
codec = _codecs_cn.getcodec('gbk')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(name='gbk', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\common\Lib\encodings\gbk.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:58:56 Střední Evropa (běžný čas)
|
[
"[email protected]"
] | |
cd75f26df497e0e47746786f0197f8dc9b218f06
|
930c207e245c320b108e9699bbbb036260a36d6a
|
/BRICK-RDFAlchemy/generatedCode/brick/brickschema/org/schema/_1_0_2/Brick/FCU_Return_Air_Temperature_Sensor.py
|
d4ac39c9698a57051d03037b2f79dc41b5511c4b
|
[] |
no_license
|
InnovationSE/BRICK-Generated-By-OLGA
|
24d278f543471e1ce622f5f45d9e305790181fff
|
7874dfa450a8a2b6a6f9927c0f91f9c7d2abd4d2
|
refs/heads/master
| 2021-07-01T14:13:11.302860 | 2017-09-21T12:44:17 | 2017-09-21T12:44:17 | 104,251,784 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 457 |
py
|
from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Return_Air_Temperature_Sensor import Return_Air_Temperature_Sensor
class FCU_Return_Air_Temperature_Sensor(Return_Air_Temperature_Sensor):
rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').FCU_Return_Air_Temperature_Sensor
|
[
"[email protected]"
] | |
1158acb79cf822c0ded1ea29f10b77727305c073
|
cd142a4e15d3576546fcb44841417039f0b8fb00
|
/build/double/catkin_generated/pkg.installspace.context.pc.py
|
9b014836f2e3e476722b6c40aa901294660dad37
|
[] |
no_license
|
mgou123/rplidar
|
4389819eb1998d404d1066c7b4a983972d236ce7
|
608c1f6da2d3e5a8bac06e8d55d8569af828a40b
|
refs/heads/master
| 2022-11-10T05:51:56.403293 | 2020-06-29T04:16:14 | 2020-06-29T04:16:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 393 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs;sensor_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "double"
PROJECT_SPACE_DIR = "/home/xu/dogkin_ws/install"
PROJECT_VERSION = "0.0.0"
|
[
"[email protected]"
] | |
237743cb29e83580cbade37977253888764a05b4
|
f4f54015298eedfbbdfcaaf5e2a9603112f803a5
|
/sachin/gocept.filestore-0.3/gocept.filestore-0.3/src/gocept/filestore/tests.py
|
39487c46c2cf44f18a2df60610d46b4e1e9848c4
|
[] |
no_license
|
raviramawat8/Old_Python_Codes
|
f61e19bff46856fda230a096aa789c7e54bd97ca
|
f940aed0611b0636e1a1b6826fa009ceb2473c2b
|
refs/heads/master
| 2020-03-22T22:54:50.964816 | 2018-06-16T01:39:43 | 2018-06-16T01:39:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 335 |
py
|
# Copyright (c) 2007 gocept gmbh & co. kg
# See also LICENSE.txt
# $Id: tests.py 5111 2007-08-30 11:27:23Z zagy $
import unittest
from zope.testing import doctest
def test_suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocFileSuite(
'README.txt',
optionflags=doctest.ELLIPSIS))
return suite
|
[
"[email protected]"
] |