blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4fd186ecb8de7d13fb1a560a5b7063dd55cf34c3
|
b0856a2d66cc4c71705b8c16c169848070294cf6
|
/graphValidTree.py
|
c267910cc1708fcf239eee741ff3637ac2e1b0d5
|
[] |
no_license
|
jfriend08/LeetCode
|
9e378ff015edc3102a4785b0832cf0eeb09f5fc2
|
f76d3cf2e7fd91767f80bd60eed080a7bad06e62
|
refs/heads/master
| 2021-01-21T19:28:25.354537 | 2016-01-15T04:53:11 | 2016-01-15T04:53:11 | 28,518,328 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,012 |
py
|
'''
Given n nodes labeled from 0 to n - 1 and a list of undirected edges (each edge is a
pair of nodes), write a function to check whether these edges make up a valid tree.
For example:
Given n = 5 and edges = [[0, 1], [0, 2], [0, 3], [1, 4]], return true.
Given n = 5 and edges = [[0, 1], [1, 2], [2, 3], [1, 3], [1, 4]], return false.
Hint:
Given n = 5 and edges = [[0, 1], [1, 2], [3, 4]], what should you return? Is this case a valid tree?
According to the definition of tree on Wikipedia: "a tree is an undirected graph in which any two
vertices are connected by exactly one path. In other words, any connected graph without simple cycles
is a tree."
Note: you can assume that no duplicate edges will appear in edges. Since all edges are undirected,
[0, 1] is the same as [1, 0] and thus will not appear together in edges.
'''
class Solution(object):
    """Decide whether n nodes and a list of undirected edges form a valid tree
    (connected and acyclic)."""

    def makeMap(self, n, edges, linkMap):
        """Fill linkMap in place with an adjacency list for nodes 0..n-1."""
        # range (not Py2-only xrange) keeps the code portable to Python 3.
        for i in range(n):
            linkMap[i] = []
        for n1, n2 in edges:
            linkMap[n1] += [n2]
            linkMap[n2] += [n1]

    def isValidTravel(self, parent, node, linkMap, visited):
        """DFS from node; return False iff a cycle is reachable.

        parent is the node we arrived from (None at the root); stepping back
        along the edge we just used is not a cycle.  visited doubles as the
        set of nodes reached so far.
        """
        visited[node] = True
        for nei in linkMap[node]:
            if nei == parent:
                continue  # the edge we came down, not a cycle
            if nei in visited:
                return False  # reached an already-seen node -> cycle
            if not self.isValidTravel(node, nei, linkMap, visited):
                return False
        return True

    def validTree(self, n, edges):
        """Return True iff the graph is a single connected, acyclic component.

        Connectivity is checked by requiring the DFS from node 0 to reach
        all n nodes.
        """
        if n == 0:
            return True  # guard: no nodes at all (avoids KeyError on linkMap[0])
        linkMap, visited = {}, {}
        self.makeMap(n, edges, linkMap)
        acyclic = self.isValidTravel(None, 0, linkMap, visited)
        return len(visited) == n and acyclic
# Smoke-test the three examples from the problem statement
# (Python 2 print statements).
sol = Solution()
n, edges = 5, [[0, 1], [0, 2], [0, 3], [1, 4]]
print sol.validTree(n, edges)  # expected True: connected and acyclic
n, edges = 5, [[0, 1], [1, 2], [2, 3], [1, 3], [1, 4]]
print sol.validTree(n, edges)  # expected False: [1, 3] closes a cycle
n, edges = 5, [[0, 1], [1, 2], [3, 4]]
print sol.validTree(n, edges)  # expected False: graph is disconnected
|
[
"[email protected]"
] | |
88231ca16773294f42e2cf6d51ba1b8dc86895a1
|
25b81256057c9a2de014ab511e04703dc617f050
|
/etl/census/census_features.py
|
19b6e85b6775b9ed6933def9b874cd9390f5bb66
|
[
"MIT"
] |
permissive
|
conorhenley/cincinnati
|
7b9b2fc6d13e49ad5e95a557cd79b28bd17f0565
|
5ca86a8a31099365188969493e0dd369b4faefc0
|
refs/heads/master
| 2021-01-13T06:50:18.403686 | 2016-05-26T20:21:12 | 2016-05-26T20:21:12 | 64,249,902 | 1 | 0 | null | 2016-07-26T19:51:03 | 2016-07-26T19:51:03 | null |
UTF-8
|
Python
| false | false | 7,805 |
py
|
# coding: utf-8
# In[1]:
from sqlalchemy import create_engine
import pandas as pd
from lib_cinci.config import main as config
# Pull Postgres credentials from the project config and open a connection.
user = config['db']['user']
password = config['db']['password']
host = config['db']['host']
database = config['db']['database']
# NOTE(review): port 5432 is hard-coded; the rest comes from config.
engine = create_engine('postgresql://{user}:{password}@{host}:5432/{database}'.format(user=user, password=password, host=host, database=database))
# In[3]:
# Census population/housing counts joined to block-group geometry so each
# (tract, block_group) row also carries its land area.
pop_housing_sql = """SELECT census.*, groups.area FROM shape_files.census_pop_housing as census
JOIN shape_files.census_blocks_groups as groups
on census.tract = groups.tract
and census.block_group = groups.blkgrp;"""
# In[4]:
pop_housing_raw = pd.read_sql_query(pop_housing_sql, con=engine)
# # Raw census data
# In[5]:
pop_housing_raw.head()
# # Calculating census features
# list of feature description and calculation can be found in folder docs/data_dictionaries
#
# features are calculated for each pair of census tract and block
# In[5]:
# Engineered census features, one row per (tract, block_group).
# Denominators used below (2010 SF1 variable codes): H0030001 = total housing
# units, H0040004 = renter-occupied units, H0140002 = owner-occupied units,
# P0180001 = households, P0010001 = total population, area = block-group area.
features = pd.DataFrame({ 'tract' : pop_housing_raw.tract,
        'block_group' : pop_housing_raw.block_group,
        'housing_density': pop_housing_raw.H0030001/pop_housing_raw.area,
        'rate_occupied_units': pop_housing_raw.H0030002/pop_housing_raw.H0030001,
        'rate_vacant_units': pop_housing_raw.H0030003/pop_housing_raw.H0030001,
        'rate_mortgage_or_loan' : pop_housing_raw.H0040002/pop_housing_raw.H0030001,
        'rate_renter_occupied' : pop_housing_raw.H0040004/pop_housing_raw.H0030001,
        'rate_for_rent' : pop_housing_raw.H0050002/pop_housing_raw.H0030001,
        'rate_white_householder' : pop_housing_raw.H0060002/pop_housing_raw.P0180001,
        'rate_black_householder' : pop_housing_raw.H0060003/pop_housing_raw.P0180001,
        'rate_native_householder' : (pop_housing_raw.H0060004+pop_housing_raw.H0060006)/pop_housing_raw.P0180001,
        'rate_asian_householder' : pop_housing_raw.H0060005/pop_housing_raw.P0180001,
        'rate_other_race_householder' : pop_housing_raw.H0060007/pop_housing_raw.P0180001,
        'rate_pop_occupied_units' : pop_housing_raw.H0100001/pop_housing_raw.P0010001,
        'rate_1_per_household' : pop_housing_raw.H0130002/pop_housing_raw.P0180001,
        'rate_2_per_household' : pop_housing_raw.H0130003/pop_housing_raw.P0180001,
        'rate_3_per_household' : pop_housing_raw.H0130004/pop_housing_raw.P0180001,
        'rate_4_per_household' : pop_housing_raw.H0130005/pop_housing_raw.P0180001,
        'rate_5_per_household' : pop_housing_raw.H0130006/pop_housing_raw.P0180001,
        'rate_6_per_household' : pop_housing_raw.H0130007/pop_housing_raw.P0180001,
        'rate_7_plus_per_household' : pop_housing_raw.H0130008/pop_housing_raw.P0180001,
        'rate_owner_occupied' : pop_housing_raw.H0140002/pop_housing_raw.H0030001,
        'rate_owner_occupied_white' : pop_housing_raw.H0140003/pop_housing_raw.H0140002,
        'rate_owner_occupied_black' : pop_housing_raw.H0140004/pop_housing_raw.H0140002,
        'rate_owner_occupied_native' : (pop_housing_raw.H0140005+pop_housing_raw.H0140007)/pop_housing_raw.H0140002,
        'rate_owner_occupied_asian' : pop_housing_raw.H0140006/pop_housing_raw.H0140002,
        'rate_owner_occupied_other_race' : pop_housing_raw.H0140008/pop_housing_raw.H0140002,
        'rate_renter_occupied_white' : pop_housing_raw.H0140011/pop_housing_raw.H0040004,
        'rate_renter_occupied_black' : pop_housing_raw.H0140012/pop_housing_raw.H0040004,
        'rate_renter_occupied_native' : (pop_housing_raw.H0140013+pop_housing_raw.H0140015)/pop_housing_raw.H0040004,
        'rate_renter_occupied_asian' : pop_housing_raw.H0140014/pop_housing_raw.H0040004,
        'rate_renter_occupied_other' : pop_housing_raw.H0140016/pop_housing_raw.H0040004,
        'rate_owner_occupied_hispanic' : pop_housing_raw.H0150004/pop_housing_raw.H0140002,
        #'rate_renter_occupied_hispanic' : pop_housing_raw.H0150005/pop_housing_raw.H0040004,
        'rate_owner_occupied_w_children' : pop_housing_raw.H0190003/pop_housing_raw.H0140002,
        'rate_owner_occupied_no_children' : pop_housing_raw.H0190004/pop_housing_raw.H0140002,
        'rate_renter_occupied_no_children' : 1-(pop_housing_raw.H0190006/pop_housing_raw.H0040004),
        'rate_renter_occupied_w_children' : pop_housing_raw.H0190006/pop_housing_raw.H0040004,
        'population_density' : pop_housing_raw.P0010001/pop_housing_raw.area,
        'rate_white_pop' : pop_housing_raw.P0030002/pop_housing_raw.P0010001,
        'rate_black_pop' : pop_housing_raw.P0030003/pop_housing_raw.P0010001,
        'rate_native_pop' : (pop_housing_raw.P0030006+pop_housing_raw.P0030004)/pop_housing_raw.P0010001,
        'rate_asian_pop' : pop_housing_raw.P0030005/pop_housing_raw.P0010001,
        'rate_other_race_pop' : pop_housing_raw.P0030007/pop_housing_raw.P0010001,
        'rate_pop_over_18' : pop_housing_raw.P0110001/pop_housing_raw.P0010001,
        'rate_male_under_18' : (pop_housing_raw.P0120003+pop_housing_raw.P0120004+pop_housing_raw.P0120005+pop_housing_raw.P0120006)/pop_housing_raw.P0010001,
        'rate_male_18_35' : pop_housing_raw[['P0120007','P0120008','P0120009','P0120010','P0120011','P0120012']].sum(axis=1)/pop_housing_raw.P0010001,
        'rate_male_35_50' : pop_housing_raw[['P0120013','P0120014','P0120015']].sum(axis=1)/pop_housing_raw.P0010001,
        'rate_male_50_75' : pop_housing_raw[['P0120016', 'P0120017', 'P0120018', 'P0120019', 'P0120020', 'P0120021', 'P0120022']].sum(axis=1)/pop_housing_raw.P0010001,
        'rate_male_over_75' : pop_housing_raw[['P0120023','P0120024','P0120025']].sum(axis=1)/pop_housing_raw.P0010001,
        'rate_female_under_18' : pop_housing_raw[['P0120027','P0120028','P0120029','P0120030']].sum(axis=1)/pop_housing_raw.P0010001,
        'rate_female_18_35' : pop_housing_raw[['P0120031', 'P0120032', 'P0120033', 'P0120034', 'P0120035', 'P0120036']].sum(axis=1)/pop_housing_raw.P0010001,
        'rate_female_35_50' : pop_housing_raw[['P0120037', 'P0120038', 'P0120039']].sum(axis=1)/pop_housing_raw.P0010001,
        'rate_female_50_75' : pop_housing_raw[['P0120040', 'P0120041', 'P0120042', 'P0120043', 'P0120044', 'P0120045', 'P0120046']].sum(axis=1)/pop_housing_raw.P0010001,
        # BUG FIX: this key was 'rate_male_over_75' again, which silently
        # overwrote the real male 75+ rate above; P0120047-49 are the
        # female 75+ age buckets.
        'rate_female_over_75' : pop_housing_raw[['P0120047','P0120048','P0120049']].sum(axis=1)/pop_housing_raw.P0010001,
        'rate_households' : pop_housing_raw.P0180001/pop_housing_raw.H0030001})
# In[7]:
# Notebook-style display of the finished feature table.
features
# In[10]:
# Persist the engineered features, replacing any previous version of the table.
features.to_sql('census_features', engine, schema='shape_files', if_exists='replace', index=False)
|
[
"[email protected]"
] | |
b561022b7fd0c683ba9c07ba5381c7a55b8b49cd
|
bc599c9a404940fae21ed6b57edb7bb9dc04e71c
|
/test/base_test/graphics/baseScatter.py
|
2bd2cb944a44d6477901a3cff545da2ae9d41870
|
[] |
no_license
|
jcarlosglx/SparkReport
|
c9b37a1419f113ea13341e6641ceb17056aeb7d0
|
9d6b044f037e8dfe583bcf76c51dd792ac1cc34a
|
refs/heads/master
| 2023-08-11T16:04:28.393856 | 2021-09-21T23:06:08 | 2021-09-21T23:06:08 | 409,001,831 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 785 |
py
|
from test.base_test.base_http.baseGetTest import BaseGetGeneralTest
from test.base_test.base_dimension.baseGraphicTwoDimensionTest import \
BaseGraphicTwoDimensionTest
from typing import List, Type
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
class BaseGetScatterTest(BaseGetGeneralTest, BaseGraphicTwoDimensionTest):
    """GET test for the two-dimensional 'Scatter' graphic endpoint."""

    Graphics: List[str] = ["Scatter"]

    def test_get_scatter(self, get_app: Flask, get_db: Type[SQLAlchemy]):
        """Issue the GET request and check the response status code."""
        self.reload_json()
        endpoint = f"{self.url_get}{self.endpoint_get}"
        client = get_app.test_client()
        response = client.get(endpoint, json=self.JSON)
        self.save_response_file(response)
        status = str(response.status_code)
        assert status == self.expect_status_get, self.print_error(status)
|
[
"[email protected]"
] | |
c85cb0f32d51c4871c1b38ca50593c5a5e7ecd75
|
b95f80c0c2e7700ed24248bb84f4ef02723e367c
|
/tests/k8s/test_discovery.py
|
3bec8820ae816dfa8b80dda2036bf7231f9dce29
|
[
"MIT"
] |
permissive
|
tinyzimmer/kopf
|
b97faab3f396dc169ebe053c6b41d57d20756738
|
74c42a2acdf2a72446d290fa1f27b53ec5d43218
|
refs/heads/master
| 2022-12-04T17:51:30.648646 | 2020-08-30T00:23:18 | 2020-08-30T00:23:18 | 291,496,989 | 0 | 0 |
MIT
| 2020-08-30T15:26:12 | 2020-08-30T15:26:11 | null |
UTF-8
|
Python
| false | false | 4,573 |
py
|
import aiohttp.web
import pytest
from kopf.clients.discovery import discover, is_namespaced, is_status_subresource
from kopf.structs.resources import Resource
async def test_discovery_of_existing_resource(
        resp_mocker, aresponses, hostname):
    # The API lists one matching resource; discover() must return its info.
    expected = {'name': 'someresources', 'namespaced': True}
    payload = {'resources': [expected]}
    mock = resp_mocker(return_value=aiohttp.web.json_response(payload))
    aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', mock)
    info = await discover(resource=Resource('some-group.org', 'someversion', 'someresources'))
    assert info == expected
async def test_discovery_of_unexisting_resource(
        resp_mocker, aresponses, hostname):
    # An empty resource list means the resource is unknown -> None.
    payload = {'resources': []}
    mock = resp_mocker(return_value=aiohttp.web.json_response(payload))
    aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', mock)
    info = await discover(resource=Resource('some-group.org', 'someversion', 'someresources'))
    assert info is None
@pytest.mark.parametrize('status', [403, 404])
async def test_discovery_of_unexisting_group_or_version(
        resp_mocker, aresponses, hostname, status):
    # 403/404 for the whole group/version is treated as "resource absent".
    mock = resp_mocker(return_value=aresponses.Response(status=status, reason="boo!"))
    aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', mock)
    info = await discover(resource=Resource('some-group.org', 'someversion', 'someresources'))
    assert info is None
async def test_discovery_is_cached_per_session(
        resp_mocker, aresponses, hostname):
    # Two successive API responses are queued: the first lists only
    # resource #1, the second only resource #2.  Because discovery results
    # are cached per session, only the FIRST response is ever consumed.
    res1info = {'name': 'someresources1', 'namespaced': True}
    res2info = {'name': 'someresources2', 'namespaced': True}
    result = {'resources': [res1info]}
    list_mock = resp_mocker(return_value=aiohttp.web.json_response(result))
    aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', list_mock)
    result = {'resources': [res2info]}
    list_mock = resp_mocker(return_value=aiohttp.web.json_response(result))
    aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', list_mock)
    resource = Resource('some-group.org', 'someversion', 'someresources1')
    info = await discover(resource=resource)
    assert info == res1info
    # Resource #2 was absent from the first (cached) listing, so it stays
    # absent even though the second queued response would list it.
    resource = Resource('some-group.org', 'someversion', 'someresources2')
    info = await discover(resource=resource)
    assert info is None  # cached as absent on the 1st call.
    resource = Resource('some-group.org', 'someversion', 'someresources1')
    info = await discover(resource=resource)
    assert info == res1info
@pytest.mark.parametrize('namespaced', [True, False])
async def test_is_namespaced(
        resp_mocker, aresponses, hostname, namespaced):
    # is_namespaced() must mirror the 'namespaced' flag reported by the API.
    reported = {'name': 'someresources', 'namespaced': namespaced}
    payload = {'resources': [reported]}
    mock = resp_mocker(return_value=aiohttp.web.json_response(payload))
    aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', mock)
    result = await is_namespaced(resource=Resource('some-group.org', 'someversion', 'someresources'))
    assert result == namespaced
@pytest.mark.parametrize('namespaced', [True, False])
async def test_is_status_subresource_when_not_a_subresource(
        resp_mocker, aresponses, hostname, namespaced):
    # Without a 'someresources/status' entry there is no status subresource.
    reported = {'name': 'someresources', 'namespaced': namespaced}
    payload = {'resources': [reported]}
    mock = resp_mocker(return_value=aiohttp.web.json_response(payload))
    aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', mock)
    result = await is_status_subresource(resource=Resource('some-group.org', 'someversion', 'someresources'))
    assert result is False  # an extra type-check
@pytest.mark.parametrize('namespaced', [True, False])
async def test_is_status_subresource_when_is_a_subresource(
        resp_mocker, aresponses, hostname, namespaced):
    # The presence of a '<name>/status' entry marks the status subresource.
    reported = {'name': 'someresources', 'namespaced': namespaced}
    reported_status = {'name': 'someresources/status', 'namespaced': namespaced}
    payload = {'resources': [reported, reported_status]}
    mock = resp_mocker(return_value=aiohttp.web.json_response(payload))
    aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', mock)
    result = await is_status_subresource(resource=Resource('some-group.org', 'someversion', 'someresources'))
    assert result is True  # an extra type-check
|
[
"[email protected]"
] | |
63eb117df50510a881cad1cd17e8650e4c931d87
|
84a5c4c2e0977d42425771098f5f881c750da7f0
|
/neomodel_constraints/fetcher/constraints/util.py
|
3a08ca6379d4f6cab307f35957905429b97c95ac
|
[] |
no_license
|
SSripilaipong/neomodel-constraints
|
6c3023ba156275e48f5f7ebcbdd283ce8d41f9a1
|
4b91185ba9eec993c58e9ae770fd3d0e90f915ae
|
refs/heads/main
| 2023-07-15T09:58:41.451631 | 2021-08-29T13:19:38 | 2021-08-29T13:19:38 | 390,312,509 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 540 |
py
|
from typing import List
from neomodel_constraints.constraint import ConstraintSet, TypeMapperAbstract
from .data import Neo4jConstraintQueryRecord
def convert_constraints_with_type_mapper(
        raw: List[Neo4jConstraintQueryRecord],
        type_mapper: TypeMapperAbstract
) -> ConstraintSet:
    """Build a ConstraintSet from raw Neo4j constraint query records.

    Each record's type string is resolved to a concrete constraint class
    via the type mapper, which then deserializes the record's dict form.
    """
    converted = {
        type_mapper.map(record.type_).from_raw(record.dict())
        for record in raw
    }
    return ConstraintSet(converted)
|
[
"[email protected]"
] | |
74173ef5d6c8e8f1b2f1282a3ba50014aaf181af
|
5db0fab37c2b8a618d85d3b60fab9f806c416474
|
/src/python/pants/backend/experimental/terraform/lint/tfsec/register.py
|
da368f57c9b4d2296cb0466cd1219a8d9616b2f6
|
[
"Apache-2.0"
] |
permissive
|
pantsbuild/pants
|
4988d1ac5474ec95f94ce2218aeb759401e4b011
|
98cbda8545f0d58c586ed2daa76fefd729d5e0d5
|
refs/heads/main
| 2023-09-05T03:44:17.646899 | 2023-09-01T19:52:09 | 2023-09-01T19:52:09 | 7,209,075 | 2,708 | 593 |
Apache-2.0
| 2023-09-14T19:33:33 | 2012-12-17T17:39:04 |
Python
|
UTF-8
|
Python
| false | false | 245 |
py
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.terraform.lint.tfsec.rules import rules as tfsec_rules
def rules():
    """Return the rule set that wires the tfsec Terraform linter into pants."""
    return tfsec_rules()
|
[
"[email protected]"
] | |
e0d0bff373d69b9455fd52b2ddecf9431c15390d
|
e95eb3b5332ba010669f921fe6ac22f85837da2a
|
/examples/analysis/parse_demo.py
|
1470b8acd579aa33a07b0bd3b49fffc8f89cffa2
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
LABSN/expyfun
|
a5998722f09bfb08e3167d6309ce0d5d534b8b18
|
f324eb8c65afa9530698f15ca058700518355a8f
|
refs/heads/main
| 2023-08-05T13:06:15.026909 | 2023-07-25T19:07:03 | 2023-07-25T19:07:03 | 11,614,571 | 13 | 19 |
BSD-3-Clause
| 2023-07-25T19:07:05 | 2013-07-23T17:28:02 |
Python
|
UTF-8
|
Python
| false | false | 733 |
py
|
# -*- coding: utf-8 -*-
"""
============
Parsing demo
============
This example shows some of the functionality of ``read_tab``.
"""
# Author: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import ast
from expyfun.io import read_tab
print(__doc__)

# Load the tab-separated experiment log produced by simple_experiment.
data = read_tab('sample.tab')  # from simple_experiment
print('Number of trials: %s' % len(data))
keys = list(data[0].keys())
print('Data keys: %s\n' % keys)
for di, d in enumerate(data):
    if d['trial_id'][0][0] == 'multi-tone':
        print('Trial %s multi-tone' % (di + 1))
        # Values are stored as strings; literal_eval recovers the list.
        targs = ast.literal_eval(d['multi-tone trial'][0][0])
        presses = [int(k[0]) for k in d['keypress']]
        print('  Targs: %s\n  Press: %s' % (targs, presses))
|
[
"[email protected]"
] | |
5bbb358a632d9bba20e2078a0a95695607f33fff
|
1a87d286396a2c6f6b6ac7c53495f80690836c7b
|
/LC/LC_testJustification.py
|
e1b9c5fe604fe74fbcb2713c10b062f9b244c481
|
[] |
no_license
|
kickbean/LeetCode
|
14d33eea9dd70821114ca6d7e1a32111d4d64bf0
|
92e4de152e2aae297ef0e93c9eea61d7ad718f4e
|
refs/heads/master
| 2016-09-10T14:38:33.692759 | 2014-04-08T00:26:51 | 2014-04-08T00:26:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,565 |
py
|
'''
Given an array of words and a length L, format the text such that each line has exactly L characters and is fully (left and right) justified.
You should pack your words in a greedy approach; that is, pack as many words as you can in each line. Pad extra spaces ' ' when necessary so that each line has exactly L characters.
Extra spaces between words should be distributed as evenly as possible. If the number of spaces on a line do not divide evenly between words, the empty slots on the left will be assigned more spaces than the slots on the right.
For the last line of text, it should be left justified and no extra space is inserted between words.
For example,
words: ["This", "is", "an", "example", "of", "text", "justification."]
L: 16.
Return the formatted lines as:
[
"This is an",
"example of text",
"justification. "
]
Note: Each word is guaranteed not to exceed L in length.
click to show corner cases.
Corner Cases:
A line other than the last line might contain only one word. What should you do in this case?
In this case, that line should be left-justified.
Created on Feb 3, 2014
@author: Songfan
'''
def solution(words, L):
    """Fully justify words into lines of exactly L characters.

    Greedy packing: each line takes as many words as fit (with at least one
    space between words).  All lines except the last are padded to exactly
    L characters by combineWords(); the last line is left-justified with
    single spaces and right-padded.

    words -- list of words, each guaranteed not to exceed L characters
    L     -- target line width
    Returns the list of formatted lines (words itself when it is empty).
    """
    n = len(words)
    if n == 0:
        return words
    res = []
    currWords = []       # words collected for the line being built
    availableSpace = L   # room left on the current line
    for w in words:
        wLen = len(w)
        # '<=' (the original used '<'): a word that exactly fills the
        # remaining room still belongs on the current line.
        if wLen <= availableSpace:
            currWords.append(w)
            availableSpace -= wLen + 1  # the word plus one separating space
        else:
            res.append(combineWords(currWords, L))
            currWords = [w]
            availableSpace = L - wLen - 1
    if currWords:
        # Last line: left-justified, single spaces, padded out to width L.
        # (The original appended only the final word here, dropping the
        # rest of the last line's words.)
        lastLine = ' '.join(currWords)
        res.append(lastLine + ' ' * (L - len(lastLine)))
    return res

def combineWords(words, L):
    """Join words into one fully justified line of exactly L characters.

    Spaces are distributed as evenly as possible between words; when they
    do not divide evenly, the leftmost gaps receive one extra space.  A
    line holding a single word is left-justified.
    """
    wordNum = len(words)
    wordLen = 0
    for w in words:
        wordLen += len(w)
    spaceNumTotal = L - wordLen          # total spaces to distribute
    if wordNum == 1:
        return words[0] + ' ' * spaceNumTotal
    spaceNum = spaceNumTotal // (wordNum - 1)        # base gap width
    additionalSpace = spaceNumTotal % (wordNum - 1)  # leftmost gaps get +1
    res = ''
    for wi in range(wordNum):
        if wi == wordNum - 1:
            res += words[wi]  # last word: no trailing spaces
        elif additionalSpace > 0:
            res += words[wi] + ' ' * (spaceNum + 1)
            additionalSpace -= 1
        else:
            res += words[wi] + ' ' * spaceNum
    return res
words = ["This", "is", "an", "example", "of", "text", "justification."]
L = 16
print solution(words, L)
words = ["This", "is", "an", "vervverycrazy", "example", "of", "text", "justification."]
L = 16
print solution(words, L)
|
[
"[email protected]"
] | |
015f23d3858690ee7470909983c15dd848b5709a
|
46f91363f5cc43b1644a7da93938aef3c0de29c5
|
/leonardo/module/media/__init__.py
|
233a0f5b0e426c65d5e8688c40baf9bf33e3e777
|
[
"BSD-2-Clause"
] |
permissive
|
shinichi81/django-leonardo
|
55e1f7492813b8a877dac92aadb114785ea2eb83
|
152ad02ba23b8bc94f676a7221c15338181c67b7
|
refs/heads/master
| 2021-01-14T12:45:14.400206 | 2015-11-01T09:38:55 | 2015-11-01T09:38:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,222 |
py
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
from .widget import *
default_app_config = 'leonardo.module.media.MediaConfig'
class Default(object):
    """Bundled defaults for the Leonardo media module: contributed apps,
    widgets, plugins and site-configurable settings."""

    optgroup = 'Media'  # option-group label for this module's widgets

    @property
    def apps(self):
        # Django apps this module contributes.
        return [
            'leonardo.module',
            'leonardo.module.media',
        ]

    @property
    def widgets(self):
        # Widget classes exported to the page builder
        # (names come from `from .widget import *` above).
        return [
            DownloadListWidget,
            DownloadItemWidget,
            InternetVideoWidget,
            MediaGalleryWidget,
            SimpleImageWidget,
            VectorGraphicsWidget,
            PdfDocumentWidget,
            FlashObjectWidget,
        ]

    # Plugin registrations as (app path, verbose name) pairs — presumably
    # mounted as page application content; verify against the page module.
    plugins = [
        ('leonardo.module.media.apps.category_nested', 'List of directories'),
        ('leonardo.module.media.apps.category_simple', 'Simple list of directories'),
    ]

    # Site-configurable options: setting name -> (default value, help text).
    config = {
        'MEDIA_PAGINATE_BY': (25, _('Pagination count for media files')),
        'MEDIA_PUBLIC_UPLOAD_TO': ('public', _('Prefix for public files from MEDIA_ROOT')),
        'MEDIA_PRIVATE_UPLOAD_TO': ('private', _('Prefix for private files from MEDIA_ROOT')),
        'MEDIA_IS_PUBLIC_DEFAULT': (True, _('Set uploaded files to public automatically')),
        'MEDIA_ENABLE_PERMISSIONS': (True, _(
            'Permissions for downloadable items. Experimental feature.')),
        'MEDIA_ALLOW_REGULAR_USERS_TO_ADD_ROOT_FOLDERS': (False, _('ALLOW_REGULAR_USERS_TO_ADD_ROOT_FOLDERS')),
        'MEDIA_THUMB_SMALL_GEOM': ('64x64', _('MEDIA_THUMB_SMALL_GEOM')),
        'MEDIA_THUMB_SMALL_OPT': ('', _('Another options for small thumnails')),
        'MEDIA_THUMB_MEDIUM_GEOM': ('256x256', _('MEDIA_THUMB_MEDIUM_GEOM')),
        'MEDIA_THUMB_MEDIUM_OPT': ('', _('Another options for medium thumnails')),
        'MEDIA_THUMB_LARGE_GEOM': ('768x768', _('MEDIA_THUMB_LARGE_GEOM')),
        'MEDIA_THUMB_LARGE_OPT': ('', _('Another options for large thumnails')),
        'MEDIA_LOGICAL_STRUCTURE': (False, _('If is True all folders and files will has same path in the OS')),
    }

    # Extra templates rendered in the page actions area.
    page_actions = ['media/_actions.html']
class MediaConfig(AppConfig, Default):
    """Django AppConfig combining the app identity with the Default settings."""
    name = 'leonardo.module.media'
    verbose_name = "Media"

# Module-level instance of the default settings bundle.
default = Default()
|
[
"[email protected]"
] | |
0ea35b60098989cbad8bece1f505638fa7a685d2
|
01ed217a3c3c028e6cf4e3675cb86f4eef992e13
|
/SimG4Core/PrintGeomInfo/test/python/runPrintSolid_cfg.py
|
bb9e7a06455f3f00c6cc1a434b1f718f2240c745
|
[
"Apache-2.0"
] |
permissive
|
dtp2-tpg-am/cmssw
|
ae318d154779c311e2e93cdffe0c7bc24d6d2593
|
7a32f48e079f78b501deee6cc9d19caba269e7fb
|
refs/heads/AM_12_0_2_dev
| 2022-11-04T12:05:05.822865 | 2021-10-28T07:25:28 | 2021-10-28T07:25:28 | 185,209,257 | 2 | 1 |
Apache-2.0
| 2022-04-26T07:18:06 | 2019-05-06T14:07:10 |
C++
|
UTF-8
|
Python
| false | false | 1,897 |
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Run3_cff import Run3
# Geant4 print-geometry job using the Run-3 geometry.
process = cms.Process('G4PrintGeometry',Run3)
process.load('Configuration.Geometry.GeometryExtended2021Reco_cff')
# Alternative: DD4hep-based geometry (swap in for the three lines above).
#from Configuration.Eras.Era_Run3_dd4hep_cff import Run3_dd4hep
#process = cms.Process('G4PrintGeometry',Run3_dd4hep)
#process.load('Configuration.Geometry.GeometryDD4hepExtended2021Reco_cff')
process.load('SimGeneral.HepPDTESSource.pdt_cfi')
process.load('IOMC.RandomEngine.IOMC_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedFlat_cfi')
process.load('GeneratorInterface.Core.generatorSmeared_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('SimG4Core.Application.g4SimHits_cfi')
process.load('SimG4Core.PrintGeomInfo.printGeomSolids_cff')
# Route the Geant4 output streams through the MessageLogger.
if hasattr(process,'MessageLogger'):
    process.MessageLogger.G4cout=dict()
    process.MessageLogger.G4cerr=dict()
    process.MessageLogger.PrintGeom=dict()
process.source = cms.Source("EmptySource")
# Single flat-energy muon-neutrino gun (PDG 14) — only used to drive one event.
process.generator = cms.EDProducer("FlatRandomEGunProducer",
    PGunParameters = cms.PSet(
        PartID = cms.vint32(14),
        MinEta = cms.double(-3.5),
        MaxEta = cms.double(3.5),
        MinPhi = cms.double(-3.14159265359),
        MaxPhi = cms.double(3.14159265359),
        MinE = cms.double(9.99),
        MaxE = cms.double(10.01)
    ),
    AddAntiParticle = cms.bool(False),
    Verbosity = cms.untracked.int32(0),
    firstRun = cms.untracked.uint32(1)
)
# One event is enough to walk and print the geometry.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)
# Minimal physics setup: no field, dummy physics list.
process.g4SimHits.UseMagneticField = False
process.g4SimHits.Physics.type = 'SimG4Core/Physics/DummyPhysics'
process.g4SimHits.Physics.DummyEMPhysics = True
process.g4SimHits.Physics.DefaultCutValue = 10.
process.p1 = cms.Path(process.generator*process.VtxSmeared*process.generatorSmeared*process.g4SimHits*process.printGeomSolids)
|
[
"[email protected]"
] | |
1897d9ce65665335394d0b57ff2ccf5a2082d7f6
|
5f2608d4a06e96c3a032ddb66a6d7e160080b5b0
|
/week6/homework_w6_q_c1.py
|
406a821246f24f931111b8aadf5a01215a8e8aea
|
[] |
no_license
|
sheikhusmanshakeel/statistical-mechanics-ens
|
f3e150030073f3ca106a072b4774502b02b8f1d0
|
ba483dc9ba291cbd6cd757edf5fc2ae362ff3df7
|
refs/heads/master
| 2020-04-08T21:40:33.580142 | 2014-04-28T21:10:19 | 2014-04-28T21:10:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,542 |
py
|
import math, random, pylab
def rho_free(x, y, beta):
    """Free-particle density-matrix weight exp(-(x-y)^2 / (2 beta))."""
    separation = x - y
    return math.exp(-(separation ** 2) / (2.0 * beta))
def levy_free_path(xstart, xend, dtau, N):
    """Sample an N-point free-particle path from xstart towards xend.

    Levy construction: each new point is drawn from a Gaussian whose mean
    interpolates between the previous point and the fixed endpoint, with
    variance set by the two remaining imaginary-time intervals.
    """
    path = [xstart]
    for k in range(1, N):
        dtau_prime = (N - k) * dtau  # time remaining to the endpoint
        mean = (dtau_prime * path[k - 1] + dtau * xend) / (dtau + dtau_prime)
        sigma = math.sqrt(1.0 / (1.0 / dtau + 1.0 / dtau_prime))
        path.append(random.gauss(mean, sigma))
    return path
beta = 20.0           # inverse temperature
N = 80                # number of imaginary-time slices
dtau = beta / N
n_steps = 100000      # Monte Carlo steps
x = [0.0] * N         # initial path: every slice at the origin
data = []
# Trotter weight of a whole path y under the harmonic potential.
Weight_trott = lambda y: math.exp(sum(-a **2/ 2.0 * dtau for a in y))
for step in range(n_steps):
    Ncut = random.randint(0, N-1)
    # x_new = levy_free_path(x[0], x[0], dtau, N)
    # Resample the first Ncut slices with a free-particle Levy bridge
    # and keep the tail of the path unchanged.
    x_new = levy_free_path(x[0], x[Ncut], dtau, Ncut) + x[Ncut:]
    # Metropolis accept/reject on the ratio of Trotter weights.
    if random.uniform(0, 1) < min(1, Weight_trott(x_new) / Weight_trott(x)):
        x = x_new[:]
    k = random.randint(0, N - 1)
    data.append(x[k])  # record the position of one random slice
print len(data)
# Histogram of sampled positions against the exact analytic density.
pylab.hist(data, bins=50, normed=True, label='QMC')
x_values = [0.1 * a for a in range (-30, 30)]
y_values = [math.sqrt(math.tanh(beta / 2.0)) / math.sqrt(math.pi) * \
            math.exp( - xx **2 * math.tanh( beta / 2.0)) for xx in x_values]
pylab.plot(x_values, y_values, label='exact')
pylab.xlabel('$x$')
pylab.ylabel('$\\pi(x)$ (normalized)')
pylab.axis([-3.0, 3.0, 0.0, 0.8])
pylab.legend()
ProgType = 'Levy_free_path'
pylab.title(ProgType + ' beta = ' + str(beta) + ', dtau = ' + str(dtau) +
            ', Nsteps = '+ str(n_steps))
pylab.savefig(ProgType + str(beta) + '.png')
pylab.show()
|
[
"[email protected]"
] | |
06ffea8d37e7baecbc877318ae07f0960176aa71
|
1255cedc3b8c486f07fb12b90b75b8773b4714be
|
/xnote/app/migrations/0002_auto_20210704_1851.py
|
ab7cafc76b864f0fe4f3aa7f3cbd0fcd44849f6c
|
[
"Apache-2.0"
] |
permissive
|
sebastianczech/Xnote
|
81c4cd00b2759037b2e538172ca70abdfba2740c
|
6b6785f5d1db37322b74818aa355eddad3a7a8a9
|
refs/heads/main
| 2023-07-19T14:22:43.026363 | 2021-09-18T14:15:54 | 2021-09-18T14:15:54 | 376,524,045 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,100 |
py
|
# Generated by Django 3.2.4 on 2021-07-04 18:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: restrict the 'month' field on every wallet
    model to an explicit 1-12 choice list with default 7 (July)."""

    dependencies = [
        ('app', '0001_initial'),
    ]

    # The AlterField operations are identical apart from the model name.
    operations = [
        migrations.AlterField(
            model_name='walletaccount',
            name='month',
            field=models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=7),
        ),
        migrations.AlterField(
            model_name='walletcar',
            name='month',
            field=models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=7),
        ),
        migrations.AlterField(
            model_name='walletcredit',
            name='month',
            field=models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=7),
        ),
        migrations.AlterField(
            model_name='walletdeposit',
            name='month',
            field=models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=7),
        ),
        migrations.AlterField(
            model_name='walletexpense',
            name='month',
            field=models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=7),
        ),
        migrations.AlterField(
            model_name='wallethouse',
            name='month',
            field=models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=7),
        ),
        migrations.AlterField(
            model_name='walletincome',
            name='month',
            field=models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=7),
        ),
    ]
|
[
"[email protected]"
] | |
4d28d031c27a0637460b632a9b19cba410228c5b
|
ebe29aa1cc69cd4de540f1310086bac47f3bbc38
|
/fakturo/billingstack/auth.py
|
637df96d8fe8f8a7f0b3a14ac9b442e3569ba857
|
[
"Apache-2.0"
] |
permissive
|
billingstack/python-fakturo-billingstack
|
b352262adc5c7046c46ff464290abafd709e8049
|
fb641b43ee0ab2a92aea64cc010c989bfbfe5436
|
refs/heads/master
| 2021-01-10T21:39:35.998727 | 2013-04-05T22:01:15 | 2013-04-05T22:01:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,879 |
py
|
import logging
import simplejson as json
from requests.auth import AuthBase
from fakturo.core import client
LOG = logging.getLogger(__name__)
class AuthHelper(AuthBase, client.BaseClient):
    """Requests auth hook that authenticates against the API and injects
    the resulting token into outgoing requests via X-Auth-Token."""

    def __init__(self, url, username=None, password=None,
                 account_name=None):
        super(AuthHelper, self).__init__(url)
        self.auth_info = {}

        if not account_name:
            raise ValueError('No account given.')

        cred_info = {
            'username': username,
            'password': password,
            'merchant': account_name
        }
        self.cred_info = cred_info

        # Authenticate eagerly when full credentials were supplied.
        if self.cred_valid:
            self.refresh_auth()

    @property
    def cred_valid(self):
        """True when both a username and a password are present."""
        c = self.cred_info
        return bool(c.get('username') and c.get('password'))

    def get_token_key(self, key):
        """
        Return something from the token info, None if no key or no info is
        there.

        :param key: What to get
        """
        token_info = self.auth_info.get('token')
        # BUG FIX: this previously returned token_info.get('id') regardless
        # of the requested key, ignoring the 'key' parameter entirely.
        return token_info.get(key) if token_info else token_info

    @property
    def token(self):
        # The token's own id doubles as the auth token string.
        return self.get_token_key('id')

    @property
    def endpoint(self):
        return self.auth_info.get('endpoint')

    @property
    def account(self):
        return self.auth_info.get('merchant')

    def __call__(self, request):
        # Lazily (re-)authenticate when no token is cached yet.
        if not self.token and self.cred_valid:
            self.refresh_auth()
        request.headers['X-Auth-Token'] = self.token
        return request

    def refresh_auth(self):
        """POST the non-empty credentials and cache the returned auth info."""
        auth_data = dict([(k, v) for k, v in self.cred_info.items() if v])
        LOG.debug('Authenticating on URL %s CREDENTIALS %s' %
                  (self.url, auth_data))
        response = self.post('/authenticate', data=json.dumps(auth_data))
        self.auth_info.update(response.json)
|
[
"[email protected]"
] | |
b0789b65346da9d46568ef7fc745efe52ce14c2c
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_rhetoricians.py
|
b82e3e2f934329cba730d00cb0c53fa56ef00f97
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 273 |
py
|
from xai.brain.wordbase.nouns._rhetorician import _RHETORICIAN
#calss header
class _RHETORICIANS(_RHETORICIAN):
    """Plural noun entry built on top of the singular ``_RHETORICIAN``."""

    def __init__(self):
        # Initialise the singular base word, then set the plural metadata.
        _RHETORICIAN.__init__(self)
        self.name = "RHETORICIANS"
        self.basic = "rhetorician"
        self.specie = 'nouns'
        self.jsondata = {}
|
[
"[email protected]"
] | |
352121d56b8a5bb9fa3eec78314000a59d9186b6
|
b50508302647ad849029210bff200930b1902987
|
/apps/articles/migrations/0001_initial.py
|
dcee0bc7423816df2b8733388e92bfed9f9a7652
|
[] |
no_license
|
tianjiajun123/myBlog
|
a46718ed3fde114bfa282428d0c8b7f36b5adce9
|
2cd67bc0e85974cda477c366db9f7051b8b11132
|
refs/heads/master
| 2023-02-15T11:12:37.266980 | 2021-01-06T10:58:50 | 2021-01-06T10:58:50 | 326,363,135 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,498 |
py
|
# Generated by Django 3.1.4 on 2021-01-03 20:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema migration: creates the ``Articles`` model.

    Auto-generated by Django 3.1.4 on 2021-01-03; edit with care.
    """

    initial = True

    dependencies = [
        # Articles.author is a FK onto the project's active user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Articles',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=128, verbose_name='文章标题')),
                ('img', models.ImageField(upload_to='', verbose_name='文章配图')),
                ('abstract', models.TextField(verbose_name='文章摘要')),
                ('content', models.TextField(verbose_name='文章内容')),
                ('visited', models.IntegerField(verbose_name='文章访问量')),
                # Timestamps are maintained automatically on insert/update.
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('modified_at', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='文章作者')),
            ],
            options={
                'verbose_name': '文章',
                'verbose_name_plural': '文章',
                # Newest articles first by default.
                'ordering': ('-created_at',),
            },
        ),
    ]
|
[
"[email protected]"
] | |
d0999586ccbd5cec385e34f8a7edbf19decb2542
|
4443d08048f9980045e5f0541c69db0d756391d1
|
/partner_ngos/programs_management/doctype/project_indicator/test_project_indicator.py
|
886c2b9e33e38f60e194f3c716e3dc39fa36f037
|
[
"MIT"
] |
permissive
|
mohsinalimat/partner_ngos
|
dea0db6e0f9718e7ffc69f7171bdb1603a055d72
|
4a345fb6989ff5a21db7fca07aa4e5174dca8f59
|
refs/heads/master
| 2023-03-15T13:15:40.571368 | 2020-07-09T07:22:59 | 2020-07-09T07:22:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 222 |
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Akram Mutaher and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestProjectIndicator(unittest.TestCase):
    """Placeholder test case for the Project Indicator doctype (no tests yet)."""
    pass
|
[
"[email protected]"
] | |
c5ef9f3c896720bfe3cbcd8bf8087394c0635cc3
|
343bdaddfc66c6316e2cee490e9cedf150e3a5b7
|
/0101_0200/0140/0140.py
|
fcfbf5535dac6588d0fb41901a5501b3284bd7d6
|
[] |
no_license
|
dm-alexi/acmp
|
af7f6b4484b78f5922f3b464406a0ba5dea0d738
|
3fa0016d132adfeab7937b3e8c9687a34642c93a
|
refs/heads/master
| 2021-07-09T15:14:25.857086 | 2020-10-20T19:08:54 | 2020-10-20T19:08:54 | 201,908,038 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 484 |
py
|
from math import inf
# Negative-cycle detection via Floyd-Warshall; "100000" marks a missing edge.
with open("input.txt", "r") as fin, open("output.txt", "w") as fout:
    n = int(fin.readline())
    dist = []
    for _ in range(n):
        row = [inf if tok == "100000" else int(tok) for tok in fin.readline().split()]
        dist.append(row)
    # Relax every pair through each intermediate vertex k.
    for k in range(n):
        for i in range(n):
            for j in range(n):
                a, b = dist[i][k], dist[k][j]
                if a < inf and b < inf and a + b < dist[i][j]:
                    dist[i][j] = a + b
    # A negative value on the diagonal means a negative cycle exists.
    fout.write("YES" if any(dist[v][v] < 0 for v in range(n)) else "NO")
|
[
"[email protected]"
] | |
e3777872b94428267992a01b44c30ba2643b99bc
|
c91b68be796a9835c528856b6f5fa7b56d2af451
|
/examples/mnist_convnet.py
|
d9e994d350811b397b81ced710890fceedbf32db
|
[
"Apache-2.0"
] |
permissive
|
syzh1991/tensorpack
|
fe61cb46fd40aa0cb9f8a0a3ea4ea3bb833cb3c5
|
174c3fc9d60b0cbeccac2ae3e73e73d6e788dbe0
|
refs/heads/master
| 2021-01-17T00:24:08.366350 | 2016-04-19T06:25:57 | 2016-04-19T06:25:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,520 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: mnist_convnet.py
# Author: Yuxin Wu <[email protected]>
import tensorflow as tf
import numpy as np
import os, sys
import argparse
from tensorpack.train import *
from tensorpack.models import *
from tensorpack.utils import *
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *
from tensorpack.tfutils import *
from tensorpack.callbacks import *
from tensorpack.dataflow import *
"""
MNIST ConvNet example.
about 0.6% validation error after 30 epochs.
"""
BATCH_SIZE = 128
IMAGE_SIZE = 28
class Model(ModelDesc):
    """MNIST ConvNet: 4 conv layers (PReLU) + 2 FC layers, dropout at train time."""

    def _get_input_vars(self):
        # 28x28 grayscale images and their integer class labels.
        return [InputVar(tf.float32, (None, IMAGE_SIZE, IMAGE_SIZE), 'input'),
                InputVar(tf.int32, (None,), 'label')
               ]

    def _get_cost(self, input_vars, is_training):
        """Build the graph; return total cost = cross-entropy + weight decay."""
        is_training = bool(is_training)
        # Dropout active only during training.
        keep_prob = tf.constant(0.5 if is_training else 1.0)

        image, label = input_vars
        image = tf.expand_dims(image, 3)    # add a single channel

        nl = PReLU.f
        # Rescale pixels to [-1, 1] -- assumes input is in [0, 1]; TODO confirm.
        image = image * 2 - 1
        l = Conv2D('conv0', image, out_channel=32, kernel_shape=3, nl=nl,
                   padding='VALID')
        l = MaxPooling('pool0', l, 2)
        l = Conv2D('conv1', l, out_channel=32, kernel_shape=3, nl=nl, padding='SAME')
        l = Conv2D('conv2', l, out_channel=32, kernel_shape=3, nl=nl, padding='VALID')
        l = MaxPooling('pool1', l, 2)
        l = Conv2D('conv3', l, out_channel=32, kernel_shape=3, nl=nl, padding='VALID')
        l = FullyConnected('fc0', l, 512)
        l = tf.nn.dropout(l, keep_prob)

        # fc will have activation summary by default. disable this for the output layer
        logits = FullyConnected('fc1', l, out_dim=10, nl=tf.identity)
        # Named softmax output node exposed in the graph for inference.
        prob = tf.nn.softmax(logits, name='prob')

        cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, label)
        cost = tf.reduce_mean(cost, name='cross_entropy_loss')
        tf.add_to_collection(MOVING_SUMMARY_VARS_KEY, cost)

        # compute the number of failed samples, for ClassificationError to use at test time
        wrong = prediction_incorrect(logits, label)
        nr_wrong = tf.reduce_sum(wrong, name='wrong')
        # monitor training error
        tf.add_to_collection(
            MOVING_SUMMARY_VARS_KEY, tf.reduce_mean(wrong, name='train_error'))

        # weight decay on all W of fc layers
        wd_cost = tf.mul(1e-5,
                         regularize_cost('fc.*/W', tf.nn.l2_loss),
                         name='regularize_loss')
        tf.add_to_collection(MOVING_SUMMARY_VARS_KEY, wd_cost)

        add_param_summary([('.*/W', ['histogram'])])   # monitor histogram of all W
        return tf.add_n([wd_cost, cost], name='cost')
def get_config():
    """Assemble the tensorpack TrainConfig: data, session, optimizer, callbacks."""
    basename = os.path.basename(__file__)
    # Log under train_log/<script name without extension>.
    logger.set_logger_dir(
        os.path.join('train_log', basename[:basename.rfind('.')]))

    # prepare dataset
    dataset_train = BatchData(dataset.Mnist('train'), 128)
    # remainder=True keeps the final partial batch so the whole test set is used.
    dataset_test = BatchData(dataset.Mnist('test'), 256, remainder=True)
    step_per_epoch = dataset_train.size()

    # prepare session
    sess_config = get_default_sess_config()
    sess_config.gpu_options.per_process_gpu_memory_fraction = 0.5

    # Decay the learning rate by 0.3 every 10 epochs' worth of steps.
    lr = tf.train.exponential_decay(
        learning_rate=1e-3,
        global_step=get_global_step_var(),
        decay_steps=dataset_train.size() * 10,
        decay_rate=0.3, staircase=True, name='learning_rate')
    tf.scalar_summary('learning_rate', lr)

    return TrainConfig(
        dataset=dataset_train,
        optimizer=tf.train.AdamOptimizer(lr),
        callbacks=Callbacks([
            StatPrinter(),
            ModelSaver(),
            # Evaluate cost and error rate on the test set each epoch.
            InferenceRunner(dataset_test,
                [ScalarStats('cost'), ClassificationError() ])
        ]),
        session_config=sess_config,
        model=Model(),
        step_per_epoch=step_per_epoch,
        max_epoch=100,
    )
if __name__ == '__main__':
    # CLI: choose GPU(s) and optionally resume from a checkpoint.
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.') # nargs='*' in multi mode
    parser.add_argument('--load', help='load model')
    args = parser.parse_args()
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    else:
        # Default to the first GPU.
        os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    with tf.Graph().as_default():
        config = get_config()
        # Optionally restore weights from a saved model.
        if args.load:
            config.session_init = SaverRestore(args.load)
        SimpleTrainer(config).train()
|
[
"[email protected]"
] | |
d06d7c4a50a9d2ed62e1339c2c422ef078e2e611
|
7410903c6cd5ef35c592af00c934fb21c369cbf2
|
/00_Code/01_LeetCode/69_Sqrt.py
|
4f2aa947d9e808ddbc9837a59a51ea6e638dbf3b
|
[
"MIT"
] |
permissive
|
KartikKannapur/Algorithms
|
f4e4726170599db0622d18e8c06a382e9bce9e77
|
66e3c8112826aeffb78bd74d02be1a8d1e478de8
|
refs/heads/master
| 2020-12-25T18:32:41.086518 | 2020-10-19T02:59:47 | 2020-10-19T02:59:47 | 93,961,043 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 806 |
py
|
# #Implement int sqrt(int x).
# #Compute and return the square root of x.
# #x is guaranteed to be a non-negative integer.
# #Your runtime beats 81.07 % of python submissions.
class Solution(object):
    def mySqrt(self, x):
        """Return the integer square root (floor of sqrt) of x.

        :type x: int
        :rtype: int
        """
        # Binary search for the unique r with r*r <= x < (r+1)*(r+1).
        lo, hi = 0, x
        while lo <= hi:
            candidate = (lo + hi) // 2
            sq = candidate * candidate
            if sq <= x and x < (candidate + 1) * (candidate + 1):
                return candidate
            if sq > x:
                hi = candidate
            else:
                lo = candidate + 1
|
[
"[email protected]"
] | |
74108a22b91ad3b4c6b46bc638f052f5195fb339
|
e030b26ea0f45eda5a25bf18051e9231e604fdd5
|
/doc/source/sphinxext/numpy_ext/docscrape_sphinx.py
|
bcf7e70731cc798b73e4f22a48c25d361f65c6d1
|
[
"CECILL-B",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
neurospin/piws
|
d246dc1925c563964309e53f36fc44e48f929cf7
|
4ec6f60c6343623a82761c90c74642b4b372ffd1
|
refs/heads/master
| 2021-01-17T03:49:35.787846 | 2018-10-15T09:44:39 | 2018-10-15T09:44:39 | 32,163,903 | 0 | 17 |
NOASSERTION
| 2020-10-14T12:56:38 | 2015-03-13T15:29:57 |
HTML
|
UTF-8
|
Python
| false | false | 8,004 |
py
|
import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString
from docscrape import FunctionDoc
from docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
    """NumpyDocString subclass that renders parsed sections as Sphinx/reST."""

    def __init__(self, docstring, config=None):
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        NumpyDocString.__init__(self, docstring, config=config)

    # string conversion routines
    def _str_header(self, name, symbol='`'):
        return ['.. rubric:: ' + name, '']

    def _str_field_list(self, name):
        return [':' + name + ':']

    def _str_indent(self, doc, indent=4):
        """Return *doc* with every line indented by *indent* spaces."""
        out = []
        for line in doc:
            out += [' ' * indent + line]
        return out

    def _str_signature(self):
        # Signature output is deliberately disabled.
        # FIX: removed the unreachable if/else that used to follow this
        # unconditional return (dead code).
        return ['']

    def _str_summary(self):
        return self['Summary'] + ['']

    def _str_extended_summary(self):
        return self['Extended Summary'] + ['']

    def _str_param_list(self, name):
        """Render a parameter-style section (Parameters/Returns/Raises)."""
        out = []
        if self[name]:
            out += self._str_field_list(name)
            out += ['']
            for param, param_type, desc in self[name]:
                out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                          param_type)])
                out += ['']
                out += self._str_indent(desc, 8)
                out += ['']
        return out

    @property
    def _obj(self):
        # The wrapped object, whichever attribute the subclass stored it in.
        if hasattr(self, '_cls'):
            return self._cls
        elif hasattr(self, '_f'):
            return self._f
        return None

    def _str_member_list(self, name):
        """
        Generate a member listing, autosummary:: table where possible,
        and a table where not.
        """
        out = []
        if self[name]:
            out += ['.. rubric:: %s' % name, '']
            prefix = getattr(self, '_name', '')

            if prefix:
                prefix = '~%s.' % prefix

            autosum = []
            others = []
            for param, param_type, desc in self[name]:
                param = param.strip()
                # Members that exist on the object go in the autosummary;
                # the rest are rendered as a plain table.
                if not self._obj or hasattr(self._obj, param):
                    autosum += [" %s%s" % (prefix, param)]
                else:
                    others.append((param, param_type, desc))

            if autosum:
                # GAEL: Toctree commented out below because it creates
                # hundreds of sphinx warnings
                # out += ['.. autosummary::', ' :toctree:', '']
                out += ['.. autosummary::', '']
                out += autosum

            if others:
                maxlen_0 = max([len(x[0]) for x in others])
                maxlen_1 = max([len(x[1]) for x in others])
                hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
                fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
                n_indent = maxlen_0 + maxlen_1 + 4
                out += [hdr]
                for param, param_type, desc in others:
                    out += [fmt % (param.strip(), param_type)]
                    out += self._str_indent(desc, n_indent)
                out += [hdr]
            out += ['']
        return out

    def _str_section(self, name):
        """Render a free-text section (e.g. Notes) under a rubric header."""
        out = []
        if self[name]:
            out += self._str_header(name)
            out += ['']
            content = textwrap.dedent("\n".join(self[name])).split("\n")
            out += content
            out += ['']
        return out

    def _str_see_also(self, func_role):
        out = []
        if self['See Also']:
            see_also = super(SphinxDocString, self)._str_see_also(func_role)
            out = ['.. seealso::', '']
            out += self._str_indent(see_also[2:])
        return out

    def _str_warnings(self):
        out = []
        if self['Warnings']:
            out = ['.. warning::', '']
            out += self._str_indent(self['Warnings'])
        return out

    def _str_index(self):
        idx = self['index']
        out = []
        if len(idx) == 0:
            return out

        out += ['.. index:: %s' % idx.get('default', '')]
        # FIX: use items() instead of the Python-2-only iteritems(); items()
        # works on both Python 2 and 3.
        for section, references in idx.items():
            if section == 'default':
                continue
            elif section == 'refguide':
                out += [' single: %s' % (', '.join(references))]
            else:
                out += [' %s: %s' % (section, ','.join(references))]
        return out

    def _str_references(self):
        out = []
        if self['References']:
            out += self._str_header('References')
            if isinstance(self['References'], str):
                self['References'] = [self['References']]
            out.extend(self['References'])
            out += ['']
            # Latex collects all references to a separate bibliography,
            # so we need to insert links to it
            # NOTE(review): lexicographic version compare -- fine for the
            # sphinx versions this targets, but not a general version check.
            if sphinx.__version__ >= "0.6":
                out += ['.. only:: latex', '']
            else:
                out += ['.. latexonly::', '']
            items = []
            for line in self['References']:
                m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
                if m:
                    items.append(m.group(1))
            out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
        return out

    def _str_examples(self):
        examples_str = "\n".join(self['Examples'])

        # Render as a matplotlib plot directive when plots are enabled and
        # the example imports matplotlib without already using plot::.
        if (self.use_plots and 'import matplotlib' in examples_str
                and 'plot::' not in examples_str):
            out = []
            out += self._str_header('Examples')
            out += ['.. plot::', '']
            out += self._str_indent(self['Examples'])
            out += ['']
            return out
        else:
            return self._str_section('Examples')

    def __str__(self, indent=0, func_role="obj"):
        """Assemble all sections into the final reST string."""
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Raises'):
            out += self._str_param_list(param_list)
        out += self._str_warnings()
        out += self._str_see_also(func_role)
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_examples()
        for param_list in ('Attributes', 'Methods'):
            out += self._str_member_list(param_list)
        out = self._str_indent(out, indent)
        return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    """Sphinx-flavoured docstring wrapper for plain functions."""

    def __init__(self, obj, doc=None, config=None):
        # FIX: avoid the shared mutable default `config={}`; build a fresh
        # dict per call instead.
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
    """Sphinx-flavoured docstring wrapper for classes."""

    def __init__(self, obj, doc=None, func_doc=None, config=None):
        # FIX: avoid the shared mutable default `config={}`; build a fresh
        # dict per call instead.
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        # NOTE(review): func_doc is accepted but not forwarded (hardcoded
        # None below) -- preserved from the original; confirm before changing.
        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
    """Sphinx docstring wrapper for an arbitrary object's raw docstring."""

    def __init__(self, obj, doc=None, config=None):
        # Keep a reference to the wrapped object for the _obj property.
        self._f = obj
        SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config=None):
    """Return the appropriate Sphinx doc wrapper for *obj*.

    :param obj: The object to document.
    :param what: Kind override ('class', 'module', 'function', 'object');
        auto-detected from *obj* when None.
    :param doc: Docstring override; fetched via pydoc when omitted.
    :param config: Optional configuration dict passed to the wrapper.
    """
    # FIX: avoid the shared mutable default `config={}`.
    config = {} if config is None else config
    if what is None:
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            what = 'function'
        else:
            what = 'object'
    if what == 'class':
        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
                              config=config)
    elif what in ('function', 'method'):
        return SphinxFunctionDoc(obj, doc=doc, config=config)
    else:
        if doc is None:
            doc = pydoc.getdoc(obj)
        return SphinxObjDoc(obj, doc, config=config)
|
[
"[email protected]"
] | |
e4c2ae41b7aec6371b17182c26cbfda22f852b60
|
b466a62a6b8151937212688c09b3a5704eaa7466
|
/Python OOP - Exam Preparation - 2 April 2020/tests/test_battlefield.py
|
86b729b594d2a13d2cc6756a5da43117a61aedc9
|
[
"MIT"
] |
permissive
|
DiyanKalaydzhiev23/OOP---Python
|
89efa1a08056375496278dac3af97e10876f7728
|
7ac424d5fb08a6bd28dc36593e45d949b3ac0cd0
|
refs/heads/main
| 2023-07-08T08:23:05.148293 | 2021-08-13T12:09:12 | 2021-08-13T12:09:12 | 383,723,287 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,693 |
py
|
from unittest import TestCase, main
from project.battle_field import BattleField
from project.controller import Controller
class TestBattleField(TestCase):
    """Integration tests for BattleField battles driven through Controller."""

    def setUp(self):
        # A beginner and an advanced player, each dealt cards via the API.
        self.c = Controller()
        self.c.add_player("Beginner", "pesho")
        self.c.add_player("Advanced", "ivan")
        self.c.add_card("Magic", "boom")
        self.c.add_card("Trap", "oops")
        self.c.add_player_card("pesho", "boom")
        self.c.add_player_card("ivan", "oops")
        self.c.add_player_card("ivan", "boom")
        self.attacker = self.c.player_repository.find("pesho")
        self.enemy = self.c.player_repository.find("ivan")
        self.b = BattleField()

    def test_attacker_enemy_dead(self):
        # Fighting with dead players must raise with the expected message.
        self.attacker.health = 0
        self.enemy.health = 0
        with self.assertRaises(ValueError) as ve:
            self.c.fight("pesho", "ivan")
        self.assertEqual("Player is dead!", str(ve.exception))

    def test_increase_beginner(self):
        self.b.increase_beginner(self.attacker)
        self.assertEqual(90, self.attacker.health)

    def test_getting_bonus_points(self):
        self.b.get_bonus_points(self.attacker)
        self.b.get_bonus_points(self.enemy)
        self.assertEqual(130, self.attacker.health)
        self.assertEqual(335, self.enemy.health)

    def test_attacker_is_dead_after_fight(self):
        self.c.fight("pesho", "ivan")
        self.c.fight("pesho", "ivan")
        self.assertTrue(self.attacker.is_dead)

    def test_enemy_is_dead_after_fight(self):
        # NOTE(review): this asserts attacker.is_dead like the test above --
        # presumably it was meant to check self.enemy.is_dead; verify intent.
        self.c.fight("ivan", "pesho")
        self.c.fight("ivan", "pesho")
        self.assertTrue(self.attacker.is_dead)
|
[
"[email protected]"
] | |
ac4c91a50fd1f04ce141715e5289aa64f8765f8f
|
0bb474290e13814c2498c086780da5096453da05
|
/agc034/B/main.py
|
dcdc2a07ea70836db87eccb7f03314c35c2aad03
|
[] |
no_license
|
ddtkra/atcoder
|
49b6205bf1bf6a50106b4ae94d2206a324f278e0
|
eb57c144b5c2dbdd4abc432ecd8b1b3386244e30
|
refs/heads/master
| 2022-01-25T15:38:10.415959 | 2020-03-18T09:22:08 | 2020-03-18T09:22:08 | 208,825,724 | 1 | 0 | null | 2022-01-21T20:10:20 | 2019-09-16T14:51:01 |
Python
|
UTF-8
|
Python
| false | false | 721 |
py
|
#!/usr/bin/env python3
import sys
def solve(s: str):
    """AGC034-B: collapse each 'BC' to a token, then for every such token
    add the length of the run of consecutive 'A's immediately before it.
    Prints the answer."""
    compact = s.replace('BC', 'X')
    total = 0
    run_of_a = 0
    for ch in compact:
        if ch == 'A':
            run_of_a += 1
        elif ch == 'X':
            total += run_of_a
        else:
            # Any other character breaks the run of A's.
            run_of_a = 0
    print(total)
    return
# Generated by 1.1.4 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
    """Read the single input token from stdin and run the solver."""
    words = (tok for line in sys.stdin for tok in line.split())
    s = next(words)  # type: str
    solve(s)


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
18169718282ec7bfbfb2b7d2c8bd1613b7b9aa52
|
9b8e2992a38f591032997b5ced290fe1acc3ad94
|
/lcs4t.py
|
ede392018cce26478bbc4a6e676503d973b8be70
|
[] |
no_license
|
girishdhegde/aps-2020
|
c694443c10d0d572c8022dad5a6ce735462aaa51
|
fb43d8817ba16ff78f93a8257409d77dbc82ced8
|
refs/heads/master
| 2021-08-08T04:49:18.876187 | 2021-01-02T04:46:20 | 2021-01-02T04:46:20 | 236,218,152 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,238 |
py
|
from collections import defaultdict
import math
# For each test case: given n coin values and a target total, greedily pick
# counts for the non-1 denominations (largest first) and report YES with the
# per-denomination counts, or NO.  Exact problem semantics unclear from here;
# logic preserved as-is.
t = int(input())
for i in range(t):
    n, total = map(int, input().split())
    coin = []
    # values maps each denomination to a one-element count list.
    values = defaultdict(list)
    y = list(map(int, input().split()))
    for j in range(n):
        coin.append(y[j])
        values[y[j]].append(0)
    # Keep only denominations other than 1.
    coins = []
    for j in range(n):
        if coin[j] != 1:
            coins.append(coin[j])
    print("coins:", coins)  # NOTE(review): looks like leftover debug output
    if(len(coins) == 1):
        # Single non-1 denomination: answer exists only when it does NOT
        # divide the total evenly (behavior as written; verify intent).
        if(total % coins[0] == 0):
            print("NO")
        else:
            values[coins[0]][0] = math.ceil(total / coins[0])
            print("YES", end=" ")
            x = list(values.values())
            for h in x:
                print(h[0], end=" ")
    else:
        # Largest denominations first.
        coins = sorted(coins, reverse=True)
        flag = 0
        for c in coins:
            if total % c == 0:
                # Take one less than the exact quotient and continue.
                d = total / c - 1
                values[c][0] = int(d)
                total -= d * c
            else:
                flag = 1
                d = math.ceil(total / c)
                values[c][0] = int(d)
                break
        if flag == 0:
            print("NO")
        else:
            print("YES", end=" ")
            x = list(values.values())
            for h in x:
                print(h[0], end=" ")
|
[
"[email protected]"
] | |
eda051d72d323b88e5d07e61bdabdbd16c2948e5
|
d6a3186af0aaa86b3936f1d98730b7120918b962
|
/testing_practice/tests_django/car_v2.py
|
91228379ab91290fe1f4b03df8524ddd44bd8be1
|
[] |
no_license
|
kranthy09/testing
|
edd6376733723ef58a8a5ecece31cbaf030ca45d
|
ecdd5ce3b3688b42181d5ccb74003ed97e79fbc9
|
refs/heads/master
| 2022-07-02T23:58:09.308746 | 2020-05-05T16:58:45 | 2020-05-05T16:58:45 | 261,354,583 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,592 |
py
|
class Car:
    """A car with engine state, speed, and validated driving parameters."""

    def __init__(self, max_speed, acceleration, tyre_friction, color=None):
        self._color = color
        # Each physical parameter must be strictly positive.
        for attr_name, attr_value in (("max_speed", max_speed),
                                      ("acceleration", acceleration),
                                      ("tyre_friction", tyre_friction)):
            self.is_valid_data(attr_name, attr_value)
        self._max_speed = max_speed
        self._acceleration = acceleration
        self._tyre_friction = tyre_friction
        self._is_engine_started = False
        self._current_speed = 0

    def start_engine(self):
        """Turn the engine on; complain if it is already running."""
        if self._is_engine_started:
            print("Stop the engine to start_engine")
            return
        self._is_engine_started = True

    def accelerate(self):
        """Increase speed by one acceleration step, capped at max_speed."""
        if not self._is_engine_started:
            print("Start the engine to accelerate")
            return
        self._current_speed = min(self._current_speed + self._acceleration,
                                  self._max_speed)

    def apply_brakes(self):
        """Decrease speed by the tyre friction, never below zero."""
        if not self._is_engine_started:
            print("Start the engine to apply_breaks")
            return
        self._current_speed = max(self._current_speed - self._tyre_friction, 0)

    def sound_horn(self):
        if not self._is_engine_started:
            print("Start the engine to sound_horn")
            return
        print("Beep Beep")

    def stop_engine(self):
        """Turn the engine off; complain if it is already stopped."""
        if not self._is_engine_started:
            print("Start the engine to stop_engine")
            return
        self._is_engine_started = False

    @property
    def max_speed(self):
        return self._max_speed

    @property
    def acceleration(self):
        return self._acceleration

    @property
    def tyre_friction(self):
        return self._tyre_friction

    @property
    def color(self):
        return self._color

    @property
    def is_engine_started(self):
        return self._is_engine_started

    @property
    def current_speed(self):
        return self._current_speed

    @staticmethod
    def is_valid_data(args, value):
        """Return True for positive *value*; raise ValueError otherwise."""
        if value <= 0:
            raise ValueError(f"Invalid value for {args}")
        return True
class Truck(Car):
    """A car that can carry cargo up to a maximum weight, loaded at rest."""

    def __init__(self, max_speed, acceleration, tyre_friction, max_cargo_weight, color=None):
        super().__init__(max_speed, acceleration, tyre_friction, color)
        self.is_valid_data("max_cargo_weight", max_cargo_weight)
        self._max_cargo_weight = max_cargo_weight
        self._weight_in_cargo = 0

    def sound_horn(self):
        """Trucks honk instead of beeping."""
        if not self._is_engine_started:
            print("Start the engine to sound_horn")
            return
        print("Honk Honk")

    def load(self, cargo_weight):
        """Add cargo while stationary; refuse loads over the max limit."""
        self.is_valid_data("cargo_weight", cargo_weight)
        if self._current_speed:
            print("Cannot load cargo during motion")
            return
        if self._weight_in_cargo + cargo_weight > self._max_cargo_weight:
            print(f"Cannot load cargo more than max limit: {self._max_cargo_weight}")
        else:
            self._weight_in_cargo += cargo_weight

    def unload(self, cargo_weight):
        """Remove cargo while stationary; refuse to drop below zero."""
        self.is_valid_data("cargo_weight", cargo_weight)
        if self._current_speed:
            print("Cannot unload cargo during motion")
            return
        if self._weight_in_cargo - cargo_weight < 0:
            print(f"Cannot unload cargo less than min limit: {0}")
        else:
            self._weight_in_cargo -= cargo_weight

    @property
    def max_cargo_weight(self):
        return self._max_cargo_weight

    @property
    def weight_in_cargo(self):
        return self._weight_in_cargo
class RaceCar(Car):
    """A car that earns nitro while braking at high speed and spends it
    for an extra acceleration boost."""

    def __init__(self, max_speed, acceleration, tyre_friction, color=None):
        super().__init__(max_speed, acceleration, tyre_friction, color)
        self._nitro = 0

    def accelerate(self):
        """Accelerate normally, then burn 10 nitro (if any) for a ~30% boost."""
        import math
        super().accelerate()
        if not self._nitro:
            return
        # NOTE(review): the nitro boost also applies when the engine is off
        # (base accelerate only prints in that case) -- preserved as-is.
        boost = math.ceil(self._acceleration * 0.3)
        self._nitro -= 10
        self._current_speed = min(self._current_speed + boost, self._max_speed)

    def apply_brakes(self):
        """Braking from above half of top speed earns 10 nitro."""
        if self._current_speed > self._max_speed / 2:
            self._nitro += 10
        super().apply_brakes()

    def sound_horn(self):
        if not self._is_engine_started:
            print("Start the engine to sound_horn")
            return
        print("Peep Peep\nBeep Beep")

    @property
    def nitro(self):
        return self._nitro
|
[
"[email protected]"
] | |
de7ce52b41660eee7eea8ff7603241674cd09c47
|
9da8754002fa402ad8e6f25659978bd269bbcec8
|
/src/622A/cdf_622A.py
|
696901db63211acbb043bb8a0098147f0db843e9
|
[
"MIT"
] |
permissive
|
kopok2/CodeforcesSolutionsPython
|
a00f706dbf368ba0846c8ae86d4145b5dd3e1613
|
35bec0dbcff47765b123b5fe60476014376153df
|
refs/heads/master
| 2023-02-02T03:08:22.097651 | 2020-12-17T22:00:50 | 2020-12-17T22:00:50 | 196,035,812 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 645 |
py
|
import math
class CodeforcesTask622ASolution:
    """Codeforces 622A: value at 1-based position n of 1, 1 2, 1 2 3, ..."""

    def __init__(self):
        self.result = ''
        self.n = 0

    def read_input(self):
        # One integer: the position in the sequence.
        self.n = int(input())

    def process_task(self):
        """Find the last complete triangular block before n; the answer is
        the offset of n past that block."""
        k = int(math.sqrt(self.n))
        tri = (k + k ** 2) / 2  # k-th triangular number (float, as upstream)
        while tri < self.n:
            k += 1
            tri = (k + k ** 2) / 2
        k -= 1
        offset = self.n - (k + k ** 2) / 2
        self.result = str(int(offset))

    def get_result(self):
        return self.result
if __name__ == "__main__":
    # Script entry point: read n from stdin, solve, print the answer.
    Solution = CodeforcesTask622ASolution()
    Solution.read_input()
    Solution.process_task()
    print(Solution.get_result())
|
[
"[email protected]"
] | |
249ce324bde793fd41492fa2f8d1d0c2ce88c9cd
|
ed97fb5c71da7ed89235432e3971bb0ef6064f8b
|
/algorithms/python/290.py
|
3c1bbff0685733f3cd42f905b78b0d011cbfcd85
|
[
"MIT"
] |
permissive
|
viing937/leetcode
|
8241be4f8bc9234a882b98ada2e5d13b0ebcca68
|
b07f7ba69f3d2a7e294f915934db302f43c0848f
|
refs/heads/master
| 2023-08-31T18:25:06.443397 | 2023-08-31T15:31:38 | 2023-08-31T15:31:38 | 37,374,931 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 559 |
py
|
class Solution(object):
    def wordPattern(self, pattern, str):
        """
        Return True iff there is a bijection between the characters of
        *pattern* and the space-separated words of *str* (LeetCode 290).

        :type pattern: str
        :type str: str
        :rtype: bool
        """
        words = str.split(' ')
        if len(pattern) != len(words):
            return False
        # FIX: the original used hashmap.values().count(...), which fails on
        # Python 3 (dict_values has no .count) and was O(n) per character.
        # Track both directions of the mapping explicitly instead.
        char_to_word = {}
        word_to_char = {}
        for ch, word in zip(pattern, words):
            if char_to_word.setdefault(ch, word) != word:
                return False
            if word_to_char.setdefault(word, ch) != ch:
                return False
        return True
|
[
"[email protected]"
] | |
f5fccd5cf37b249aa0bd6ec0df11050ccceac4ba
|
226b1c73a706f4734834196d18305d4d2c873589
|
/synlib/descriptions/INVX12.py
|
d9548d8ad876fcb48facd24d7fd0a2450a47ae9a
|
[] |
no_license
|
ocakgun/vlsistuff
|
43b4b07ae186b8d2360d11c57cd10b861e96bcbe
|
776c07f5d0c40fe7d410b5c85e7381017d4dab64
|
refs/heads/master
| 2022-06-13T14:40:22.641310 | 2020-05-08T11:09:00 | 2020-05-08T11:09:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 392 |
py
|
# Synthesis-library description for cell INVX12: an inverter (Y = !A),
# presumably the 12x drive-strength variant -- name suggests this; verify.
Desc = cellDescClass("INVX12")
Desc.properties["cell_leakage_power"] = "3253.878540"
Desc.properties["cell_footprint"] = "inv"
Desc.properties["area"] = "43.243200"
Desc.pinOrder = ['A', 'Y']
Desc.add_arc("A", "Y", "combi")  # combinational timing arc A -> Y
Desc.set_job("inv")  # (!A)
Desc.add_param("area", 43.243200);
Desc.add_pin("A", "input")
Desc.add_pin("Y", "output")
Desc.add_pin_func("Y", "unknown")
# Register the cell in the global library table.
CellLib["INVX12"] = Desc
|
[
"[email protected]"
] | |
66cba7b1d697df1b112e0741f078b2d82f7853cf
|
a0801d0e7325b31f0383fc68517e208680bb36d6
|
/ProjectEuler/113.py
|
67180d3f1e179379f2c22641ec3d5bb209b71d03
|
[] |
no_license
|
conormccauley1999/CompetitiveProgramming
|
bd649bf04438817c7fa4755df2c2c7727273b073
|
a7e188767364be40f625612af3d16182f2d8d4de
|
refs/heads/master
| 2023-05-14T13:19:32.678134 | 2023-05-11T16:07:33 | 2023-05-11T16:07:33 | 179,089,010 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 523 |
py
|
# Problem 113
def cnt(length, inc):
    """Count `length`-digit numbers whose digit sequence is monotone
    (non-strict): one direction for inc=True, the other for inc=False.
    The seed excludes digit 0 at the first dp position."""
    # prev[d] = number of valid completions given the current digit is d.
    prev = [0] + [1] * 9
    for _ in range(1, length):
        if inc:
            cur = [sum(prev[d:]) for d in range(10)]
        else:
            cur = [sum(prev[:d + 1]) for d in range(10)]
        prev = cur
    return sum(prev)


# Project Euler 113: a number is non-bouncy iff increasing or decreasing;
# constant numbers (9 per length) are counted by both, hence the -9.
print(sum(cnt(n, True) + cnt(n, False) - 9 for n in range(1, 101)))
|
[
"[email protected]"
] | |
1852e9dffcb63b063f786faeffaec2ee72e25153
|
390d19c3159133d8c688396cb11b4ed3f8178d09
|
/BaekJoon/단계별로 풀어보기/Stack & Queue/1874_스택 수열.py
|
a3b4cc9eb6adb72cb147d6257fb3a6768140f325
|
[] |
no_license
|
JJayeee/CodingPractice
|
adba64cbd1d030b13a877f0b2e5ccc1269cb2e11
|
60f8dce48c04850b9b265a9a31f49eb6d9fc13c8
|
refs/heads/master
| 2021-08-16T17:14:01.161390 | 2021-07-16T00:42:18 | 2021-07-16T00:42:18 | 226,757,079 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 324 |
py
|
# BOJ 1874: decide whether the target sequence can be produced by pushing
# 1..n onto a stack in order; print the push ('+') / pop ('-') trace or NO.
n = int(input())
targets = [int(input()) for _ in range(n)]

ops = []       # recorded '+'/'-' operations
pending = []   # stack of pushed-but-unpopped numbers
pos = 0        # index of the next target value to emit

for value in range(1, n + 1):
    pending.append(value)
    ops.append('+')
    # Pop greedily while the stack top matches the next target.
    while pending and pending[-1] == targets[pos]:
        pending.pop()
        ops.append('-')
        pos += 1

if pending:
    print('NO')
else:
    print('\n'.join(ops))
|
[
"[email protected]"
] | |
3f80baeaee8814fce5876d1fd05427e8e5961cfc
|
281a10505f8044dbed73f11ed731bd0fbe23e0b5
|
/expenseApp/forms.py
|
a27cb1ba630beb7e285f40eca794838affed173f
|
[
"Apache-2.0"
] |
permissive
|
cs-fullstack-fall-2018/project3-django-jpark1914
|
7c6f57ab5f8055c11ac5b9d3c8bf0aa5057008d7
|
53bca13243d7e50263ec25b2fb8a299a8bbada1c
|
refs/heads/master
| 2020-04-02T00:59:33.254360 | 2018-10-29T04:58:42 | 2018-10-29T04:58:42 | 153,831,819 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 873 |
py
|
from .models import AccountModel, UserSetup, TransactionModel
from django import forms
class AccountForm(forms.ModelForm):
    """Create/edit an account; monetary fields are hidden and managed by views."""

    class Meta:
        model = AccountModel
        fields = ['account_name', 'balance', 'savings', 'deposit', 'expense']
        # Hidden: these values are computed server-side, not typed by users.
        widgets = {'balance': forms.HiddenInput, 'savings': forms.HiddenInput(), 'deposit': forms.HiddenInput(), 'expense': forms.HiddenInput()}
class DepositForm(forms.ModelForm):
    """Record a deposit; the expense field is carried along but hidden."""

    class Meta:
        model = AccountModel
        fields = ['deposit', 'expense']
        widgets = {'expense': forms.HiddenInput()}
class ExpenseForm(forms.ModelForm):
    """Record an expense against an account."""

    class Meta:
        model = AccountModel
        fields = ['expense']
class UserForm(forms.ModelForm):
    """Sign-up form for UserSetup with a masked password input."""

    class Meta:
        model = UserSetup
        fields = ['name', 'email', 'password']
        widgets = {
            # Render the password field masked in the browser.
            'password': forms.PasswordInput(),
        }
|
[
"[email protected]"
] | |
e95f81c2edaab21bbe2f5f9e621eae62b13fdc86
|
9f1039075cc611198a988034429afed6ec6d7408
|
/tensorflow-stubs/_api/v1/keras/datasets/cifar10/__init__.pyi
|
f00ca8b56bea50c2b28c37c450e8e377366f9b62
|
[] |
no_license
|
matangover/tensorflow-stubs
|
9422fbb1cb3a3638958d621461291c315f9c6ec2
|
664bd995ef24f05ba2b3867d979d23ee845cb652
|
refs/heads/master
| 2020-05-23T12:03:40.996675 | 2019-05-15T06:21:43 | 2019-05-15T06:21:43 | 186,748,093 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 220 |
pyi
|
# Stubs for tensorflow._api.v1.keras.datasets.cifar10 (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.python.keras.datasets.cifar10 import load_data as load_data
|
[
"[email protected]"
] | |
615b0e3ce001a61de185b62a6465cb046a30fcc6
|
416baad7e83075c1c59f1fa920d9a18cd3351f16
|
/tensor2tensor/models/__init__.py
|
58badcb4e011ce3350c1cf88d2bd7a49cbdc8d59
|
[
"Apache-2.0"
] |
permissive
|
jmlingeman/tensor2tensor
|
aa759fc9101149284b0b6f2f7a03e9759b7214a9
|
9921c4a816aafb76964a960541045ce4d730b3c9
|
refs/heads/master
| 2021-04-29T01:52:38.283004 | 2018-04-23T20:04:12 | 2018-04-23T20:04:12 | 121,812,413 | 0 | 0 | null | 2018-02-16T23:39:11 | 2018-02-16T23:39:11 | null |
UTF-8
|
Python
| false | false | 1,986 |
py
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models defined in T2T. Imports here force registration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
# pylint: disable=unused-import
from tensor2tensor.layers import modalities
from tensor2tensor.models import basic
from tensor2tensor.models import bytenet
from tensor2tensor.models import lstm
from tensor2tensor.models import neural_gpu
from tensor2tensor.models import resnet
from tensor2tensor.models import revnet
from tensor2tensor.models import shake_shake
from tensor2tensor.models import slicenet
from tensor2tensor.models import transformer
from tensor2tensor.models import vanilla_gan
from tensor2tensor.models import xception
from tensor2tensor.models.research import aligned
from tensor2tensor.models.research import attention_lm
from tensor2tensor.models.research import attention_lm_moe
from tensor2tensor.models.research import cycle_gan
from tensor2tensor.models.research import gene_expression
from tensor2tensor.models.research import multimodel
from tensor2tensor.models.research import super_lm
from tensor2tensor.models.research import transformer_moe
from tensor2tensor.models.research import transformer_revnet
from tensor2tensor.models.research import transformer_sketch
from tensor2tensor.models.research import transformer_vae
# pylint: enable=unused-import
|
[
"[email protected]"
] | |
06f2d36fbb85bae7a5b684e93a760e88ee7b328d
|
de392462a549be77e5b3372fbd9ea6d7556f0282
|
/accounts/migrations/0198_auto_20210812_1748.py
|
26547e06f8a5011da9233e88c29c998430ef3246
|
[] |
no_license
|
amutebe/AMMS_General
|
2830770b276e995eca97e37f50a7c51f482b2405
|
57b9b85ea2bdd272b44c59f222da8202d3173382
|
refs/heads/main
| 2023-07-17T02:06:36.862081 | 2021-08-28T19:07:17 | 2021-08-28T19:07:17 | 400,064,408 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 729 |
py
|
# Generated by Django 3.2.3 on 2021-08-12 14:48
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.2.3. The `default` strings below were
    # evaluated once at generation time (the 'BCL...' values appear to embed
    # a date-based sequence — presumably from a callable default on the
    # model; verify against accounts/models.py), so they are frozen literals.

    dependencies = [
        ('accounts', '0197_auto_20210812_1740'),
    ]

    operations = [
        # Both altered fields are CharField primary keys (serialize=False).
        migrations.AlterField(
            model_name='car',
            name='car_number',
            field=models.CharField(default='BCL12082021142', max_length=200, primary_key=True, serialize=False, verbose_name='Corrective action no.:'),
        ),
        migrations.AlterField(
            model_name='employees',
            name='employeeID',
            field=models.CharField(default='BCL11', max_length=20, primary_key=True, serialize=False, verbose_name='Employee ID'),
        ),
    ]
|
[
"[email protected]"
] | |
281090431d5371b25cb5f61faa42b5ded0fee6a8
|
b2ba670818623f8ab18162382f7394baed97b7cb
|
/test-data/AndroidSlicer/Carnote/DD/32.py
|
4201f082c91275a5dd9fd6d0c08a4ab34f2b21ff
|
[
"MIT"
] |
permissive
|
hsumyatwin/ESDroid-artifact
|
012c26c40537a79b255da033e7b36d78086b743a
|
bff082c4daeeed62ceda3d715c07643203a0b44b
|
refs/heads/main
| 2023-04-11T19:17:33.711133 | 2022-09-30T13:40:23 | 2022-09-30T13:40:23 | 303,378,286 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,140 |
py
|
#start monkey test seedNo 0
# Auto-generated MonkeyRunner (Jython) UI test: launches the QuickNote app
# on a connected Android device and replays a fixed sequence of screen taps.
import os;
from subprocess import Popen
from subprocess import PIPE
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice, MonkeyImage
from com.android.monkeyrunner.MonkeyDevice import takeSnapshot
from com.android.monkeyrunner.easy import EasyMonkeyDevice
from com.android.monkeyrunner.easy import By
from com.android.chimpchat.hierarchyviewer import HierarchyViewer
from com.android.monkeyrunner import MonkeyView
import random
import sys
import subprocess
from sys import exit
from random import randint
# Block until a device/emulator is reachable via adb.
device = MonkeyRunner.waitForConnection()
package = 'com.spisoft.quicknote'
activity ='com.spisoft.quicknote.MainActivity'
runComponent = package+'/'+activity
device.startActivity(component=runComponent)
MonkeyRunner.sleep(0.5)
MonkeyRunner.sleep(0.5)
# Replay recorded taps. Coordinates are absolute screen pixels (specific to
# the recording device's resolution), with a 0.5 s pause between events.
device.touch(982,153, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(699,932, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(923,1695, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(963,1730, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(62,124, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(467,678, 'DOWN_AND_UP')
|
[
"[email protected]"
] | |
ff38640ad5a4f55a1f83c27af699d4597b525d3d
|
70f41a06d733e680af3bb1f00d8ff33574f4f4bb
|
/src/fh_tools/language_test/base_test/bisect_demo/grades_demo.py
|
3c04e262f62fafdf04b345d787e0ae2cae0fa7b6
|
[
"MIT"
] |
permissive
|
mmmaaaggg/RefUtils
|
209f7136acc63c880e60974c347e19adc4c7ac2e
|
f127658e75b5c52b4db105a22176ee0931ceacae
|
refs/heads/master
| 2021-06-11T16:06:06.245275 | 2021-03-10T05:32:14 | 2021-03-10T05:32:14 | 139,413,962 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 445 |
py
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author : MG
@Time : 19-8-9 上午10:33
@File : grades_demo.py
@contact : [email protected]
@desc : 通过二分发计算分数等级
"""
import bisect
def grade(score, breakpoints=(60, 70, 80, 90), grades='FDCBA'):
    """Map a numeric score to a letter grade via binary search.

    Args:
        score: numeric score to classify.
        breakpoints: ascending cut-off scores; a score equal to a cut-off
            earns the higher grade (bisect_right semantics).
        grades: letters for the len(breakpoints) + 1 bands, lowest first.

    Returns:
        The single-character grade for ``score``.
    """
    # Fix: the default was a mutable list shared across calls; a tuple is
    # safe and bisect accepts any sorted sequence, so callers passing their
    # own lists are unaffected.
    i = bisect.bisect(breakpoints, score)
    return grades[i]
print([grade(score) for score in [33, 99, 77, 70, 89, 90, 100]])
if __name__ == "__main__":
pass
|
[
"[email protected]"
] | |
2155f6826ed7b9607bfc77f9e46bc7b6daf29ed5
|
95d64b1dea3db73e85562aa2665c3a696370a679
|
/code/information-gain.py
|
da46714604a93ed9d59f09d41c3b92d03c5e7812
|
[] |
no_license
|
Smolky/exist-2021
|
7481e36fb3f44263c1a2190890fc6ac894c4fac5
|
2d51a01a829cb9e9b44eca5b9eefb06cb62162c8
|
refs/heads/main
| 2023-05-30T14:21:56.913663 | 2021-06-16T08:10:36 | 2021-06-16T08:10:36 | 364,022,851 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,758 |
py
|
"""
Information Gain per class
This class calculates the Information Gain (Mutual Info) of a dataset
and uses it to select the most discrimatory features
@author José Antonio García-Díaz <[email protected]>
@author Rafael Valencia-Garcia <[email protected]>
"""
import os
import sys
import argparse
import pandas as pd
import numpy as np
import pickle
from pathlib import Path
from sklearn import preprocessing
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import mutual_info_regression
from dlsdatasets.DatasetResolver import DatasetResolver
from utils.Parser import DefaultParser
from features.FeatureResolver import FeatureResolver
from sklearn.pipeline import Pipeline, FeatureUnion
from features.TokenizerTransformer import TokenizerTransformer
def main ():
    """Calculate the Information Gain (mutual information) of each linguistic
    feature (LF) against the label and print the most and least
    discriminatory features, globally and (for classification) per label.

    Raises:
        Exception: if the cached LF feature file is not available.
    """

    # var parser
    parser = DefaultParser (description = 'Calculates the Information Gain (Mutual Info) per class and obtains the best LF')

    # @var args Get arguments
    args = parser.parse_args ()

    # @var dataset_resolver DatasetResolver
    dataset_resolver = DatasetResolver ()

    # @var dataset Dataset This is the custom dataset for evaluation purposes
    dataset = dataset_resolver.get (args.dataset, args.corpus, args.task, False)
    dataset.filename = dataset.get_working_dir (args.task, 'dataset.csv')

    # @var df Ensure if we already had the data processed
    df = dataset.get ()

    # @var task_type String Expected to be 'classification' or 'regression'
    task_type = dataset.get_task_type ()

    # @var df_train DataFrame Training split, keeping only labelled rows
    df_train = dataset.get_split (df, 'train')
    df_train = df_train[df_train['label'].notna()]

    # @var feature_resolver FeatureResolver
    feature_resolver = FeatureResolver (dataset)

    # @var feature_file String
    feature_file = feature_resolver.get_suggested_cache_file ('lf', task_type)

    # @var features_cache String The file where the features are stored
    features_cache = dataset.get_working_dir (args.task, feature_file)

    # If the features are not found, abort. The raise terminates execution,
    # so the unreachable `sys.exit ()` that used to follow it was removed.
    if not Path (features_cache).is_file ():
        raise Exception ('features lf file are not avaiable')

    # @var transformer Transformer
    transformer = feature_resolver.get ('lf', cache_file = features_cache)

    # @var features_df DataFrame
    features_df = transformer.transform ([])

    # @var linguistic_features List
    linguistic_features = features_df.columns.to_list ()

    # Keep only the training features
    features_df = features_df[features_df.index.isin (df_train.index)].reindex (df_train.index)

    # Attach label
    features_df = features_df.assign (label = df_train['label'])

    # @var unique_labels Series Bind to the label
    unique_labels = dataset.get_available_labels ()

    # @var X Feature matrix without the label column
    X = features_df.loc[:, features_df.columns != 'label']

    # @var mi Mutual-information score per feature, as a column vector
    if 'classification' == task_type:
        mi = mutual_info_classif (X = X, y = df_train['label']).reshape (-1, 1)

    elif 'regression' == task_type:
        # BUG FIX: the original condition was `elif 'regression':`, which is
        # always truthy, so any non-classification task type silently fell
        # into the regression branch.
        mi = mutual_info_regression (X = X, y = df_train['label']).reshape (-1, 1)

    # @var best_features_indexes DataFrame MI coefficient per feature
    best_features_indexes = pd.DataFrame (mi,
        columns = ['Coefficient'],
        index = linguistic_features
    )

    if 'regression' == task_type:
        print ("by dataset")
        print ("----------")
        best_features_indexes.index = linguistic_features
        print ("top")
        print (best_features_indexes.sort_values (by = 'Coefficient', ascending = False).head (20).to_csv (float_format = '%.5f'))
        print ("worst")
        print (best_features_indexes.sort_values (by = 'Coefficient', ascending = True).head (10).to_csv (float_format = '%.5f'))

    if 'classification' == task_type:
        # @var average_features_per_label List Mean feature vector per label
        average_features_per_label = [features_df.loc[df_train.loc[features_df['label'] == label].index].mean ().to_frame ().T for label in unique_labels]

        # Merge features by label
        features_df_merged = pd \
            .concat (average_features_per_label) \
            .reset_index (drop = True) \
            .assign (label = unique_labels) \
            .set_index ('label') \
            .transpose ()

        # Attach coefficient to the features
        features_df_merged = features_df_merged.assign (Coefficient = best_features_indexes.values)

        print ("by dataset")
        print ("----------")
        best_features_indexes.index = features_df_merged.index
        print ("top")
        print (best_features_indexes.sort_values (by = 'Coefficient', ascending = False).head (10).to_csv (float_format = '%.5f'))
        print ("worst")
        print (best_features_indexes.sort_values (by = 'Coefficient', ascending = True).head (10).to_csv (float_format = '%.5f'))

        # Results merged by label
        print ("by label")
        print ("----------")
        print ("top")
        print (features_df_merged.sort_values (by = 'Coefficient', ascending = False).head (10)[unique_labels].to_csv (float_format = '%.5f'))
        print ("worst")
        print (features_df_merged.sort_values (by = 'Coefficient', ascending = True).head (10)[unique_labels].to_csv (float_format = '%.5f'))
if __name__ == "__main__":
main ()
|
[
"[email protected]"
] | |
9545a1c677720b2cc1d1a08ee3eaaa268a423759
|
390d19c3159133d8c688396cb11b4ed3f8178d09
|
/BaekJoon/APS_2019/2669_직사각형 네개의 합집합의 면적.py
|
1d2b01330c52119bf983190b61936d5a7dcf040a
|
[] |
no_license
|
JJayeee/CodingPractice
|
adba64cbd1d030b13a877f0b2e5ccc1269cb2e11
|
60f8dce48c04850b9b265a9a31f49eb6d9fc13c8
|
refs/heads/master
| 2021-08-16T17:14:01.161390 | 2021-07-16T00:42:18 | 2021-07-16T00:42:18 | 226,757,079 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 277 |
py
|
# Union area of four axis-aligned rectangles on a 100x100 grid: collect
# every covered unit cell in a set so overlaps are counted exactly once.
covered = set()
for _ in range(4):
    xs, ys, xe, ye = map(int, input().split())
    for x in range(xs, xe):
        for y in range(ys, ye):
            covered.add((x, y))
print(len(covered))
|
[
"[email protected]"
] | |
a865e9f10130569267073fa37a1314786a38c6bb
|
8efd8bcd3945d88370f6203e92b0376ca6b41c87
|
/problems1_100/79_ Word_Search.py
|
5374317283275085258b340378ccd5eef61390f0
|
[] |
no_license
|
Provinm/leetcode_archive
|
732ad1ef5dcdfdde6dd5a33522e86f7e24ae2db5
|
3e72dcaa579f4ae6f587898dd316fce8189b3d6a
|
refs/heads/master
| 2021-09-21T08:03:31.427465 | 2018-08-22T15:58:30 | 2018-08-22T15:58:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,465 |
py
|
'''
Given a 2D board and a word, find if the word exists in the grid.
The word can be constructed from letters of sequentially adjacent cell, where "adjacent" cells are those horizontally or vertically neighboring. The same letter cell may not be used more than once.
For example,
Given board =
[
['A','B','C','E'],
['S','F','C','S'],
['A','D','E','E']
]
word = "ABCCED", -> returns true,
word = "SEE", -> returns true,
word = "ABCB", -> returns false.
'''
class Solution(object):

    def exist(self, board, word):
        """Return True if `word` can be traced through horizontally or
        vertically adjacent cells of `board`, using each cell at most once.

        :type board: List[List[str]]
        :type word: str
        :rtype: bool

        The original implementation's backtracking was broken: `deep` was
        incremented inside the candidate loop, and `tem = tem[:deep]` only
        rebound the local name, so the shared visited list was never
        unwound for sibling branches. This is a standard mark-and-restore
        DFS instead; the board is restored before returning.
        """
        if not word:
            return True
        if not board or not board[0]:
            return False

        rows, cols = len(board), len(board[0])

        def dfs(row, col, depth):
            # depth = index of the next character of `word` to match.
            if depth == len(word):
                return True
            if not (0 <= row < rows and 0 <= col < cols):
                return False
            if board[row][col] != word[depth]:
                return False
            # Mark the cell as visited for the duration of this path.
            saved = board[row][col]
            board[row][col] = None
            found = (dfs(row + 1, col, depth + 1) or
                     dfs(row - 1, col, depth + 1) or
                     dfs(row, col + 1, depth + 1) or
                     dfs(row, col - 1, depth + 1))
            board[row][col] = saved  # backtrack
            return found

        return any(dfs(r, c, 0) for r in range(rows) for c in range(cols))
# Ad-hoc smoke test: this board/word pair forces backtracking through the
# repeated 'E's, so a correct implementation prints True.
s = Solution()
board = [["A","B","C","E"],
["S","F","E","S"],
["A","D","E","E"]]
word = "ABCESEEEFS"
print(s.exist(board, word))
|
[
"[email protected]"
] | |
54a075ef2572940304283d2f526de481af678278
|
5154364983b0e44c4af2d41a59cfa8edc923283a
|
/python_Source/developmentP/deeplearining/pie_chart_01.py
|
00eb701184787dad7373c13e41ea294c5459683e
|
[] |
no_license
|
vasana12/python_python_git
|
082f84df30e4b307d223e8970f87a7432a1d80fd
|
db87e112731fca1fe80fef29d8f180f19b9e7afc
|
refs/heads/master
| 2020-03-30T23:22:30.197307 | 2018-11-24T05:05:51 | 2018-11-24T05:05:51 | 151,698,409 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 562 |
py
|
"""Draw a pie chart whose Korean labels need a Korean-capable font."""
import matplotlib.pyplot as plt
from matplotlib import font_manager, rc
import matplotlib

# Register Windows' Malgun Gothic so the Korean slice labels render.
FONT_PATH = "C:/Windows/fonts/malgun.ttf"
malgun_name = font_manager.FontProperties(fname=FONT_PATH).get_name()
matplotlib.rc('font', family=malgun_name)

# One entry per slice: label, share, colour; the second slice is pulled out.
slice_labels = ('개구리', '돼지', '개', '통나무')
slice_sizes = [15, 30, 40, 10]
slice_colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
slice_offsets = (0, 0.1, 0, 0)

plt.pie(slice_sizes, explode=slice_offsets, labels=slice_labels,
        colors=slice_colors, autopct='%2.2f%%', shadow=False, startangle=90)
plt.axis('equal')  # equal aspect ratio keeps the pie circular
plt.show()
|
[
"[email protected]"
] | |
b8f5573ff344929c69dceabf3640aea61ec7232f
|
bd97064b5ed9f17b11bcd3ac9a1f2c8ea9ffaf82
|
/restapi/routers/Utils.py
|
8284d48e5f99c5cebcbd87f7d2ecb895771a1912
|
[] |
no_license
|
IndominusByte/bhaktirahayu-backend
|
a33eff6d0a74894934a6643ef3b81af283542ecf
|
628d5d5cdbe145696835e32c47f77ca03dc72708
|
refs/heads/main
| 2023-08-13T16:25:21.241086 | 2021-09-18T18:04:25 | 2021-09-18T18:04:25 | 389,309,892 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 457 |
py
|
from fastapi import APIRouter, Depends
from fastapi_jwt_auth import AuthJWT
from schemas.utils.UtilSchema import UtilEncodingImageBase64
from libs.MagicImage import MagicImage
router = APIRouter()
@router.post('/encoding-image-base64',response_model=bytes)
async def encoding_image_base64(util_data: UtilEncodingImageBase64, authorize: AuthJWT = Depends()):
    """Return the file at ``util_data.path_file`` base64-encoded.

    Requires a valid JWT; ``jwt_required`` raises (→ 401) otherwise.
    """
    authorize.jwt_required()

    return MagicImage.convert_image_as_base64(util_data.path_file)
|
[
"[email protected]"
] | |
9166dc2e456f9adbf39f8f327bc6c3f432090aa9
|
976d399110f839ba98dc30e51004297385c56479
|
/phone.py
|
cd062200df0418c8ebf51a5f6d08aaded568f901
|
[] |
no_license
|
EileenLL/Phone-App-Practice
|
4f9bb0eda10e505c833b79d15e21b5e3525399f6
|
3b83fd7547a4248752f89255f530e19710b91033
|
refs/heads/master
| 2020-12-05T02:01:28.760728 | 2017-03-02T05:15:49 | 2017-03-02T05:15:49 | 83,637,368 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,924 |
py
|
class Phone(object):
    """A simple Phone class to keep track of contacts.

    Fixes over the original: Python-2 `print` statements replaced with
    the function form (same output, runs on Python 3 too), dead `pass`
    statements after real code removed, and the contact key in `call`
    computed once instead of twice.
    """

    def __init__(self, number, name, contacts=None):
        self.number = number
        self.name = name
        # Each phone gets its own dict unless one is supplied.
        if contacts:
            self.contacts = contacts
        else:
            self.contacts = {}

    # The __repr__ method gives the class a print format that is meaningful to
    # humans, in this case the phone's name.
    def __repr__(self):
        return self.name

    def add_contact(self, first_name, last_name, number):
        """Creates new Contact instance and adds the instance to contacts"""
        entry = Contact(first_name, last_name, number)
        self.contacts[self._get_contact_key(first_name, last_name)] = entry
        print(self.contacts)

    def call(self, first_name, last_name):
        """Call a contact (prints who is being called and at what number)."""
        call_name = self._get_contact_key(first_name, last_name)
        # Look up the contact in the dictionary through the name key.
        contact = self.contacts[call_name]
        contact_number = contact.phone_number
        print("You are calling " + str(call_name) + " at " + str(contact_number))

    def text(self, first_name, message):
        """Send a contact a message. (Not implemented yet.)"""
        pass

    def del_contact(self, first_name, last_name):
        """Remove a contact from phone"""
        del self.contacts[self._get_contact_key(first_name, last_name)]

    def _get_contact_key(self, first_name, last_name):
        """This is a private method. It's meant to be used only from within
        this class. We notate private attributes and methods by prepending with
        an underscore.
        """
        return first_name.lower() + " " + last_name.lower()
# class definition for a Contact
class Contact(object):
    """Holds the details of a single person in the contact list."""

    def __init__(self,
                 first_name,
                 last_name,
                 phone_number,
                 email="",
                 twitter_handle=""):
        self.first_name = first_name
        self.last_name = last_name
        self.phone_number = phone_number
        self.email = email
        self.twitter_handle = twitter_handle

    def __repr__(self):
        # Shown as "First Last" so printed contact lists read naturally.
        return self.full_name()

    def full_name(self):
        """Return the contact's first and last name separated by a space."""
        return " ".join([self.first_name, self.last_name])
# some examples of how to use these two classes
# Make a Phone instace
# tommys_phone = Phone(5555678, "Tommy Tutone's Phone")
# Use the Phone class to add new contacts!
# tommys_phone.add_contact("Jenny", "From That Song", 8675309)
|
[
"[email protected]"
] | |
870f4ef3cedddc663fb7b8f310d6b86d04b6de4f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03588/s377192087.py
|
33be853cadb78a71d1fcb119905a836c2c06e43c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 160 |
py
|
# Read N (a, b) pairs and print a_max + its paired b, where a_max is the
# strictly largest a (ties keep the first pair seen).
n = int(input())
best = (0, 0)  # (largest a so far, its paired b)
for _ in range(n):
    a, b = map(int, input().split())
    if a > best[0]:
        best = (a, b)
print(best[0] + best[1])
|
[
"[email protected]"
] | |
8b183bf27487b5db210287a08477ad86698afa14
|
7d328fa9c4b336f28fa357306aad5483afa2d429
|
/BinTreeFromSortedArray.py
|
2d3addba12667610b79141ff6049c7dda7f413fa
|
[] |
no_license
|
ktyagi12/LeetCode
|
30be050f1e2fcd16f73aa38143727857cc943536
|
64e68f854b327ea70dd1834de25e756d64957514
|
refs/heads/master
| 2021-07-01T21:24:26.765487 | 2021-05-09T11:42:50 | 2021-05-09T11:42:50 | 230,497,920 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 600 |
py
|
#Problem available at: https://leetcode.com/problems/convert-sorted-array-to-binary-search-tree/submissions/
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
        """Build a height-balanced BST from an ascending-sorted list."""
        def build(lo, hi):
            # Half-open index range [lo, hi) over nums — avoids the
            # per-call list slicing of the naive recursive version.
            if lo >= hi:
                return None
            mid = lo + (hi - lo) // 2  # same pivot as len(slice) // 2
            node = TreeNode(nums[mid])
            node.left = build(lo, mid)
            node.right = build(mid + 1, hi)
            return node

        return build(0, len(nums))
|
[
"[email protected]"
] | |
15225b8ed699b8710acd02ca79f4d765e1fdcdbf
|
150af06564fbd615479d67385e39b491d55a2ac2
|
/examples/aio.py
|
c590d077ca02df6b350e0b14348466c5b12f2d8d
|
[
"MIT"
] |
permissive
|
colanconnon/graphql-ws
|
3d340abe167a7202cca858fe86d829dd700dc99a
|
3df53014dc60762007e2669d45135fb0f574e759
|
refs/heads/master
| 2021-05-07T17:36:38.551202 | 2017-10-25T21:16:27 | 2017-10-25T21:16:27 | 108,750,259 | 0 | 0 | null | 2017-10-29T16:21:07 | 2017-10-29T16:21:07 | null |
UTF-8
|
Python
| false | false | 1,645 |
py
|
from aiohttp import web, WSMsgType
from template import render_graphiql
from schema import schema
from graphql import format_error
import json
from graphql_ws import WebSocketSubscriptionServer
async def graphql_view(request):
    """Execute a GraphQL query posted as JSON and return the result as JSON."""
    payload = await request.json()
    # Fall back to an empty query if the client omitted the 'query' key.
    response = await schema.execute(payload.get('query', ''), return_promise=True)
    data = {}
    if response.errors:
        data['errors'] = [format_error(e) for e in response.errors]
    if response.data:
        data['data'] = response.data
    jsondata = json.dumps(data,)
    return web.Response(text=jsondata, headers={'Content-Type': 'application/json'})
async def graphiql_view(request):
    """Serve the GraphiQL in-browser IDE as an HTML page."""
    return web.Response(text=render_graphiql(), headers={'Content-Type': 'text/html'})
subscription_server = WebSocketSubscriptionServer(schema)
async def subscriptions(request):
    """Upgrade the request to a 'graphql-ws' websocket and serve subscriptions.

    The connection is handed off to the module-level subscription server,
    which handles the websocket until the client disconnects. The large
    block of commented-out echo-server scaffolding from the original has
    been removed as dead code.
    """
    ws = web.WebSocketResponse(protocols=('graphql-ws',))
    await ws.prepare(request)

    await subscription_server.handle(ws)
    return ws
app = web.Application()
# Route wiring: websocket subscriptions, the GraphiQL IDE, and the query
# endpoint (GET and POST both accepted for /graphql).
app.router.add_get('/subscriptions', subscriptions)
app.router.add_get('/graphiql', graphiql_view)
app.router.add_get('/graphql', graphql_view)
app.router.add_post('/graphql', graphql_view)
web.run_app(app, port=8000)
|
[
"[email protected]"
] | |
51b9d85a67e999addd2899a420954e72eea8ab63
|
978248bf0f275ae688f194593aa32c267832b2b6
|
/xlsxwriter/test/comparison/test_table14.py
|
f0690c66bde3644bc9256ba5ff345a5604768e7d
|
[
"BSD-2-Clause-Views"
] |
permissive
|
satish1337/XlsxWriter
|
b0c216b91be1b74d6cac017a152023aa1d581de2
|
0ab9bdded4f750246c41a439f6a6cecaf9179030
|
refs/heads/master
| 2021-01-22T02:35:13.158752 | 2015-03-31T20:32:28 | 2015-03-31T20:32:28 | 33,300,989 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,856 |
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        # Show full diffs when the comparison fails.
        self.maxDiff = None

        base_dir = 'xlsxwriter/test/comparison/'
        target = 'table14.xlsx'

        # Output produced by this run vs. the Excel-generated reference.
        self.got_filename = base_dir + '_test_' + target
        self.exp_filename = base_dir + 'xlsx_files/' + target

        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with tables."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        # Column formats carrying explicit dxf indices so the table styles
        # line up with the differential formats in the reference workbook.
        red_fmt = workbook.add_format({'num_format': '0.00;[Red]0.00', 'dxf_index': 2})
        pad_fmt = workbook.add_format({'num_format': '0.00_ ;\-0.00\ ', 'dxf_index': 1})
        red_pad_fmt = workbook.add_format({'num_format': '0.00_ ;[Red]\-0.00\ ', 'dxf_index': 0})

        table_rows = [
            ['Foo', 1234, 2000, 4321],
            ['Bar', 1256, 4000, 4320],
            ['Baz', 2234, 3000, 4332],
            ['Bop', 1324, 1000, 4333],
        ]

        worksheet.set_column('C:F', 10.288)
        worksheet.add_table(
            'C2:F6',
            {
                'data': table_rows,
                'columns': [
                    {},
                    {'format': red_fmt},
                    {'format': pad_fmt},
                    {'format': red_pad_fmt},
                ],
            },
        )

        workbook.close()
        self.assertExcelEqual()
|
[
"[email protected]"
] | |
0b61ccd08991ebb0902f43a83ba3074f2e60a203
|
18305efd1edeb68db69880e03411df37fc83b58b
|
/pdb_files3000rot/g7/1g7v/tractability_450/pymol_results_file.py
|
b3ca0aa99f8776269651041e072c2f991de4c442
|
[] |
no_license
|
Cradoux/hotspot_pipline
|
22e604974c8e38c9ffa979092267a77c6e1dc458
|
88f7fab8611ebf67334474c6e9ea8fc5e52d27da
|
refs/heads/master
| 2021-11-03T16:21:12.837229 | 2019-03-28T08:31:39 | 2019-03-28T08:31:39 | 170,106,739 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,214 |
py
|
from os.path import join
import tempfile
import zipfile
from pymol import cmd, finish_launching
from pymol.cgo import *
finish_launching()
dirpath = None
def cgo_arrow(atom1='pk1', atom2='pk2', radius=0.07, gap=0.0, hlength=-1, hradius=-1, color='blue red', name=''):
    """Build a CGO arrow (cylinder shaft + cone head) from atom1 to atom2.

    atom1/atom2 may be PyMOL selections, '[x,y,z]' literal strings, or
    coordinate triples. color is one or two space-separated PyMOL color
    names (shaft then head). hlength/hradius < 0 means "derive from radius".
    Returns the CGO object list for cmd.load_cgo.
    NOTE(review): the `name` parameter is accepted but unused in this body;
    the generated script below names the objects itself when loading.
    NOTE(review): `cgo.CYLINDER`/`cgo.CONE` rely on the `cgo` module being
    visible in PyMOL's run namespace — it is not explicitly imported here.
    """
    from chempy import cpv
    radius, gap = float(radius), float(gap)
    hlength, hradius = float(hlength), float(hradius)
    try:
        # 'blue red' -> separate shaft/head colors; a single name colors both.
        color1, color2 = color.split()
    except:
        color1 = color2 = color
    color1 = list(cmd.get_color_tuple(color1))
    color2 = list(cmd.get_color_tuple(color2))
    def get_coord(v):
        # Accept raw coordinates, a '[x,y,z]' literal, or an atom selection.
        if not isinstance(v, str):
            return v
        if v.startswith('['):
            return cmd.safe_list_eval(v)
        return cmd.get_atom_coords(v)
    xyz1 = get_coord(atom1)
    xyz2 = get_coord(atom2)
    # Unit vector along the arrow axis, pointing from tip toward tail.
    normal = cpv.normalize(cpv.sub(xyz1, xyz2))
    if hlength < 0:
        hlength = radius * 3.0
    if hradius < 0:
        hradius = hlength * 0.6
    if gap:
        # Shorten both ends by `gap` along the axis.
        diff = cpv.scale(normal, gap)
        xyz1 = cpv.sub(xyz1, diff)
        xyz2 = cpv.add(xyz2, diff)
    # Cone base sits hlength away from the tip along the axis.
    xyz3 = cpv.add(cpv.scale(normal, hlength), xyz2)
    obj = [cgo.CYLINDER] + xyz1 + xyz3 + [radius] + color1 + color2 + [cgo.CONE] + xyz3 + xyz2 + [hradius, 0.0] + color2 + color2 + [1.0, 0.0]
    return obj
dirpath = tempfile.mkdtemp()
zip_dir = 'out.zip'
with zipfile.ZipFile(zip_dir) as hs_zip:
hs_zip.extractall(dirpath)
cmd.load(join(dirpath,"protein.pdb"), "protein")
cmd.show("cartoon", "protein")
if dirpath:
f = join(dirpath, "label_threshold_10.mol2")
else:
f = "label_threshold_10.mol2"
cmd.load(f, 'label_threshold_10')
cmd.hide('everything', 'label_threshold_10')
cmd.label("label_threshold_10", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
if dirpath:
f = join(dirpath, "label_threshold_14.mol2")
else:
f = "label_threshold_14.mol2"
cmd.load(f, 'label_threshold_14')
cmd.hide('everything', 'label_threshold_14')
cmd.label("label_threshold_14", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
if dirpath:
f = join(dirpath, "label_threshold_17.mol2")
else:
f = "label_threshold_17.mol2"
cmd.load(f, 'label_threshold_17')
cmd.hide('everything', 'label_threshold_17')
cmd.label("label_threshold_17", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
colour_dict = {'acceptor':'red', 'donor':'blue', 'apolar':'yellow', 'negative':'purple', 'positive':'cyan'}
threshold_list = [10, 14, 17]
gfiles = ['donor.grd', 'apolar.grd', 'acceptor.grd']
grids = ['donor', 'apolar', 'acceptor']
num = 0
surf_transparency = 0.2
if dirpath:
gfiles = [join(dirpath, g) for g in gfiles]
for t in threshold_list:
for i in range(len(grids)):
try:
cmd.load(r'%s'%(gfiles[i]), '%s_%s'%(grids[i], str(num)))
cmd.isosurface('surface_%s_%s_%s'%(grids[i], t, num), '%s_%s'%(grids[i], num), t)
cmd.set('transparency', surf_transparency, 'surface_%s_%s_%s'%(grids[i], t, num))
cmd.color(colour_dict['%s'%(grids[i])], 'surface_%s_%s_%s'%(grids[i], t, num))
cmd.group('threshold_%s'%(t), members = 'surface_%s_%s_%s'%(grids[i],t, num))
cmd.group('threshold_%s' % (t), members='label_threshold_%s' % (t))
except:
continue
try:
cmd.group('hotspot_%s' % (num), members='threshold_%s' % (t))
except:
continue
for g in grids:
cmd.group('hotspot_%s' % (num), members='%s_%s' % (g,num))
cluster_dict = {"16.4940004349":[], "16.4940004349_arrows":[]}
cluster_dict["16.4940004349"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(6.0), float(103.5), float(82.5), float(1.0)]
cluster_dict["16.4940004349_arrows"] += cgo_arrow([6.0,103.5,82.5], [3.903,105.552,80.989], color="blue red", name="Arrows_16.4940004349_1")
cluster_dict["16.4940004349"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(9.5), float(108.0), float(80.5), float(1.0)]
cluster_dict["16.4940004349_arrows"] += cgo_arrow([9.5,108.0,80.5], [11.728,106.388,80.182], color="blue red", name="Arrows_16.4940004349_2")
cluster_dict["16.4940004349"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(9.5), float(105.0), float(79.0), float(1.0)]
cluster_dict["16.4940004349_arrows"] += cgo_arrow([9.5,105.0,79.0], [11.728,106.388,80.182], color="blue red", name="Arrows_16.4940004349_3")
cluster_dict["16.4940004349"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(9.5), float(105.5), float(77.0), float(1.0)]
cluster_dict["16.4940004349_arrows"] += cgo_arrow([9.5,105.5,77.0], [11.141,102.835,76.967], color="blue red", name="Arrows_16.4940004349_4")
cluster_dict["16.4940004349"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(11.0), float(110.5), float(81.0), float(1.0)]
cluster_dict["16.4940004349_arrows"] += cgo_arrow([11.0,110.5,81.0], [13.419,110.042,82.914], color="blue red", name="Arrows_16.4940004349_5")
cluster_dict["16.4940004349"] += [COLOR, 1.00, 1.000, 0.000] + [ALPHA, 0.6] + [SPHERE, float(7.42102675834), float(107.749665562), float(78.4210819103), float(1.0)]
cluster_dict["16.4940004349"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(5.5), float(113.5), float(80.0), float(1.0)]
cluster_dict["16.4940004349_arrows"] += cgo_arrow([5.5,113.5,80.0], [5.021,110.73,80.529], color="red blue", name="Arrows_16.4940004349_6")
cluster_dict["16.4940004349"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(8.5), float(115.0), float(78.0), float(1.0)]
cluster_dict["16.4940004349_arrows"] += cgo_arrow([8.5,115.0,78.0], [6.555,117.389,78.438], color="red blue", name="Arrows_16.4940004349_7")
cluster_dict["16.4940004349"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(10.5), float(109.0), float(79.5), float(1.0)]
cluster_dict["16.4940004349_arrows"] += cgo_arrow([10.5,109.0,79.5], [11.883,106.786,77.978], color="red blue", name="Arrows_16.4940004349_8")
cluster_dict["16.4940004349"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(11.5), float(113.0), float(78.0), float(1.0)]
cluster_dict["16.4940004349_arrows"] += cgo_arrow([11.5,113.0,78.0], [13.328,115.357,77.05], color="red blue", name="Arrows_16.4940004349_9")
cmd.load_cgo(cluster_dict["16.4940004349"], "Features_16.4940004349", 1)
cmd.load_cgo(cluster_dict["16.4940004349_arrows"], "Arrows_16.4940004349")
cmd.set("transparency", 0.2,"Features_16.4940004349")
cmd.group("Pharmacophore_16.4940004349", members="Features_16.4940004349")
cmd.group("Pharmacophore_16.4940004349", members="Arrows_16.4940004349")
if dirpath:
f = join(dirpath, "label_threshold_16.4940004349.mol2")
else:
f = "label_threshold_16.4940004349.mol2"
cmd.load(f, 'label_threshold_16.4940004349')
cmd.hide('everything', 'label_threshold_16.4940004349')
cmd.label("label_threshold_16.4940004349", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
cmd.group('Pharmacophore_16.4940004349', members= 'label_threshold_16.4940004349')
cmd.bg_color("white")
cmd.show("cartoon", "protein")
cmd.color("slate", "protein")
cmd.show("sticks", "organic")
cmd.hide("lines", "protein")
|
[
"[email protected]"
] | |
3c1414d17c449561e276f13e399900b1c4bd8035
|
72a9d5019a6cc57849463fc315eeb0f70292eac8
|
/Python-Programming/6- Numpy/Numpy_.py
|
98ac37a1616122702019f51a69f73e320c98fe2f
|
[] |
no_license
|
lydiawawa/Machine-Learning
|
393ce0713d3fd765c8aa996a1efc9f1290b7ecf1
|
57389cfa03a3fc80dc30a18091629348f0e17a33
|
refs/heads/master
| 2020-03-24T07:53:53.466875 | 2018-07-22T23:01:42 | 2018-07-22T23:01:42 | 142,578,611 | 1 | 0 | null | 2018-07-27T13:08:47 | 2018-07-27T13:08:47 | null |
UTF-8
|
Python
| false | false | 3,509 |
py
|
# %%%%%%%%%%%%% Python %%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%% Authors %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Dr. Martin Hagan----->Email: [email protected]
# Dr. Amir Jafari------>Email: [email protected]
# %%%%%%%%%%%%% Date:
# V1 Jan - 04 - 2018
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%% Numpy Python %%%%%%%%%%%%%%%%%%%%%%%%%%%%
# =============================================================
import numpy as np
# ----------------------------------------------------------------------------------
#---------------------- creating numpy array----------------------------------------
x = np.array([1, 2, 3, 4])
y = np.linspace(-5, 1, 10)
z = np.arange(0, 10)
print(x)
print(y)
print(z)
type(x)
print(x.dtype)
# ----------------------------------------------------------------------------------
#---------------------- Step Size---------------------------------------------------
x1 = np.arange(0, 10, 2)
x2 = np.arange(0, 5, .5)
x3 = np.arange(0, 1, .1)
y1 = np.linspace(1, 5, 2)
List = list(x1)
print(List)
Min = np.amin(x1)
print(Min)
Max = np.amax(y1)
print(Max)
# ----------------------------------------------------------------------------------
#---------------------- Array Operands----------------------------------------------
a1 = np.array([1, 1, 1, 1]) + np.array([2, 2, 2, 2])
print(a1)
a2 = np.array([1, 1, 1, 1]) - np.array([2, 2, 2, 2])
print(a2)
a3 = np.array([1, 1, 1, 1]) * np.array([2, 2, 2, 2])
print(a3)
a4 = np.array([1, 1, 1, 1]) / np.array([2, 2, 2, 2])
print(a4)
a5 = np.array([True, True, False]) + np.array([True, False, False])
print(a5)
a6 = np.array([True, True, False]) * np.array([True, False, False])
print(a6)
# ----------------------------------------------------------------------------------
#---------------------- Mathematical Function---------------------------------------
print (abs(-2))
list1 = [-1, -2, -3]
s1 = []
for i in range(len(list1)):
s1.append(abs(list1[i]))
print(s1)
np.abs(-3)
np.abs([-2, -7, 1])
# ----------------------------------------------------------------------------------
#---------------------- Indexing----------------------------------------------------
a7 = np.arange(1, 5, .5)
print(len(a7))
second_element = a7[1]
print(second_element)
first_three_elements = a7[0:3]
print(first_three_elements)
# ----------------------------------------------------------------------------------
# --------------------------Masking-------------------------------------------------
print(a7)
bigger_than_3 = a7 > 3
print(bigger_than_3)
type(bigger_than_3)
len(bigger_than_3)
d2 = [i for i, v in enumerate(a7) if v > 3]
print(d2)
[i for i, v in enumerate(a7) if v > 3]
d3 = [v for i, v in enumerate(a7) if v > 26]
print(d3)
sum(bigger_than_3)
len(d2)
large_nums = a7[bigger_than_3]
len(a7[bigger_than_3])
print(large_nums)
large_nums = a7[a7 > 3]
print(large_nums)
# ----------------------------------------------------------------------------------
# --------------------------More----------------------------------------------------
a8 = np.logical_and(a7 > 1, a7 < 3)
print(a8)
a9 = a7[np.logical_and(a7 > 1, a7 < 3)]
print(a9)
a10 = np.logical_or(a7 < 3, a7 > 4)
print(a10)
a11= a7[np.logical_or(a7 < 22, a7 > 27)]
print(a11)
# ----------------------------------------------------------------------------------
# --------------------------Vectorizing Function-------------------------------------
def f(x):
return x ** 2 > 2
f_v = np.vectorize(f)
print(f_v([1,2,3]))
|
[
"[email protected]"
] | |
fcff171d2095a1a02ec1b3033c6527903854024e
|
a844cba1a0cd54c650b640a7a5cbeabb8c2d15a5
|
/modules/debugger/modules.py
|
952d7b44e0a87252905c2dcc0c446df72cfd9ab7
|
[
"MIT"
] |
permissive
|
romain-tracktik/sublime_debugger
|
de5950d9f79fcfbe0407af4f89e15e91acb035aa
|
6ff71182fee427cfc0254a9d47679d7a6d1424f9
|
refs/heads/master
| 2020-09-13T12:06:54.544461 | 2019-11-16T09:51:55 | 2019-11-16T09:51:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,109 |
py
|
from ..typecheck import *
from ..import dap
from ..import core
from ..import ui
class Modules:
def __init__(self):
self.modules = [] #type: List[dap.Module]
self.on_updated = core.Event() #type: core.Event[None]
def on_module_event(self, event: dap.ModuleEvent) -> None:
if event.reason == dap.ModuleEvent.new:
self.modules.append(event.module)
self.on_updated()
return
if event.reason == dap.ModuleEvent.new:
# FIXME: NOT IMPLEMENTED
return
if event.reason == dap.ModuleEvent.new:
# FIXME: NOT IMPLEMENTED
return
def clear_session_date(self) -> None:
self.modules.clear()
self.on_updated()
class ModulesView(ui.Block):
def __init__(self, modules: Modules):
super().__init__()
self.modules = modules
def added(self, layout: ui.Layout):
self.on_updated_handle = self.modules.on_updated.add(self.dirty)
def removed(self):
self.on_updated_handle.dispose()
def render(self) -> ui.Panel.Children:
items = []
for module in self.modules.modules:
items.append(
ui.block(
ui.Label(module.name)
)
)
return [
ui.Table(items=items)
]
|
[
"[email protected]"
] | |
551b428503874c903e41834e1c62952b6faaeea5
|
8baa6d8e35a17f331345d9f314cdb2787653d38a
|
/src/exojax/spec/exomol.py
|
b13173a276fb13d010cf9c32fe7d85bedb6157c2
|
[
"MIT"
] |
permissive
|
bmorris3/exojax
|
2fb1dae486a1d4d7a91ee8e9fdd1c9e616fb1b3f
|
67d1b6c868d69892d4bbf9e620ed05e432cfe61f
|
refs/heads/master
| 2023-09-04T20:12:32.817699 | 2021-06-12T06:14:00 | 2021-06-12T06:14:00 | 379,588,979 | 0 | 0 |
MIT
| 2021-06-23T12:03:57 | 2021-06-23T12:03:57 | null |
UTF-8
|
Python
| false | false | 1,356 |
py
|
import numpy as np
def Sij0(A,g,nu_lines,elower,QTref):
"""Reference Line Strength in Tref=296K, S0.
Note:
Tref=296K
Args:
A: Einstein coefficient (s-1)
g: the upper state statistical weight
nu_lines: line center wavenumber (cm-1)
elower: elower
QTref: partition function Q(Tref)
Mmol: molecular mass (normalized by m_u)
Returns:
Sij(T): Line strength (cm)
"""
ccgs=29979245800.0
hcperk=1.4387773538277202 #hc/kB in cgs
Tref=296.0
S0=-A*g*np.exp(-hcperk*elower/Tref)*np.expm1(-hcperk*nu_lines/Tref)\
/(8.0*np.pi*ccgs*nu_lines**2*QTref)
return S0
def gamma_exomol(P, T, n_air, alpha_ref):
"""gamma factor by a pressure broadening
Args:
P: pressure (bar)
T: temperature (K)
n_air: coefficient of the temperature dependence of the air-broadened halfwidth
alpha_ref: broadening parameter
Returns:
gamma: pressure gamma factor (cm-1)
"""
Tref=296.0 #reference tempearture (K)
gamma=alpha_ref*P*(Tref/T)**n_air
return gamma
def gamma_natural(A):
"""gamma factor by natural broadning
1/(4 pi c) = 2.6544188e-12 (cm-1 s)
Args:
A: Einstein A-factor (1/s)
Returns:
gamma_natural: natural width (cm-1)
"""
return 2.6544188e-12*A
|
[
"[email protected]"
] | |
ec9c417e630a5d1e8843d6a2e23386c2db157f4e
|
d136c10cdd556055717f8b4330066f56052688b5
|
/setup.py
|
49f3c1e51fc0701380fc53e434bee2a0568658c7
|
[
"CC0-1.0"
] |
permissive
|
biomodels/BIOMD0000000083
|
24cfbb23497e0dae2fa764e13bbdfef565bd2a51
|
e8221a507f10df4490c4c6dd004368c9384f2de6
|
refs/heads/master
| 2021-01-01T05:34:54.036641 | 2014-10-16T05:22:13 | 2014-10-16T05:22:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 377 |
py
|
from setuptools import setup, find_packages
setup(name='BIOMD0000000083',
version=20140916,
description='BIOMD0000000083 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/BIOMD0000000083',
maintainer='Stanley Gu',
maintainer_url='[email protected]',
packages=find_packages(),
package_data={'': ['*.xml', 'README.md']},
)
|
[
"[email protected]"
] | |
8817047c2c71be9cdae859f54be02ae569fe144c
|
724ae861f52fedc9d57f39c30971fab0114dc34b
|
/cms_content/cms_app.py
|
1f95945cb2b4dac8bdf406ce033d0d0148078a6c
|
[
"BSD-3-Clause"
] |
permissive
|
mmlic/django-cms-content
|
36faa82f5303ba151d1f5542d895bbf65aec753a
|
d6b214542ab618b6fa4645483fa5bf58e8212f86
|
refs/heads/master
| 2021-01-16T18:06:01.393986 | 2010-09-18T14:54:45 | 2010-09-18T14:54:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 362 |
py
|
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from cms_content.menu import CMSContentMenu
class CMSContentApp(CMSApp):
name = _(u"CMS Content App")
urls = ["cms_content.urls"]
menus = [CMSContentMenu]
apphook_pool.register(CMSContentApp)
|
[
"[email protected]"
] | |
871af0fab6d27935b3a9d6894e5b69448e205e49
|
e7d4b6fcace1509d37359776d9f72020dad3da41
|
/part010/ch08_basemap/sec2_draw/test_2_plot_x_x.py
|
955139e8b3ec761b1aa180217c7fb55b866cb87f
|
[] |
no_license
|
LinyunGH/book_python_gis
|
b422e350222c4ab5711efb4cc6101e229bd26f7b
|
067d75e324c006e2098827ac16ba38d4894b8a21
|
refs/heads/master
| 2020-04-09T22:25:35.049625 | 2018-10-14T09:56:38 | 2018-10-14T09:56:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,862 |
py
|
# -*- coding: utf-8 -*-
print('=' * 40)
print(__file__)
from helper.textool import get_tmp_file
################################################################################
from mpl_toolkits.basemap import Basemap
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
para = {'projection': 'merc',
'lat_0': 0, 'lon_0': 120,
'resolution': 'h', 'area_thresh': .1,
'llcrnrlon': 116, 'llcrnrlat': 36.6,
'urcrnrlon': 124, 'urcrnrlat': 40.2 }
my_map = Basemap(**para)
my_map.drawcoastlines(); my_map.drawmapboundary()
################################################################################
lon = 121.60001; lat = 38.91027
x, y = my_map(lon, lat)
my_map.plot(x, y, 'bo', markersize=12)
# plt.show()
plt.savefig(get_tmp_file(__file__, '1'), bbox_inches='tight', dpi=600)
plt.savefig(get_tmp_file(__file__, '1', file_ext='pdf'), bbox_inches='tight', dpi=600)
plt.clf()
################################################################################
my_map = Basemap(**para)
my_map.drawcoastlines(); my_map.drawmapboundary()
lons = [121.60001, 121.38617, 117.19723]
lats = [38.91027, 37.53042, 39.12473]
x, y = my_map(lons, lats)
################################################################################
my_map.plot(x, y, 'bo', markersize=10)
# plt.show()
plt.savefig(get_tmp_file(__file__, '2'), bbox_inches='tight', dpi=600)
plt.savefig(get_tmp_file(__file__, '2', file_ext='pdf'), bbox_inches='tight', dpi=600)
plt.clf()
################################################################################
my_map = Basemap(**para)
my_map.drawcoastlines(); my_map.drawmapboundary()
my_map.plot(x, y, marker=None,color='m')
# plt.show()
plt.savefig(get_tmp_file(__file__, '3'), bbox_inches='tight', dpi=600)
plt.savefig(get_tmp_file(__file__, '3', file_ext='pdf'), bbox_inches='tight', dpi=600)
plt.clf()
|
[
"[email protected]"
] | |
662c71161cfd8d0510f97315e3f4b811738fdcf3
|
c1c7214e1f9230f19d74bb9776dac40d820da892
|
/examples/django/model一般/FilePathFieldの使い方の例/project/app/views.py
|
862e10440074a4c00591ebc229e8546a7d8428c2
|
[] |
no_license
|
FujitaHirotaka/djangoruler3
|
cb326c80d9413ebdeaa64802c5e5f5daadb00904
|
9a743fbc12a0efa73dbc90f93baddf7e8a4eb4f8
|
refs/heads/master
| 2020-04-01T13:32:28.078110 | 2018-12-13T00:39:56 | 2018-12-13T00:39:56 | 153,256,642 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 412 |
py
|
from django.shortcuts import render
import os
from .forms import *
from pathlib import Path
import re
from ajax.views import z
#この部分は本編とは関係なし
########################
d=z()
########################
def index(request):
d["form"]=Form
d["form2"]=Form2
d["form3"]=Form3
d["form4"]=Form4
d["form5"]=Form5
return render(request, 'app/index.html', d)
|
[
"[email protected]"
] | |
4e633c267ce51f581b210db672c0ed1041e02ffd
|
37ff29a9a83eafbf0f54e2ce0bf2c0255b1663a1
|
/build/husky_control/catkin_generated/generate_cached_setup.py
|
d7af4a5285b20e1200840d1b9135e75b800eadd2
|
[] |
no_license
|
wy7727/husky
|
f8d9c2a05487f66efbfb58e8fc1c141efc10e177
|
7925bc34ae316639aef88fc3e6a8d36aba12620b
|
refs/heads/master
| 2020-04-09T12:09:41.420418 | 2019-12-01T09:24:24 | 2019-12-01T09:24:24 | 160,337,603 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,353 |
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/ying/wy_ws/devel;/home/ying/px4/catkin_ws/devel;/opt/ros/kinetic".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/ying/wy_ws/devel/.private/husky_control/env.sh')
output_filename = '/home/ying/wy_ws/build/husky_control/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
[
"[email protected]"
] | |
24ff4cfbe9ebc07dc6bf91e7a7bdf56035b30726
|
b5402b40b69244380bc0d3f85ff65483d0505181
|
/mongodb/factory/execode.py
|
c462619c70d66714d3b5f4d8d1e6279769e79df5
|
[
"MIT"
] |
permissive
|
RxJellyBot/Jelly-Bot
|
ea7b6bd100431736732f9f4cc739858ec148e3e2
|
c7da1e91783dce3a2b71b955b3a22b68db9056cf
|
refs/heads/master
| 2023-08-29T20:41:01.813945 | 2021-10-20T05:27:21 | 2021-10-20T05:27:21 | 189,347,226 | 5 | 1 |
MIT
| 2020-09-05T00:50:41 | 2019-05-30T04:47:48 |
Python
|
UTF-8
|
Python
| false | false | 7,976 |
py
|
"""Execode-related data controllers."""
from datetime import timedelta
from typing import Type, Optional, Tuple
from bson import ObjectId
from django.http import QueryDict # pylint: disable=wrong-import-order
from extutils.dt import now_utc_aware
from flags import Execode, ExecodeCompletionOutcome, ExecodeCollationFailedReason
from models import ExecodeEntryModel, Model
from models.exceptions import ModelConstructionError
from mongodb.utils import ExtendedCursor
from mongodb.exceptions import NoCompleteActionError, ExecodeCollationError
from mongodb.helper import ExecodeCompletor, ExecodeRequiredKeys
from mongodb.factory.results import (
EnqueueExecodeResult, CompleteExecodeResult, GetExecodeEntryResult,
OperationOutcome, GetOutcome, WriteOutcome
)
from JellyBot.systemconfig import Database
from ._base import BaseCollection
from .mixin import GenerateTokenMixin
__all__ = ("ExecodeManager",)
DB_NAME = "execode"
class _ExecodeManager(GenerateTokenMixin, BaseCollection):
token_length = ExecodeEntryModel.EXECODE_LENGTH
token_key = ExecodeEntryModel.Execode.key
database_name = DB_NAME
collection_name = "main"
model_class = ExecodeEntryModel
def build_indexes(self):
self.create_index(ExecodeEntryModel.Execode.key, name="Execode", unique=True)
self.create_index(ExecodeEntryModel.Timestamp.key,
name="Timestamp (for TTL)", expireAfterSeconds=Database.ExecodeExpirySeconds)
def enqueue_execode(self, root_uid: ObjectId, execode_type: Execode, data_cls: Type[Model] = None,
**data_kw_args) \
-> EnqueueExecodeResult:
"""
Enqueue an Execode action.
:param root_uid: user to execute the enqueued Execode
:param execode_type: type of the execode
:param data_cls: model class of the additional data class
:param data_kw_args: arguments to construct the model
:return: enqueuing result
"""
execode = self.generate_hex_token()
now = now_utc_aware(for_mongo=True)
if not data_cls and data_kw_args:
return EnqueueExecodeResult(WriteOutcome.X_NO_MODEL_CLASS)
if data_cls:
try:
data = data_cls(**data_kw_args).to_json()
except ModelConstructionError as ex:
return EnqueueExecodeResult(WriteOutcome.X_INVALID_MODEL, ex)
else:
data = {}
if execode_type == Execode.UNKNOWN:
return EnqueueExecodeResult(WriteOutcome.X_UNKNOWN_EXECODE_ACTION)
model, outcome, ex = self.insert_one_data(
CreatorOid=root_uid, Execode=execode, ActionType=execode_type, Timestamp=now, Data=data)
return EnqueueExecodeResult(
outcome, ex, model, execode, now + timedelta(seconds=Database.ExecodeExpirySeconds))
def get_queued_execodes(self, root_uid: ObjectId) -> ExtendedCursor[ExecodeEntryModel]:
"""
Get the queued Execodes of ``root_uid``.
:param root_uid: user OID to get the queued Execodes
:return: a cursor yielding queued Execodes of the user
"""
filter_ = {ExecodeEntryModel.CreatorOid.key: root_uid}
return ExtendedCursor(self.find(filter_), self.count_documents(filter_), parse_cls=ExecodeEntryModel)
def get_execode_entry(self, execode: str, action: Optional[Execode] = None) -> GetExecodeEntryResult:
"""
Get the entry of an Execode action.
Limits the result to only return the Execode with the action type ``action`` if it is not ``None``.
:param execode: code of the Execode
:param action: action of the Execode
:return: result of getting the Execode
"""
cond = {ExecodeEntryModel.Execode.key: execode}
if action:
cond[ExecodeEntryModel.ActionType.key] = action
ret: ExecodeEntryModel = self.find_one_casted(cond)
if not ret:
if self.count_documents({ExecodeEntryModel.Execode.key: execode}) > 0:
return GetExecodeEntryResult(GetOutcome.X_EXECODE_TYPE_MISMATCH)
return GetExecodeEntryResult(GetOutcome.X_NOT_FOUND_ABORTED_INSERT)
return GetExecodeEntryResult(GetOutcome.O_CACHE_DB, model=ret)
def remove_execode(self, execode: str):
"""
Delete the Execode entry.
:param execode: execode of the entry to be deleted
"""
self.delete_one({ExecodeEntryModel.Execode.key: execode})
def _attempt_complete(self, execode: str, tk_model: ExecodeEntryModel, execode_kwargs: QueryDict) \
-> Tuple[OperationOutcome, Optional[ExecodeCompletionOutcome], Optional[Exception]]:
cmpl_outcome = ExecodeCompletionOutcome.X_NOT_EXECUTED
ex = None
try:
cmpl_outcome = ExecodeCompletor.complete_execode(tk_model, execode_kwargs)
if cmpl_outcome.is_success:
outcome = OperationOutcome.O_COMPLETED
self.remove_execode(execode)
else:
outcome = OperationOutcome.X_COMPLETION_FAILED
except NoCompleteActionError as e:
outcome = OperationOutcome.X_NO_COMPLETE_ACTION
ex = e
except ExecodeCollationError as e:
if e.err_code == ExecodeCollationFailedReason.MISSING_KEY:
outcome = OperationOutcome.X_MISSING_ARGS
else:
outcome = OperationOutcome.X_COLLATION_ERROR
ex = e
except Exception as e:
outcome = OperationOutcome.X_COMPLETION_ERROR
ex = e
return outcome, cmpl_outcome, ex
def complete_execode(self, execode: str, execode_kwargs: dict, action: Optional[Execode] = None) \
-> CompleteExecodeResult:
"""
Finalize the pending Execode.
:param execode: execode of the action to be completed
:param execode_kwargs: arguments may be needed to complete the Execode action
:param action: type of the Execode action
"""
ex = None
tk_model: Optional[ExecodeEntryModel] = None
# Force type to be dict because the type of `execode_kwargs` might be django QueryDict
if isinstance(execode_kwargs, QueryDict):
execode_kwargs = execode_kwargs.dict()
if not execode:
outcome = OperationOutcome.X_EXECODE_EMPTY
return CompleteExecodeResult(outcome, None, None, set(), ExecodeCompletionOutcome.X_NOT_EXECUTED)
# Not using self.find_one_casted for catching `ModelConstructionError`
get_execode = self.get_execode_entry(execode, action)
if get_execode.success:
tk_model = get_execode.model
# Check for missing keys
if missing_keys := ExecodeRequiredKeys.get_required_keys(tk_model.action_type).difference(execode_kwargs):
return CompleteExecodeResult(OperationOutcome.X_MISSING_ARGS, None, tk_model, missing_keys,
ExecodeCompletionOutcome.X_MISSING_ARGS)
try:
outcome, cmpl_outcome, ex = self._attempt_complete(execode, tk_model, execode_kwargs)
except ModelConstructionError as e:
outcome = OperationOutcome.X_CONSTRUCTION_ERROR
cmpl_outcome = ExecodeCompletionOutcome.X_MODEL_CONSTRUCTION
ex = e
else:
cmpl_outcome = ExecodeCompletionOutcome.X_EXECODE_NOT_FOUND
if get_execode.outcome == GetOutcome.X_NOT_FOUND_ABORTED_INSERT:
outcome = OperationOutcome.X_EXECODE_NOT_FOUND
elif get_execode.outcome == GetOutcome.X_EXECODE_TYPE_MISMATCH:
outcome = OperationOutcome.X_EXECODE_TYPE_MISMATCH
else:
outcome = OperationOutcome.X_ERROR
return CompleteExecodeResult(outcome, ex, tk_model, set(), cmpl_outcome)
ExecodeManager = _ExecodeManager()
|
[
"[email protected]"
] | |
bc311155799542ad602305eb319bcfe862940578
|
f37978530be6cf40bd7b4e5dbaf63f779114ff95
|
/src/bioregistry/curation/add_descriptions_from_gs.py
|
04f3759ae75bb8aaa0a3fc81e7d23e8b6bb18533
|
[
"MIT",
"CC0-1.0",
"CC-PDDC",
"CC-BY-4.0"
] |
permissive
|
biopragmatics/bioregistry
|
03d983e96b65681352d0eddbe39902059d299e6d
|
a05af7e42f60109f01133e3072bb673423b74dd3
|
refs/heads/main
| 2023-08-30T21:02:44.854342 | 2023-08-30T01:10:16 | 2023-08-30T01:10:16 | 319,481,281 | 77 | 28 |
MIT
| 2023-09-12T08:21:24 | 2020-12-08T00:33:21 |
Python
|
UTF-8
|
Python
| false | false | 940 |
py
|
# -*- coding: utf-8 -*-
"""Add descriptions from a google curation sheet."""
import click
import pandas as pd
import bioregistry
URL = (
"https://docs.google.com/spreadsheets/d/e/2PACX-1vQVw4odnZF34f267p9WqdQOhi"
"Y9tewD-jbnATgpi5W9smbkemvbOcVZSdeboXknoWxDhPyvtcxUYiQO/pub?gid=1947246172&single=true&output=tsv"
)
@click.command()
def main():
"""Add descriptions from a google curation sheet."""
df = pd.read_csv(URL, sep="\t")
del df[df.columns[0]]
df = df[df.description.notna()]
df = df[df["prefix"].map(lambda p: bioregistry.get_description(p) is None)]
df = df[df["prefix"].map(lambda p: bioregistry.get_obofoundry_prefix(p) is None)]
click.echo(df.to_markdown())
r = dict(bioregistry.read_registry())
for prefix, description in df[["prefix", "description"]].values:
r[prefix].description = description
bioregistry.write_registry(r)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
e1e3e0a6195e8962484a4fa4111f09eb936c7802
|
add74ecbd87c711f1e10898f87ffd31bb39cc5d6
|
/xcp2k/classes/_each148.py
|
382a04d946964f99cf0e1924c7cf471beca627c8
|
[] |
no_license
|
superstar54/xcp2k
|
82071e29613ccf58fc14e684154bb9392d00458b
|
e8afae2ccb4b777ddd3731fe99f451b56d416a83
|
refs/heads/master
| 2021-11-11T21:17:30.292500 | 2021-11-06T06:31:20 | 2021-11-06T06:31:20 | 62,589,715 | 8 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,113 |
py
|
from xcp2k.inputsection import InputSection
class _each148(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Just_energy = None
self.Powell_opt = None
self.Qs_scf = None
self.Xas_scf = None
self.Md = None
self.Pint = None
self.Metadynamics = None
self.Geo_opt = None
self.Rot_opt = None
self.Cell_opt = None
self.Band = None
self.Ep_lin_solver = None
self.Spline_find_coeffs = None
self.Replica_eval = None
self.Bsse = None
self.Shell_opt = None
self.Tddft_scf = None
self._name = "EACH"
self._keywords = {'Just_energy': 'JUST_ENERGY', 'Powell_opt': 'POWELL_OPT', 'Qs_scf': 'QS_SCF', 'Xas_scf': 'XAS_SCF', 'Md': 'MD', 'Pint': 'PINT', 'Metadynamics': 'METADYNAMICS', 'Geo_opt': 'GEO_OPT', 'Rot_opt': 'ROT_OPT', 'Cell_opt': 'CELL_OPT', 'Band': 'BAND', 'Ep_lin_solver': 'EP_LIN_SOLVER', 'Spline_find_coeffs': 'SPLINE_FIND_COEFFS', 'Replica_eval': 'REPLICA_EVAL', 'Bsse': 'BSSE', 'Shell_opt': 'SHELL_OPT', 'Tddft_scf': 'TDDFT_SCF'}
|
[
"[email protected]"
] | |
f43afaa4f0016dfe69be9baf4880dfa31bc9e26b
|
85e50dc8487701f0c15c72141681c849021d9826
|
/news/scripts/indiatoday.py
|
0dc1c52a98c0c909ed68bcbfa7521e59391e3c62
|
[] |
no_license
|
sadakchap/news-aggregator
|
4c46a9b3ab18cf4c693dff2c71c9c7fc58f1ee1c
|
dbd7e031d783f9cc9cf9e652be8b70d73d53d9cb
|
refs/heads/master
| 2022-12-09T12:34:43.929043 | 2021-06-02T01:46:30 | 2021-06-02T01:46:30 | 199,251,857 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 880 |
py
|
from bs4 import BeautifulSoup
import requests
from news.models import NewsBox
requests.packages.urllib3.disable_warnings()
url = 'https://www.indiatoday.in/'
source = requests.get(url).text
soup = BeautifulSoup(source, "lxml")
news_box = soup.find('ul', class_='itg-listing')
# print(news_box.prettify())
def indiatoday():
for news_story in news_box.find_all('li')[:7]:
news_link = url + news_story.find('a').get('href')
img_src = None
news_title = news_story.find('a').text
if not NewsBox.objects.filter(news_link=news_link).exists():
news = NewsBox()
news.src_name = 'India Today'
news.src_link = url
news.title = news_title
news.news_link = news_link
news.img = img_src
news.save()
# print(news_link)
# print(news_title)
# print('*'*80)
|
[
"[email protected]"
] | |
53f066b9b58f4908fa80f7b05ad0314541a78b2f
|
d83120a187421256f4a59e7dec582657b8b8bb88
|
/starter/.history/backend/flaskr/__init___20210711040044.py
|
96f30636e696400be6d39026aec94776b5b72129
|
[] |
no_license
|
saraalmuraytib/Trivia
|
2dc382cf0c124d673cad699cb01563ca87389d38
|
f88e21ac04681f4131c737f1674dcde32731071c
|
refs/heads/main
| 2023-06-30T04:52:08.612654 | 2021-08-03T15:33:50 | 2021-08-03T15:33:50 | 384,743,716 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,468 |
py
|
import os
from flask import Flask, request, abort, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
import random
from models import setup_db, Question, Category
QUESTIONS_PER_PAGE = 10
def paginate_questions(request, selection):
page = request.args.get('page', 1, type=int)
start = (page - 1) * QUESTIONS_PER_PAGE
end = start + QUESTIONS_PER_PAGE
questions = [question.format() for question in selection]
current_questions = questions[start:end]
return current_questions
def create_app(test_config=None):
# create and configure the app
app = Flask(__name__)
setup_db(app)
'''
@TODO 1: Set up CORS. Allow '*' for origins. Delete the sample route after completing the TODOs
'''
CORS(app)
#CORS(app, resources={'/': {'origins': '*'}})
'''
@TODO 2: Use the after_request decorator to set Access-Control-Allow
'''
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization,true')
response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
return response
'''
@TODO 3:
Create an endpoint to handle GET requests
for all available categories.
'''
@app.route('/categories')
def get_categories():
categories = Category.query.order_by(Category.type).all()
if len(categories) == 0:
abort(404)
return jsonify({
'success': True,
'categories': {category.id: category.type for category in categories}
})
'''
@TODO 4:
Create an endpoint to handle GET requests for questions,
including pagination (every 10 questions).
This endpoint should return a list of questions,
number of total questions, current category, categories.
TEST: At this point, when you start the application
you should see questions and categories generated,
ten questions per page and pagination at the bottom of the screen for three pages.
Clicking on the page numbers should update the questions.
'''
@app.route('/questions')
def get_questions():
selection = Question.query.order_by(Question.id).all()
current_questions = paginate_questions(request, selection)
categories = Category.query.order_by(Category.type).all()
if len(current_questions) == 0:
abort(404)
'''
This endpoint should return a list of questions,
number of total questions, current category, categories
'''
return jsonify({
'success': True,
'questions': current_questions,
'total_questions': len(selection),
'categories': {category.id: category.type for category in categories},
'current_category': None
})
'''
@TODO 5:
Create an endpoint to DELETE question using a question ID.
TEST: When you click the trash icon next to a question, the question will be removed.
This removal will persist in the database and when you refresh the page.
'''
@app.route('/questions/<int:question_id>', methods=['DELETE'])
def delete_question(question_id):
try:
question = Question.query.filter(
Question.id == question_id).one_or_none()
if question is None:
abort(404)
question.delete()
return jsonify({
'success': True,
'deleted': question_id
})
except:
abort(422)
'''
@TODO 6:
Create an endpoint to POST a new question,
which will require the question and answer text,
category, and difficulty score.
TEST: When you submit a question on the "Add" tab,
the form will clear and the question will appear at the end of the last page
of the questions list in the "List" tab.
'''
@app.route('/questions', methods=['POST'])
def create_question():
body = request.get_json()
new_question = body.get('question')
new_answer = body.get('answer')
new_category = body.get('category')
new_difficulty = body.get('difficulty')
try:
question = Question(question=new_question, answer=new_answer,
category=new_category,difficulty=new_difficulty)
question.insert()
return jsonify({
'success': True,
'created': question.id,
})
except:
abort(422)
'''
@TODO 7:
Create a POST endpoint to get questions based on a search term.
It should return any questions for whom the search term
is a substring of the question.
TEST: Search by any phrase. The questions list will update to include
only question that include that string within their question.
Try using the word "title" to start.
'''
'''
@TODO 8:
Create a GET endpoint to get questions based on category.
TEST: In the "List" tab / main screen, clicking on one of the
categories in the left column will cause only questions of that
category to be shown.
'''
'''
@TODO 9:
Create a POST endpoint to get questions to play the quiz.
This endpoint should take category and previous question parameters
and return a random questions within the given category,
if provided, and that is not one of the previous questions.
TEST: In the "Play" tab, after a user selects "All" or a category,
one question at a time is displayed, the user is allowed to answer
and shown whether they were correct or not.
'''
'''
@TODO 10:
Create error handlers for all expected errors
including 404 and 422.
'''
return app
|
[
"[email protected]"
] | |
ef3d8382444a8442f31cd305561d3477ba1a01b4
|
4ff8676136167cdd81d7a983272102fff86360e8
|
/python/258. 各位相加.py
|
138d139307262136c271371b5e43d5a1c038538f
|
[] |
no_license
|
geniuscynic/leetcode
|
0ec256af2377d19fee22ce736462a7e95e3f4e67
|
379a8f27f8213951ee8be41bd56598036995d267
|
refs/heads/master
| 2023-07-19T07:22:20.001770 | 2021-09-07T14:50:40 | 2021-09-07T14:50:40 | 297,277,833 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,290 |
py
|
import sys
from collections import defaultdict
from collections import Counter
from collections import deque
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def addDigits(self, num: int) -> int:
res = num
while res >= 10:
num = res
res = 0
while num != 0:
res += num % 10
num = num // 10
return res
def coverttoTree():
ls =deque([6,2,8,0,4,7,9,None,None,3,5])
temp = TreeNode(ls.popleft())
res = deque()
res.append(temp)
while ls:
left = ls.popleft()
right = ls.popleft()
node = res.popleft()
#print(node.val, left, right)
if left != None:
node.left = TreeNode(left)
res.append(node.left)
if right != None:
node.right = TreeNode(right)
res.append(node.right)
return temp
if __name__ == "__main__":
solution = Solution()
nums1 = 38
m = TreeNode(2)
nums2 = TreeNode(4)
n = 3
result = solution.addDigits(nums1)
#print(solution.ls)
print(result)
|
[
"[email protected]"
] | |
c4eed199172881acd25a5d986044c3aded598837
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/2D_20200722174654.py
|
d47af0911f58fcf03a29cbe21f4c5d543a6f60ef
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,036 |
py
|
def array(n,m):
# where n is row size and m is column size
array = [[0 for x in range(n)] for x in range(m)]
print(array)
a = [[2, 4, 6, 8, 10], [3, 6, 9, 12, 15], [4, 8, 12, 16, 20]]
# where the first arguement reps the row and second arguement reps the column
print(a[0][3])
def hourGlass(arr):
# you have a 2d array
# get max hour glass
# var maxCount to keep record of the max count
# what do you know about an hourglass
# the indicies fall in a pattern where
# i and i+2 are not equal to 0 and i + 1 is equal to 0
maxCount = 1
totalCount = 0
count = 0
j = 3
if arr !=[]:
for i in range(len(arr)):
# remember j is looping through arr[i]
for j in range(len(arr[i])):
print(arr[i][j],arr[i])
else:
return 0
print(hourGlass([[1,1,1,0,0,0],[0,1,0,0,0,0],[1,1,1,0,0,0],[0,0,2,4,4,0],[0,0,0,2,0,0],[0,0,1,2,4,0]]))
|
[
"[email protected]"
] | |
74f652c82ed09864bfc29d35dfe58397eefec789
|
613d8e4af67407c8e95effb1759b9ffca5246cd3
|
/oc_stats/common.py
|
c9028eedcbcc4e889573a69a6a85ae4c9a630def
|
[] |
no_license
|
old-castle-fansubs/stats
|
c0286af1c66d82165a526b4a307c79235da6807a
|
dd814252918d1d5f08af90ec0d39b008249639a1
|
refs/heads/master
| 2021-07-21T15:14:22.622202 | 2021-07-18T15:02:35 | 2021-07-18T15:02:35 | 189,878,638 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,049 |
py
|
import dataclasses
import typing as T
from datetime import date, datetime, timedelta
from pathlib import Path
PROJ_DIR = Path(__file__).parent
ROOT_DIR = PROJ_DIR.parent
DATA_DIR = ROOT_DIR / "data"
CACHE_DIR = DATA_DIR / "cache"
STATIC_DIR = PROJ_DIR / "static"
def json_default(obj: T.Any) -> T.Any:
if dataclasses.is_dataclass(obj):
return dataclasses.asdict(obj)
if isinstance(obj, (datetime, date)):
return obj.isoformat()
if isinstance(obj, timedelta):
return obj.total_seconds()
return None
def convert_to_diffs(
items: dict[date, T.Union[int, float]]
) -> dict[date, T.Union[int, float]]:
ret: dict[date, T.Union[int, float]] = {}
if not items:
return ret
prev_key = list(items.keys())[0]
prev_value = None
for key, value in sorted(items.items(), key=lambda kv: kv[0]):
if prev_value is not None:
if abs((key - prev_key).days) <= 1:
ret[key] = value - prev_value
prev_key = key
prev_value = value
return ret
|
[
"[email protected]"
] | |
1afc24adfe7a79c184a08009fa6d8f3031d965d5
|
b02a759c59a9d469f5fee00b3775703c5405e1b2
|
/16.RecommenderSystems/test_evaluation_model.py
|
0697d622f761ff08d2ef829abbb01ea9d92d7d8e
|
[] |
no_license
|
asdlei99/MachineLearningCombat
|
a18672e11306d26cc59b9bb16ee90db06af24df5
|
57e8dafcef849eb407bc79a0b0724abc9931bd27
|
refs/heads/master
| 2021-09-15T12:50:24.531567 | 2018-06-02T01:30:27 | 2018-06-02T01:30:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,754 |
py
|
import random
import math
def SplitData(data, M, k, seed):
test = []
train = []
random.seed(seed)
for user, item in data:
if random.randint(0, M) == k:
test.append([user, item])
else:
train.append([user, item])
return train, test
# 准确率
def Precision(train, test, N):
hit = 0
all = 0
for user in train.keys():
tu = test[user]
rank = GetRecommendation(user, N)
for item, pui in rank:
if item in tu:
hit += 1
all += N
return hit / (all * 1.0)
# 召回率
def Recall(train, test, N):
hit = 0
all = 0
for user in train.keys():
tu = test[user]
rank = GetRecommendation(user, N)
for item, pui in rank:
if item in tu:
hit += 1
all += len(tu)
return hit / (all * 1.0)
# 覆盖率
def Coverage(train, test, N):
recommend_items = set()
all_items = set()
for user in train.keys():
for item in train[user].keys():
all_items.add(item)
rank = GetRecommendation(user, N)
for item, pui in rank:
recommend_items.add(item)
return len(recommend_items) / (len(all_items) * 1.0)
# 新颖度
def Popularity(train, test, N):
item_popularity = dict()
for user, items in train.items():
for item in items.keys():
if item not in item_popularity:
item_popularity[item] = 0
item_popularity[item] += 1
ret = 0
n = 0
for user in train.keys():
rank = GetRecommendation(user, N)
for item, pui in rank:
ret += math.log(1 + item_popularity[item])
n += 1
ret /= n * 1.0
return ret
|
[
"[email protected]"
] | |
924614ca77686fbed9c16d7c46a7bc9e6c37e0a0
|
6471dcdf07fc0bdcde9797914857d154d85e3c07
|
/pie_app/bSerial.py
|
8f5c20566aaf7756b96301008a6c2c9f609a675b
|
[] |
no_license
|
cudmore/pie
|
e43ec3c4c95acb5a051a25a8d5549071908ed5c8
|
b74b105bc101a8504453d20a066fcd764864731f
|
refs/heads/master
| 2023-05-15T01:51:54.326274 | 2023-05-09T02:17:11 | 2023-05-09T02:17:11 | 139,335,434 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,915 |
py
|
"""
Author: Robert H Cudmore
Date: 20180808
"""
import time, threading, serial, queue
import logging
logger = logging.getLogger('flask.app')
#########################################################################
class mySerialThread(threading.Thread):
"""
background thread which monitors inSerialQueue and sends commands out serial.
"""
def __init__(self, inSerialQueue, outSerialQueue, errorSerialQueue, port, baud):
threading.Thread.__init__(self)
self._stop_event = threading.Event()
self.inSerialQueue = inSerialQueue
self.outSerialQueue = outSerialQueue
self.errorSerialQueue = errorSerialQueue
self.port = port #'/dev/ttyACM0'
self.baud = baud #115200
logger.debug('mySerialThread initializing, port:' + str(port) + ' baud:' + str(baud))
self.mySerial = None
try:
# there is no corresponding self.mySerial.close() ???
self.mySerial = serial.Serial(port, baud, timeout=0.25)
except (serial.SerialException) as e:
logger.error(str(e))
errorSerialQueue.put(str(e))
except:
logger.error('other exception in mySerialThread init')
raise
#else:
# errorSerialQueue.put('None')
def stop(self):
"""
call stop() then join() to ensure thread is done
"""
self._stop_event.set()
def run(self):
logger.debug('starting mySerialThread')
while not self._stop_event.is_set():
try:
# serialDict is {'type': command/dump, 'str': command/filePath}
serialDict = self.inSerialQueue.get(block=False, timeout=0)
except (queue.Empty) as e:
# there was nothing in the queue
pass
else:
# there was something in the queue
#logger.info('serialThread inSerialQueue: "' + str(serialCommand) + '"')
serialType = serialDict['type']
serialCommand = serialDict['str']
try:
if self.mySerial is not None:
if serialType == 'dump':
# dump a teensy/arduino trial to a file
fullSavePath = serialCommand
self.mySerial.write('d\n'.encode()) # write 'd\n'
#time.sleep(0.01)
resp = self.mySerial.readline().decode().strip()
with open(fullSavePath, 'w') as file:
while resp:
file.write(resp + '\n')
resp = self.mySerial.readline().decode().strip()
elif serialType == 'command':
# send a command to teensy and get one line response
if not serialCommand.endswith('\n'):
serialCommand += '\n'
self.mySerial.write(serialCommand.encode())
#time.sleep(0.01)
resp = self.mySerial.readline().decode().strip()
self.outSerialQueue.put(resp)
logger.info('serialThread outSerialQueue: "' + str(resp) + '"')
else:
logger.error('bad serial command type' + str(serialDict))
except (serial.SerialException) as e:
logger.error(str(e))
except:
logger.error('other exception in mySerialThread run')
raise
# make sure not to remove this
time.sleep(0.1)
|
[
"[email protected]"
] | |
3802507496894d4653a42e21e7b67071fb3f637a
|
d5e8610ad12b6733e5b014abba5cf356cb658a44
|
/network/client.py
|
cbcce48537f16c421516b99c718c8210d6b6b600
|
[
"WTFPL"
] |
permissive
|
EliasFarhan/GBJam
|
494c4bfcff1e2246001d489c3e60a6e9ddb6ae62
|
156a003378b8db994d4bd1f9c3a12415ceac7c01
|
refs/heads/master
| 2016-09-05T11:41:50.329113 | 2014-08-10T20:14:31 | 2014-08-10T20:14:31 | 22,541,009 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,869 |
py
|
import copy
import socket
from threading import Lock, Thread
from engine.const import log, CONST
from engine.vector import Vector2
players = {}
players_lock = Lock()
player_pos = Vector2()
player_anim_state = ""
player_anim_counter = 0
client_player_lock = Lock()
PORT = CONST.port
HOST = CONST.host
update_thread = None
self_id = 0
sock = None
def get_players():
global players,players_lock
players_lock.acquire()
tmp_players = copy.deepcopy(players)
players_lock.release()
return tmp_players
def get_player():
global player_pos, player_anim_counter, player_anim_state, client_player_lock
client_player_lock.acquire()
pos = copy.deepcopy(player_pos)
state = copy.deepcopy(player_anim_state)
frame = copy.deepcopy(player_anim_counter)
client_player_lock.release()
return pos, state, frame
def set_player(new_player):
global player_pos, player_anim_counter, player_anim_state, client_player_lock
from engine.init import engine
client_player_lock.acquire()
player_pos = new_player.pos + new_player.screen_relative_pos * engine.get_screen_size()
player_anim_state = new_player.anim.state
player_anim_counter = new_player.anim.anim_counter
client_player_lock.release()
def get_self_id():
global self_id
return self_id
def init():
global update_thread, self_id,sock
data = "ID_REQUEST;"
new_id_request = None
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
sock.sendall(data)
new_id_request = sock.recv(1024)
except socket.error as e:
sock.close()
sock = None
log("Network init: "+str(e),1)
return
self_id = new_id_request.split(";")[1]
get_thread = Thread(target=client_get)
get_thread.daemon = True
get_thread.start()
set_thread = Thread(target=client_set)
set_thread.daemon = True
set_thread.start()
def client_get():
log("START UPDATE SERVER")
from engine.init import engine
udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while not engine.finish:
udp_sock.sendto("GET_REQUEST;%s"%self_id, (HOST, PORT+1))
get_player_data = udp_sock.recv(1024)
"""Position"""
parsed_data = get_player_data.split(';')
parsed_data[1] = parsed_data[1].split(',')
parsed_data[1] = Vector2(int(float(parsed_data[1][0])), int(float(parsed_data[1][1])))
"""Frame"""
parsed_data[3] = int(parsed_data[3])
"""update players position"""
players_lock.acquire()
players[parsed_data[0]] = parsed_data
players_lock.release()
def client_set():
log("START UPDATE SERVER")
from engine.init import engine
udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while not engine.finish:
(pos, state, frame) = get_player()
if state != "":
udp_sock.sendto("SET_REQUEST;"+str(self_id)+";"
+pos.get_string() +";"
+state+";"
+str(frame)+";"
, (HOST, PORT+2))
def set_request(pos, state, frame):
global sock
"""Change the position of the player on the server"""
"""Set correct pos, state, frame"""
try:
if not sock:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
sock.sendall("SET_REQUEST;"+str(self_id)+";"+pos.get_string() +";"+state+";"+str(frame)+";")
sock.recv(1024)
except socket.error as e:
sock.close()
sock = None
log("Network set: "+str(e),1)
return
def get_players_request():
global sock
try:
if not sock:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
sock.sendall("GET_REQUEST;")
get_request_nmb = sock.recv(1024)
#log(get_request_nmb)
try:
nmb = int(get_request_nmb.split(';')[1])
sock.sendall("%i;"%nmb)
length = 5
for i in range(nmb):
get_request_data = sock.recv(1024)
#log(get_request_data)
"""Position"""
parsed_data = get_request_data.split(';')
parsed_data[1] = parsed_data[1].split(',')
parsed_data[1] = Vector2(int(float(parsed_data[1][0])), int(float(parsed_data[1][1])))
"""Frame"""
parsed_data[3] = int(parsed_data[3])
"""update players position"""
players[parsed_data[0]] = parsed_data
sock.sendall("NEXT")
except IndexError:
pass
except socket.error as e:
sock.close()
sock = None
log("Network get: "+str(e),1)
return
|
[
"[email protected]"
] | |
670e3279efdafed42ae531d9129bbbdd829e0b5a
|
b472c692ac9efc39e508d2709fe14e2b4e844fd7
|
/Python/test.py
|
818eb63debb2c34cbca6a32dc628f5e91c3de1f1
|
[] |
no_license
|
enningxie/Interview
|
75ac734873282dc67503815019718a6e1b27c512
|
becdd40463c01551f2c8a53abc9d2281979f2bc1
|
refs/heads/master
| 2020-03-18T08:13:00.049201 | 2018-12-25T01:02:45 | 2018-12-25T01:02:45 | 134,496,872 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 216 |
py
|
from collections import defaultdict
if __name__ == '__main__':
d_dict = defaultdict()
nums1 = [1, 2, 2, 1]
nums2 = [2, 2]
for i, value in enumerate(nums1):
d_dict[i] = value
d_dict.pop()
|
[
"[email protected]"
] | |
ef5cedf36af3d5382bcecb579ae28b374f22bd7d
|
7bd5ca970fbbe4a3ed0c7dadcf43ba8681a737f3
|
/atcoder/arc/arc006/c.py
|
cb6d240266da7927722159dc9328cf026bb5874f
|
[] |
no_license
|
roiti46/Contest
|
c0c35478cd80f675965d10b1a371e44084f9b6ee
|
c4b850d76796c5388d2e0d2234f90dc8acfaadfa
|
refs/heads/master
| 2021-01-17T13:23:30.551754 | 2017-12-10T13:06:42 | 2017-12-10T13:06:42 | 27,001,893 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 332 |
py
|
# -*- coding: utf-8 -*-
import sys,copy,math,heapq,itertools as it,fractions,re,bisect,collections as coll
N = int(raw_input())
a = []
for i in xrange(N):
w = int(raw_input())
for j in xrange(len(a)):
if a[j] >= w:
a[j] = w
break
else:
a.append(w)
a = sorted(a)
print len(a)
|
[
"[email protected]"
] | |
784fa6abdaea42679c74b37c9104130a6c8ab49a
|
ee7e42417d9d1e76b0e84e44dc6eb037adc3ebad
|
/.history/manage_20190703133205.py
|
97195c48fcefe2c35d0dc6fce4c318d0b570fa33
|
[] |
no_license
|
web3-qa/pets-api
|
4632127ee84a299f207d95754f409fc1e4c0013d
|
ee4a04e7291740ac8eb6147c305b41d27d5be29c
|
refs/heads/master
| 2023-05-12T09:09:47.509063 | 2019-07-18T15:07:13 | 2019-07-18T15:07:13 | 197,611,701 | 0 | 0 | null | 2023-05-01T19:42:17 | 2019-07-18T15:19:59 |
Python
|
UTF-8
|
Python
| false | false | 429 |
py
|
import os
import sys
from os import path
from xmlrpc.client import Server
from virtualenv import create_bootstrap_script
from application import create_bootstrap_script
from flask_script import Manager, Server
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),'..')))
app = create_app()
manager = Manager(app)
manager.add_command("runserver", Server
use_debugger = True,
use_reloader = True,
)
|
[
"[email protected]"
] | |
3e0f44d691a846473e3bd241080b059dfc70b086
|
8fd56e9b9dbc49c16b4a8afe1007f824183bb0ab
|
/Python_Stack/django/django_fundamentals/django_intro/dojo_and_ninjas/dojo_and_ninjas/settings.py
|
f0d9e05a39974011a73b5ec3195f1ab742aa2ea1
|
[] |
no_license
|
DiyarBarham/CodingDojo
|
b1cc7d7355f5fb139cb640168f78d6b7f91e372a
|
0891e2c41ddbb9004eadfd2d54fe7f34d6d4ef58
|
refs/heads/main
| 2023-07-08T12:12:33.227932 | 2021-08-07T13:55:33 | 2021-08-07T13:55:33 | 363,878,740 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,293 |
py
|
"""
Django settings for dojo_and_ninjas project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-53wty_=t8u#0m8n^_@cd965s9q2u-cj=wrylu)y0ty#e9=@#z_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'dojo_and_ninjas_app',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dojo_and_ninjas.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dojo_and_ninjas.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"[email protected]"
] | |
8f629fd50ba1ec120f55b90d665fc66b65f77590
|
07a783c06bb4bb059e8c38589fe3f9bfc5a14b22
|
/tests/builtins/test_iter.py
|
b06f6fbbe28a6ac4114a49d71dc32bf850c37594
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
vishalsodani/batavia
|
a3c79b0342069fe6387eb3d7cc3ac3f4947d1842
|
690e5093da6653456381466e5fb9c153c295cb6b
|
refs/heads/master
| 2021-01-22T08:10:07.777012 | 2016-09-03T15:14:52 | 2016-09-03T15:14:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 395 |
py
|
from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class IterTests(TranspileTestCase):
pass
class BuiltinIterFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["iter"]
not_implemented = [
'test_bytearray',
'test_bytes',
'test_complex',
'test_dict',
'test_frozenset',
'test_NotImplemented',
]
|
[
"[email protected]"
] | |
dbfee9b9af6cb812be348a4573cc993fd8d52d08
|
c7a6f8ed434c86b4cdae9c6144b9dd557e594f78
|
/ECE364/.PyCharm40/system/python_stubs/348993582/gtk/_gtk/TreeSortable.py
|
37cf02c4e1bb9f31abf693a5bf90d28a3cd5e6da
|
[] |
no_license
|
ArbalestV/Purdue-Coursework
|
75d979bbe72106975812b1d46b7d854e16e8e15e
|
ee7f86145edb41c17aefcd442fa42353a9e1b5d1
|
refs/heads/master
| 2020-08-29T05:27:52.342264 | 2018-04-03T17:59:01 | 2018-04-03T17:59:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,309 |
py
|
# encoding: utf-8
# module gtk._gtk
# from /usr/lib64/python2.6/site-packages/gtk-2.0/gtk/_gtk.so
# by generator 1.136
# no doc
# imports
import atk as __atk
import gio as __gio
import gobject as __gobject
import gobject._gobject as __gobject__gobject
class TreeSortable(__gobject.GInterface):
# no doc
@classmethod
def do_has_default_sort_func(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def do_set_sort_column_id(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def do_sort_column_changed(cls, *args, **kwargs): # real signature unknown
pass
def get_sort_column_id(self, *args, **kwargs): # real signature unknown
pass
def has_default_sort_func(self, *args, **kwargs): # real signature unknown
pass
def set_default_sort_func(self, *args, **kwargs): # real signature unknown
pass
def set_sort_column_id(self, *args, **kwargs): # real signature unknown
pass
def set_sort_func(self, *args, **kwargs): # real signature unknown
pass
def sort_column_changed(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__gtype__ = None # (!) real value is ''
|
[
"[email protected]"
] | |
31a19fe8af08abd87b739ef20aafc4f44fe5f260
|
97c5fe6a54636de9b056719ea62ac1de4e76ebdc
|
/src/newsletter/views.py
|
04f05109ae7071108c1fdf1cfd58ac97df81bd14
|
[
"MIT"
] |
permissive
|
EdwardBetts/matchmaker
|
937ece7acbfd1fcb57ab59cd13b16c3cd67d54f3
|
ec56d18c6af8ca904325deca3be56484d3415c70
|
refs/heads/master
| 2020-12-11T01:50:10.773983 | 2016-01-26T16:53:29 | 2016-01-26T16:53:29 | 56,478,725 | 0 | 0 | null | 2016-04-18T05:11:12 | 2016-04-18T05:11:12 | null |
UTF-8
|
Python
| false | false | 3,301 |
py
|
from django.conf import settings
from django.core.mail import send_mail
from django.shortcuts import render, get_object_or_404
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.edit import ModelFormMixin
from likes.models import Like
from matches.models import Match
from profiles.models import UserJob
from questions.mixins import PostQuestionMixin
from questions.models import UserAnswer, Question, LEVELS
from questions.forms import QuestionForm
from .forms import ContactForm, SignUpForm
from .models import SignUp
class homeTemplateView(PostQuestionMixin, ModelFormMixin, TemplateView):
template_name = "home.html"
form_class = QuestionForm
def get_context_data(self, *args, **kwargs):
if self.request.user.is_authenticated():
self.object = Question.objects.get_unanswered_questions(self.request.user).order_by("?").first()
positions = []
locations = []
employers = []
context = super(homeTemplateView, self).get_context_data(*args, **kwargs)
matches, users = Match.objects.get_match_all(self.request.user)[:6]
jobs = UserJob.objects.filter(user__in=users).order_by("?")[:6]
user_like = get_object_or_404(Like, user=self.request.user)
context["answers"] = UserAnswer.objects.filter(
user=self.request.user,
question=self.object).first()
if jobs:
for job in jobs:
if job.position not in positions:
positions.append(job.position)
if job.location not in locations:
locations.append(job.location)
if job.employer not in employers:
employers.append(job.employer)
context["positions"] = positions
context["locations"] = locations
context["employers"] = employers
context["liked_users"] = user_like.liked_users.all()
context["matches_list"] = matches
context["question_object"] = self.object
context["levels"] = LEVELS
context["form"] = self.form_class
return context
def contact(request):
title = 'Contact Us'
title_align_center = True
form = ContactForm(request.POST or None)
if form.is_valid():
form_email = form.cleaned_data.get("email")
form_message = form.cleaned_data.get("message")
form_full_name = form.cleaned_data.get("full_name")
subject = 'Site contact form'
from_email = settings.EMAIL_HOST_USER
to_email = [from_email, '[email protected]']
contact_message = "%s: %s via %s" % (
form_full_name,
form_message,
form_email)
some_html_message = """
<h1>hello</h1>
"""
send_mail(subject,
contact_message,
from_email,
to_email,
html_message=some_html_message,
fail_silently=True)
context = {
"form": form,
"title": title,
"title_align_center": title_align_center,
}
return render(request, "forms.html", context)
|
[
"[email protected]"
] | |
84aaf4120a0a6a01012a2a5dcf06b0d75f9c3de5
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/_MY_ORGS/Web-Dev-Collaborative/blog-research/Data-Structures/1-Python/strings/contain_string.py
|
67056fed67317b8f05ae54f52aee5108734c2c45
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 |
MIT
| 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null |
UTF-8
|
Python
| false | false | 651 |
py
|
"""
Implement strStr().
Return the index of the first occurrence of needle in haystack, or -1 if needle is not part of haystack.
Example 1:
Input: haystack = "hello", needle = "ll"
Output: 2
Example 2:
Input: haystack = "aaaaa", needle = "bba"
Output: -1
Reference: https://leetcode.com/problems/implement-strstr/description/
"""
def contain_string(haystack, needle):
if len(needle) == 0:
return 0
if len(needle) > len(haystack):
return -1
for i in range(len(haystack)):
if len(haystack) - i < len(needle):
return -1
if haystack[i:i+len(needle)] == needle:
return i
return -1
|
[
"[email protected]"
] | |
675ae4611cdb22f5676993b3c7c77fdad3196c7b
|
e638e9fda0e672fa9a414515d0c05a24ab55ad38
|
/FindPeakElement.py
|
8ff51d79a507758cc2e9a7895bd046e1d669a131
|
[] |
no_license
|
zjuzpz/Algorithms
|
8d1c7d50429aa5540eb817dc5495a20fc3f11125
|
2df1a58aa9474f2ecec2ee7c45ebf12466181391
|
refs/heads/master
| 2021-01-21T05:55:48.768728 | 2020-08-04T22:44:08 | 2020-08-04T22:44:08 | 44,586,024 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,221 |
py
|
"""
162. Find Peak Element
A peak element is an element that is greater than its neighbors.
Given an input array where num[i] ≠ num[i+1], find a peak element and return its index.
The array may contain multiple peaks, in that case return the index to any one of the peaks is fine.
You may imagine that num[-1] = num[n] = -∞.
For example, in array [1, 2, 3, 1], 3 is a peak element
and your function should return the index number 2.
"""
# O(logn)
# O(1)
class Solution(object):
def findPeakElement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if not nums:
return
lower, upper = 0, len(nums) - 1
while lower < upper:
mid = (lower + upper) // 2
if mid == 0:
if len(nums) == 1 or nums[mid] > nums[mid + 1]:
return mid
return mid + 1
if nums[mid] > nums[mid - 1]:
if mid == len(nums) - 1 or nums[mid] > nums[mid + 1]:
return mid
lower = mid + 1
else:
upper = mid - 1
return lower
if __name__ == "__main__":
print(Solution().findPeakElement([1,2,3,1]))
|
[
"[email protected]"
] | |
84e04268feae1b1a5487fcbd2eaeda1fadbcb044
|
978248bf0f275ae688f194593aa32c267832b2b6
|
/xlsxwriter/test/table/test_table04.py
|
b4fda1d6dc147b5f25e5321e06cd322bd6883955
|
[
"BSD-2-Clause-Views"
] |
permissive
|
satish1337/XlsxWriter
|
b0c216b91be1b74d6cac017a152023aa1d581de2
|
0ab9bdded4f750246c41a439f6a6cecaf9179030
|
refs/heads/master
| 2021-01-22T02:35:13.158752 | 2015-03-31T20:32:28 | 2015-03-31T20:32:28 | 33,300,989 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,867 |
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...table import Table
from ...worksheet import Worksheet
from ...workbook import WorksheetMeta
from ...sharedstrings import SharedStringTable
class TestAssembleTable(unittest.TestCase):
"""
Test assembling a complete Table file.
"""
def test_assemble_xml_file(self):
"""Test writing a table"""
self.maxDiff = None
worksheet = Worksheet()
worksheet.worksheet_meta = WorksheetMeta()
worksheet.str_table = SharedStringTable()
worksheet.add_table('C3:F13', {'autofilter': False})
worksheet._prepare_tables(1)
fh = StringIO()
table = Table()
table._set_filehandle(fh)
table._set_properties(worksheet.tables[0])
table._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<table xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" id="1" name="Table1" displayName="Table1" ref="C3:F13" totalsRowShown="0">
<tableColumns count="4">
<tableColumn id="1" name="Column1"/>
<tableColumn id="2" name="Column2"/>
<tableColumn id="3" name="Column3"/>
<tableColumn id="4" name="Column4"/>
</tableColumns>
<tableStyleInfo name="TableStyleMedium9" showFirstColumn="0" showLastColumn="0" showRowStripes="1" showColumnStripes="0"/>
</table>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
|
[
"[email protected]"
] | |
b9da67d441c8a8ea04914a68e4e11e3566b32dde
|
9ca55981d3245d87d45debce8e9825b60db43046
|
/chemicals/thermal_conductivity.pyi
|
75e0492a449dc884cb25dbc4ed2080c24b0a1a97
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
CalebBell/chemicals
|
c6b1ebd409c32e0e1053c4f97668a8ebcc92b969
|
37e32a7c7f819e0cb8e2a8784f8448f68b9a4215
|
refs/heads/master
| 2023-07-25T23:34:17.754310 | 2023-07-25T02:00:14 | 2023-07-25T02:00:14 | 264,697,738 | 137 | 33 |
MIT
| 2022-06-05T18:21:02 | 2020-05-17T15:27:11 |
Python
|
UTF-8
|
Python
| false | false | 2,921 |
pyi
|
# DO NOT EDIT - AUTOMATICALLY GENERATED BY tests/make_test_stubs.py!
from typing import List
from pandas.core.frame import DataFrame
from typing import (
List,
Optional,
Union,
)
def Bahadori_gas(T: float, MW: int) -> float: ...
def Bahadori_liquid(T: float, M: int) -> float: ...
def Chung(T: float, MW: float, Tc: float, omega: float, Cvm: float, mu: float) -> float: ...
def Chung_dense(
T: float,
MW: float,
Tc: float,
Vc: float,
omega: float,
Cvm: float,
Vm: float,
mu: float,
dipole: float,
association: float = ...
) -> float: ...
def DIPPR9B(
T: float,
MW: float,
Cvm: float,
mu: float,
Tc: Optional[float] = ...,
chemtype: Optional[str] = ...
) -> float: ...
def DIPPR9G(T: float, P: float, Tc: float, Pc: float, kl: float) -> float: ...
def DIPPR9H(ws: List[float], ks: List[float]) -> float: ...
def DIPPR9I(zs: List[float], Vms: List[float], ks: List[float]) -> float: ...
def Eli_Hanley(T: float, MW: float, Tc: float, Vc: float, Zc: float, omega: float, Cvm: float) -> float: ...
def Eli_Hanley_dense(
T: float,
MW: float,
Tc: float,
Vc: float,
Zc: float,
omega: float,
Cvm: float,
Vm: float
) -> float: ...
def Eucken(MW: float, Cvm: float, mu: float) -> float: ...
def Eucken_modified(MW: float, Cvm: float, mu: float) -> float: ...
def Filippov(ws: List[float], ks: List[float]) -> float: ...
def Gharagheizi_gas(T: float, MW: float, Tb: float, Pc: float, omega: float) -> float: ...
def Gharagheizi_liquid(T: int, M: int, Tb: int, Pc: float, omega: float) -> float: ...
def Lakshmi_Prasad(T: float, M: int) -> float: ...
def Lindsay_Bromley(
T: float,
ys: List[float],
ks: List[float],
mus: List[float],
Tbs: List[float],
MWs: List[float]
) -> float: ...
def Mersmann_Kind_thermal_conductivity_liquid(T: int, MW: float, Tc: float, Vc: float, na: int) -> float: ...
def Missenard(T: float, P: float, Tc: float, Pc: float, kl: float) -> float: ...
def Nicola(T: int, M: float, Tc: float, Pc: float, omega: float) -> float: ...
def Nicola_original(T: int, M: float, Tc: float, omega: float, Hfus: int) -> float: ...
def Sato_Riedel(T: int, M: int, Tb: int, Tc: int) -> float: ...
def Sheffy_Johnson(T: int, M: int, Tm: int) -> float: ...
def Stiel_Thodos_dense(T: float, MW: float, Tc: float, Pc: float, Vc: float, Zc: float, Vm: float, kg: float) -> float: ...
def Wassiljewa_Herning_Zipperer(
zs: List[float],
ks: List[float],
MWs: Union[List[int], List[float]],
MW_roots: Optional[List[float]] = ...
) -> float: ...
def __getattr__(name: str) -> DataFrame: ...
def _load_k_data() -> None: ...
def k_IAPWS(
T: float,
rho: float,
Cp: Optional[float] = ...,
Cv: Optional[float] = ...,
mu: Optional[float] = ...,
drho_dP: Optional[float] = ...
) -> float: ...
__all__: List[str]
|
[
"[email protected]"
] | |
47c2d02951db7604b35e5da5d690abc1744355f0
|
5cb8b2e8496a2e6d0cfa7f2bae26d43bce263632
|
/example2.py
|
820d5006d40bf4e7f280e7882c49bd00a38c06f4
|
[] |
no_license
|
Malak-Abdallah/Intro_to_python
|
3745786e499c8d6a95c414d3e5d87d27e5332039
|
9dbd8fa6e52b3056ef5406ab1337291feefad8da
|
refs/heads/master
| 2023-06-25T09:09:16.709645 | 2021-07-28T12:26:18 | 2021-07-28T12:26:18 | 383,780,163 | 0 | 1 | null | 2021-07-26T13:02:32 | 2021-07-07T11:48:29 |
Python
|
UTF-8
|
Python
| false | false | 306 |
py
|
if __name__ == '__main__':
x=list(map(int,input().split(" ")))
lists={}
for i in range(x[0]):
lists[i]=list(map(int,input().split(" ")))
lists[i].sort(reverse=True)
num=0
for i in range(x[0]):
num = num+ lists[i][0] ** 2
if num <x[1]:
print(num)
|
[
"[email protected]"
] | |
efde0bd1158f2af6861f6ccd43d34cadf581acf1
|
c0e7f3e1801bfd99c5fc86067ea552a8436c47dd
|
/junk_drawer/store.py
|
91bbab4f943ccdcd3bb220b99dccdc8e8a27896d
|
[] |
no_license
|
Opentrons/junk-drawer
|
27a463053dbb45e56902f3d8286f57f931631f59
|
32ca381f754489b476e26fbf18001bbf98881ea9
|
refs/heads/main
| 2023-08-29T06:11:09.028658 | 2021-10-08T18:03:20 | 2021-10-08T18:03:20 | 298,059,402 | 0 | 0 | null | 2023-05-23T16:17:06 | 2020-09-23T18:26:53 |
Python
|
UTF-8
|
Python
| false | false | 5,183 |
py
|
"""Store module for junk_drawer."""
from __future__ import annotations
from logging import getLogger
from typing import Optional
from .read_store import SCHEMA_VERSION_KEY, ReadStore, ModelT
from .filesystem import (
PathNotFoundError,
RemoveFileError,
FileEncodeError,
FileWriteError,
)
log = getLogger(__name__)
class Store(ReadStore[ModelT]):
    """A Store is used to create, read, update, and delete items in a collection."""

    async def put(self, item: ModelT, key: Optional[str] = None) -> Optional[str]:
        """
        Put a single item to the store.

        Returns the key of the added item. If `ignore_errors` is set to `True`,
        `put` will return None if the item was unable to be added.
        """
        resolved_key = self._get_item_key(item, key)
        path = self._get_key_path(resolved_key)
        try:
            await self._filesystem.write_json(
                path, item, encode_json=self.encode_json
            )
        except (FileWriteError, FileEncodeError) as exc:
            self._maybe_raise_file_error(exc)
            return None
        return resolved_key

    def put_sync(self, item: ModelT, key: Optional[str] = None) -> Optional[str]:
        """
        Put a single item to the store.

        Synchronous version of :py:meth:`put`.
        """
        resolved_key = self._get_item_key(item, key)
        path = self._get_key_path(resolved_key)
        try:
            self._filesystem.sync.write_json(
                path, item, encode_json=self.encode_json
            )
        except (FileWriteError, FileEncodeError) as exc:
            self._maybe_raise_file_error(exc)
            return None
        return resolved_key

    async def ensure(self, default_item: ModelT, key: Optional[str] = None) -> ModelT:
        """
        Ensure an item exists in the store at the given key.

        If an item with `key` already exists, it is returned; otherwise
        `default_item` is written first and then returned. Shortcut for a
        `get` followed by a `put` when the `get` misses.
        """
        resolved_key = self._get_item_key(default_item, key)
        existing = await self.get(resolved_key)
        if existing is not None:
            return existing
        await self.put(default_item, key)
        return default_item

    def ensure_sync(self, default_item: ModelT, key: Optional[str] = None) -> ModelT:
        """
        Ensure an item exists in the store at the given key.

        Synchronous version of :py:meth:`ensure`.
        """
        resolved_key = self._get_item_key(default_item, key)
        existing = self.get_sync(resolved_key)
        if existing is not None:
            return existing
        self.put_sync(default_item, key)
        return default_item

    async def delete(self, key: str) -> Optional[str]:
        """
        Delete a single item in the store.

        Returns the deleted key if the item was removed or None if no item was
        found at that key. If `ignore_errors` is set, delete will also return
        None if the item is unable to be removed.
        """
        path = self._get_key_path(key)
        try:
            await self._filesystem.remove(path)
        except (PathNotFoundError, RemoveFileError) as exc:
            self._maybe_raise_file_error(exc)
            return None
        return key

    def delete_sync(self, key: str) -> Optional[str]:
        """
        Delete a single item in the store.

        Synchronous version of :py:meth:`delete`.
        """
        path = self._get_key_path(key)
        try:
            self._filesystem.sync.remove(path)
        except (PathNotFoundError, RemoveFileError) as exc:
            self._maybe_raise_file_error(exc)
            return None
        return key

    async def delete_store(self) -> None:
        """Delete the store and all its items."""
        return await self._filesystem.remove_dir(self._directory)

    def delete_store_sync(self) -> None:
        """
        Delete the store and all its items.

        Synchronous version of :py:meth:`delete_store`.
        """
        return self._filesystem.sync.remove_dir(self._directory)

    def encode_json(self, item: ModelT) -> str:
        """Encode a model instance into JSON, stamping the schema version."""
        payload = item.dict()
        payload[SCHEMA_VERSION_KEY] = len(self._migrations)
        # NOTE(mc, 2020-10-25): __json_encoder__ is an undocumented property
        # of BaseModel, but its usage here is to ensure Pydantic model config
        # related to serialization is properly used. This functionality is
        # covered by basic integration tests
        return item.__config__.json_dumps(payload, default=item.__json_encoder__)

    def parse_json(self, data: str) -> ModelT:
        """Decode a string into a model instance, running pending migrations."""
        raw = self._schema.__config__.json_loads(data)
        schema_version = raw.pop(SCHEMA_VERSION_KEY, 0)
        # Apply only the migrations newer than the stored schema version.
        for migrate in self._migrations[schema_version:]:
            raw = migrate(raw)
        return self._schema.parse_obj(raw)
|
[
"[email protected]"
] | |
375dc7a9f08e87def6b9d83af33b3624c9f7ab69
|
56df6683865fd9319b389afd6dd4a922299da593
|
/source/scripts/python/host/source/host.py.in
|
0e4829aa2a05b70be869733b69853d892d4ff567
|
[
"Python-2.0",
"GPL-2.0-or-later",
"MPL-1.1",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"BSD-3-Clause",
"MPL-2.0",
"Ruby",
"BSD-2-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
metacall/core
|
4f36fe0b13924853aab6d0f053285b649398cc1d
|
419ffb573b17501c91662f0f161032bb19ea1ab3
|
refs/heads/develop
| 2023-08-23T10:19:30.898387 | 2023-08-10T18:39:08 | 2023-08-10T18:39:08 | 163,221,062 | 1,391 | 167 |
Apache-2.0
| 2023-09-13T23:49:43 | 2018-12-26T22:02:57 |
C
|
UTF-8
|
Python
| false | false | 957 |
in
|
#!/usr/bin/env python3
import sys
# Insert Python Port folder first in the system path list
sys.path.insert(0, '@PROJECT_METACALL_PORTS_DIRECTORY@')
from metacall import metacall, metacall_load_from_memory
# Inline NodeJS source registered with MetaCall's node loader below. Its
# function `b` calls back into this process to run the Python function `c`
# defined later in this file.
script = '''#!/usr/bin/env node
'use strict';
const path = require('path');
/* Load MetaCall addon */
const addon = (() => {
try {
/* This forces metacall port to be run always by metacall cli */
return process._linkedBinding('node_loader_port_module');
} catch (e) {
console.error('MetaCall failed to load, probably you are importing this file from NodeJS directly.');
console.error('You should use MetaCall CLI instead. Install it from: https://github.com/metacall/install');
throw e;
}
})();
function b() {
return addon.metacall('c');
}
module.exports = {
b
};
'''
# Load the embedded script into the MetaCall node runtime so `b` becomes
# callable from Python via metacall('b').
metacall_load_from_memory('node', script)
def a():
    """Invoke the NodeJS function `b` through MetaCall and echo its result."""
    outcome = metacall('b')
    print('Result call from b:')
    print(outcome)
    return outcome
def c():
    """Constant consumed by the NodeJS side via metacall('c')."""
    return 3.0
|
[
"[email protected]"
] | |
53d2fbfd9f3c99ec42a32fc5ee87f71345a8cd07
|
14e7058adf766352a0b90b66b7dcf887105a481c
|
/djangoappengine/settings_base.py
|
b62b7cbd0c0dfb09d8af41eeb26eecfd9cb6af34
|
[
"BSD-2-Clause"
] |
permissive
|
brunogamacatao/portalsaladeaula
|
2b7f07f07c2518dd359f043483fbb27417f62aaf
|
9429e485aa37ffea3208339a807032e9230a3c84
|
refs/heads/master
| 2020-12-29T01:42:18.594281 | 2012-06-22T12:24:44 | 2012-06-22T12:24:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,102 |
py
|
# Initialize App Engine SDK if necessary
try:
    from google.appengine.api import api_proxy_stub_map
except ImportError:
    # SDK not importable yet — bootstrap the environment first.
    from .boot import setup_env
    setup_env()

from djangoappengine.utils import on_production_server, have_appserver

# Debug everywhere except on the production server.
DEBUG = not on_production_server
TEMPLATE_DEBUG = DEBUG

ROOT_URLCONF = 'urls'

# All persistence goes through the App Engine datastore backend.
DATABASES = {
    'default': {
        'ENGINE': 'djangoappengine.db',
    },
}

# Production mail must be deferred to the task queue; locally send directly.
if on_production_server:
    EMAIL_BACKEND = 'djangoappengine.mail.AsyncEmailBackend'
else:
    EMAIL_BACKEND = 'djangoappengine.mail.EmailBackend'

# Blobstore-backed file upload/serving pipeline.
PREPARE_UPLOAD_BACKEND = 'djangoappengine.storage.prepare_upload'
SERVE_FILE_BACKEND = 'djangoappengine.storage.serve_file'
DEFAULT_FILE_STORAGE = 'djangoappengine.storage.BlobstoreStorage'
FILE_UPLOAD_MAX_MEMORY_SIZE = 1024 * 1024
FILE_UPLOAD_HANDLERS = (
    'djangoappengine.storage.BlobstoreFileUploadHandler',
    'django.core.files.uploadhandler.MemoryFileUploadHandler',
)

# Legacy cache-backend URI syntax; timeout=0 semantics depend on the Django
# version in use — verify before upgrading.
CACHE_BACKEND = 'memcached://?timeout=0'
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'

if not on_production_server:
    INTERNAL_IPS = ('127.0.0.1',)
|
[
"[email protected]"
] | |
080bcf39abb2b1192174b56c122775222dc094e5
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Games/Pygame/pygame_widgets/widgets/holder.py
|
96b94e5e4579dd18844513a989799935872a599d
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null |
UTF-8
|
Python
| false | false | 129 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:72853c292faeedcdb3d5218c2cef2738641f5b2b222c1b069415a537cc49181f
size 7770
|
[
"[email protected]"
] | |
af1a0b1e3fbc5532f301616c7de79889ed3c1f13
|
338298474c517e28d9a214c3525b9709625fa438
|
/YouWeesh/Controllers/RegisterController.py
|
e1c6418416713ae4a61fcdd2a707f2d628d8db50
|
[] |
no_license
|
vincehar/Backend
|
f5a8f0e264de2ba7ccadba3bce015f3a30e9c478
|
fb143c6c70cb65018d0436bf5b891cb72620208d
|
refs/heads/master
| 2023-07-06T10:37:50.057555 | 2023-06-28T13:04:36 | 2023-06-28T13:04:36 | 73,698,999 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,284 |
py
|
from base64 import b64decode
from django.core.files.base import ContentFile
from django.http import Http404
from mongoengine.django.auth import User
from rest_framework.decorators import api_view, renderer_classes, permission_classes
from rest_framework.permissions import AllowAny
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from YouWeesh.Models.Address import Address
from YouWeesh.Models.Preferences import Preferences
from YouWeesh.Models.SocialNetworks import SocialNetworks
from YouWeesh.Models.Users import Users
from YouWeesh.Tools.app import App
@api_view(('POST',))
@permission_classes((AllowAny,))
@renderer_classes((JSONRenderer,))
def registeruser(request):
    """Create a Django auth user and its associated Users profile.

    Expects POST fields: username, password, email, lastname, firstname,
    socialnetwork, picture (base64). Passwords are only stored for native
    ('Youweesh') registrations; social registrations keep the avatar instead.
    """
    username = request.POST['username']
    password = request.POST['password']
    email = request.POST['email'].lower()
    lastname = request.POST['lastname']
    firstname = request.POST['firstname']
    socialnetwork = request.POST['socialnetwork']
    picture_b64 = request.POST['picture']
    home_town = 'Geneve'  # request.POST['home_town']

    picture_bytes = b64decode(picture_b64)
    network = SocialNetworks.objects.get(label=socialnetwork)

    account = User.objects.create(
        username=username, email=email, first_name=firstname, last_name=lastname
    )
    # Only native accounts carry a usable password hash.
    if socialnetwork == 'Youweesh':
        account.set_password(password)
        account.save()

    prefs = Preferences()
    prefs.save()

    if home_town != "":
        # Geocode and persist the home town before linking the profile.
        address = Address()
        address.city = home_town
        address.getorUpdateCoordinates()
        address.save()
        profile = Users.objects.create(
            user=account, social_network=network, address=address, preferences=prefs
        )
    else:
        profile = Users.objects.create(
            user=account, social_network=network, preferences=prefs
        )

    # Social providers supply an avatar; replace the stored picture with it.
    if socialnetwork == 'Facebook' or socialnetwork == 'Twitter':
        profile.picture.replace(ContentFile(picture_bytes))
    profile.save()
    return Response(True)
@api_view(('POST',))
@permission_classes((AllowAny,))
@renderer_classes((JSONRenderer,))
def registerFCMToken(request):
    """Persist the Firebase Cloud Messaging token for the logged-in user.

    Raises Http404 when no authenticated user can be resolved.
    """
    try:
        connected_user = App.getCurrentUser(request)
        connected_user.update_fcm_token(request.POST['fcmToken'])
    except Users.DoesNotExist:
        # BUG FIX: the original caught `connected_user.DoesNotExist`, but
        # `connected_user` is unbound when `getCurrentUser` itself raises,
        # which turned the intended 404 into a NameError. Reference the
        # exception on the model class instead.
        raise Http404('Not logged')
    return Response(True)
|
[
"[email protected]"
] | |
1fa1bec403921087904bbafbee13cec85e2e510f
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/trapping_20200617172315.py
|
650a589856ffb33697b3724367c5a84d8aea7bf7
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 459 |
py
|
def trap(arr):
    """Return the total units of rain water trapped between bars of `arr`.

    The original body was a dead stub (`res = 0`, never used, no return);
    the commented-out draft shows the intent was the classic trapping-rain-
    water problem. Implemented with the O(n)/O(1) two-pointer technique:
    water above each bar is bounded by the smaller of the running maxima
    approaching from each side.
    """
    if not arr:
        return 0
    left, right = 0, len(arr) - 1
    left_max, right_max = arr[left], arr[right]
    total = 0
    while left < right:
        # Advance the side with the smaller bound; that bound is final
        # for the cell we step onto.
        if left_max <= right_max:
            left += 1
            left_max = max(left_max, arr[left])
            total += left_max - arr[left]
        else:
            right -= 1
            right_max = max(right_max, arr[right])
            total += right_max - arr[right]
    return total

trap([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1])
|
[
"[email protected]"
] | |
bd4a9a56ca71e397b6a266f1919c1626b4d31214
|
5390d79dad71ad0d9ff9d0777435dcaf4aad16b3
|
/chapter_07/pizza_topping3.py
|
d3a2378353388256cf56dcd63ea0eaf942f43e2c
|
[] |
no_license
|
JasperMi/python_learning
|
19770d79cce900d968cec76dac11e45a3df9c34c
|
8111d0d12e4608484864dddb597522c6c60b54e8
|
refs/heads/master
| 2020-11-26T08:57:02.983869 | 2020-03-11T10:14:55 | 2020-03-11T10:14:55 | 218,935,548 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 262 |
py
|
# Prompt shown before every topping request; quitting is explained inline.
prompt = "\nPlease input your pizza toppings:"
prompt += "\nEnter 'quit' to end the program. "

# Read toppings until the user types 'quit'.
# (Removed the unused `message = ""` variable from the original.)
while True:
    pizza_topping = input(prompt)
    if pizza_topping == 'quit':
        break
    else:
        print("We'll add " + pizza_topping + ".")
|
[
"[email protected]"
] | |
691cf31fb1d7e831b764492ca883e2c4ccfdeb40
|
9a8ff03d27e4822fa27f78fb6ba1dd419725cf90
|
/home/urls.py
|
1411cf4d18bcdf2783251f6cd45251f3234642e5
|
[] |
no_license
|
Pavlo-Olshansky/Social-lemon
|
0f9f994fbbba78cd0e7defa1e7fcf60b6ed55165
|
3b7f0b9e8526f6c74d98ad38412151ea2678f808
|
refs/heads/master
| 2022-12-11T01:12:30.919023 | 2018-06-17T14:57:10 | 2018-06-17T14:57:10 | 104,945,732 | 0 | 0 | null | 2022-11-22T01:53:50 | 2017-09-26T22:48:52 |
Python
|
UTF-8
|
Python
| false | false | 2,089 |
py
|
from django.conf.urls import url, include
from . import views
from django.contrib.auth import views as auth_views
from .forms import CustomAuthForm
from django.contrib.auth.models import User
urlpatterns = [
    # url(r'^$', views.HomePage.as_view(), name='home'),
    # Register new user
    url(r'^signup/', views.SignUp.as_view(), name='signup'),
    # Login URL
    url(r'^login/$', auth_views.login, {'template_name': 'registration/login.html', 'authentication_form': CustomAuthForm}, name='login'),
    # Root shows the login form on the home template with extra context.
    url(r'^$', auth_views.login,
        {'template_name': 'home.html',
         'authentication_form': CustomAuthForm,
         'extra_context':
             {'recommendations': views.recommendation_list }
         }, name='home-login'),
    # Logout URL
    url(r'^logout/$', auth_views.logout, {'next_page': '/'}, name='logout'),
    # Reset password — request, done, token-confirm, and complete steps.
    url(r'^password_reset/$', auth_views.password_reset, {'post_reset_redirect': '/password_reset/done/'}, name='password_reset'),
    url(r'^password_reset/done/$', auth_views.password_reset_done, name='password_reset_done'),
    url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',auth_views.password_reset_confirm, {'post_reset_redirect': '/reset/done/'}, name='password_reset_confirm'),
    url(r'^reset/done/$', auth_views.password_reset_complete, name='password_reset_complete'),
    # Send an activation URL
    url(r'^account_activation_sent/$', views.account_activation_sent, name='account_activation_sent'),
    # Activation URL
    url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        views.activate, name='activate'),
    # Profile URL's
    url(r'^profile/$', views.ViewProfile.as_view(), name='view_profile'),
    url(r'^profile/(?P<pk>\d+)/$', views.ViewProfile.as_view(), name='view_profile_with_pk'),
    url(r'^profile/edit/$', views.edit_profile, name='edit_profile'),
    url(r'^profile/password/$', views.ChangePassword.as_view(), name='change_password'),
]
|
[
"[email protected]"
] | |
824a8a299cdea984c99f9a2b32fe5eb4b4918082
|
d094ba0c8a9b1217fbf014aa79a283a49aabe88c
|
/env/lib/python3.6/site-packages/traits/tests/test_container_events.py
|
7ccbe4fccef1ce095b0e9bfabb18e60297996524
|
[
"Apache-2.0"
] |
permissive
|
Raniac/NEURO-LEARN
|
d9274e0baadd97bb02da54bdfcf6ca091fc1c703
|
3c3acc55de8ba741e673063378e6cbaf10b64c7a
|
refs/heads/master
| 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 |
Apache-2.0
| 2022-12-09T21:01:00 | 2019-04-18T03:57:00 |
CSS
|
UTF-8
|
Python
| false | false | 5,193 |
py
|
# ------------------------------------------------------------------------------
#
# Copyright (c) 2007, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in /LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# ------------------------------------------------------------------------------
"""
Tests for Dict items_changed events
"""
from __future__ import absolute_import, print_function
from traits.testing.unittest_tools import unittest
from traits.api import HasTraits, Dict
class MyClass(HasTraits):
    """ A dummy HasTraits class with a Dict """

    # Dict trait whose mutations fire the `d_items` change notification.
    d = Dict({"a": "apple", "b": "banana", "c": "cherry", "d": "durian"})

    def __init__(self, callback):
        "The callback is called with the TraitDictEvent instance"
        # NOTE(review): super().__init__() is not called — confirm HasTraits
        # tolerates this. `callback` may be None to disable checking.
        self.callback = callback
        return

    def _d_items_changed(self, event):
        # Static change handler: traits calls this when `d` is mutated;
        # forward the event to the test's callback, if any.
        if self.callback:
            self.callback(event)
        return
class MyOtherClass(HasTraits):
    """ A dummy HasTraits class with a Dict """

    # Same Dict trait as MyClass but without a static handler; used to test
    # dynamically attached listeners.
    d = Dict({"a": "apple", "b": "banana", "c": "cherry", "d": "durian"})
class Callback:
    """
    A stateful callback that gets initialized with the values to check for.

    When invoked with a TraitDictEvent-like object, it asserts (via the
    owning test case `obj`) that the event's added/changed/removed dicts
    match the expected ones, and records that it was called.
    """

    def __init__(self, obj, added=None, changed=None, removed=None):
        # BUG FIX: the original used mutable default arguments (`={}`),
        # which are shared across calls; use None sentinels instead.
        self.obj = obj
        self.added = added if added is not None else {}
        self.changed = changed if changed is not None else {}
        self.removed = removed if removed is not None else {}
        self.called = False

    def __call__(self, event):
        # BUG FIX: the original only *printed* a diagnostic when
        # `event.added` mismatched instead of asserting, so that failure
        # mode passed silently. Assert all three fields unconditionally.
        self.obj.assertEqual(event.added, self.added)
        self.obj.assertEqual(event.changed, self.changed)
        self.obj.assertEqual(event.removed, self.removed)
        self.called = True
class DictEventTestCase(unittest.TestCase):
    """Verify the TraitDictEvent payload for every mutating dict operation."""

    def test_setitem(self):
        # overwriting an existing item
        cb = Callback(self, changed={"c": "cherry"})
        foo = MyClass(cb)
        foo.d["c"] = "coconut"
        self.assertTrue(cb.called)
        # adding a new item
        cb = Callback(self, added={"g": "guava"})
        bar = MyClass(cb)
        bar.d["g"] = "guava"
        self.assertTrue(cb.called)
        return

    def test_delitem(self):
        # Deletion reports the removed key/value pair.
        cb = Callback(self, removed={"b": "banana"})
        foo = MyClass(cb)
        del foo.d["b"]
        self.assertTrue(cb.called)
        return

    def test_clear(self):
        # clear() reports the whole previous contents as removed.
        removed = MyClass(None).d.copy()
        cb = Callback(self, removed=removed)
        foo = MyClass(cb)
        foo.d.clear()
        self.assertTrue(cb.called)
        return

    def test_update(self):
        # update() splits its effect into changed (existing keys, with the
        # OLD values) and added (new keys).
        update_dict = {"a": "artichoke", "f": "fig"}
        cb = Callback(self, changed={"a": "apple"}, added={"f": "fig"})
        foo = MyClass(cb)
        foo.d.update(update_dict)
        self.assertTrue(cb.called)
        return

    def test_setdefault(self):
        # Test retrieving an existing value
        cb = Callback(self)
        foo = MyClass(cb)
        self.assertEqual(foo.d.setdefault("a", "dummy"), "apple")
        self.assertFalse(cb.called)
        # Test adding a new value
        cb = Callback(self, added={"f": "fig"})
        bar = MyClass(cb)
        self.assertTrue(bar.d.setdefault("f", "fig") == "fig")
        self.assertTrue(cb.called)
        return

    def test_pop(self):
        # Test popping a non-existent key
        cb = Callback(self)
        foo = MyClass(cb)
        self.assertEqual(foo.d.pop("x", "dummy"), "dummy")
        self.assertFalse(cb.called)
        # Test popping a regular item
        cb = Callback(self, removed={"c": "cherry"})
        bar = MyClass(cb)
        self.assertEqual(bar.d.pop("c"), "cherry")
        self.assertTrue(cb.called)
        return

    def test_popitem(self):
        # Reduce the dict to a single known item so popitem is deterministic.
        foo = MyClass(None)
        foo.d.clear()
        foo.d["x"] = "xylophone"
        cb = Callback(self, removed={"x": "xylophone"})
        foo.callback = cb
        self.assertEqual(foo.d.popitem(), ("x", "xylophone"))
        self.assertTrue(cb.called)
        return

    def test_dynamic_listener(self):
        # Listeners attached with on_trait_change must see the same events;
        # each is detached again immediately after the mutation.
        foo = MyOtherClass()
        # Test adding
        func = Callback(self, added={"g": "guava"})
        foo.on_trait_change(func.__call__, "d_items")
        foo.d["g"] = "guava"
        foo.on_trait_change(func.__call__, "d_items", remove=True)
        self.assertTrue(func.called)
        # Test removing
        func2 = Callback(self, removed={"a": "apple"})
        foo.on_trait_change(func2.__call__, "d_items")
        del foo.d["a"]
        foo.on_trait_change(func2.__call__, "d_items", remove=True)
        self.assertTrue(func2.called)
        # Test changing
        func3 = Callback(self, changed={"b": "banana"})
        foo.on_trait_change(func3.__call__, "d_items")
        foo.d["b"] = "broccoli"
        foo.on_trait_change(func3.__call__, "d_items", remove=True)
        self.assertTrue(func3.called)
        return
|
[
"[email protected]"
] | |
0330944a234507649832eb94badabbf3a9353faf
|
5a9194df7e40b1f9694576c88c536b24d22f548b
|
/tests/projects/test_delete_project.py
|
96d89108aa6b51da09f660ffdc8b7e8fd51e6b38
|
[] |
no_license
|
jamesstidard/Talk-Zoho
|
17230611e40e5c232dcd33bdbd5148ba20543810
|
3a918d72146dae1ed6bb8afee09dfe658a540048
|
refs/heads/master
| 2021-05-08T10:44:05.881154 | 2017-03-03T16:49:34 | 2017-03-03T16:49:34 | 119,862,940 | 0 | 0 | null | 2018-02-01T16:34:38 | 2018-02-01T16:34:38 | null |
UTF-8
|
Python
| false | false | 383 |
py
|
import pytest
from tests.projects.fixtures import * # noqa
@pytest.mark.gen_test
def test_cant_delete_user(projects, portal_id):
    # Deleting user with wrong id always returns true (CRM API limitation)
    # Pull projects down to lowest common denominator for unified interface.
    # `yield` drives the coroutine under pytest-tornado's gen_test marker;
    # `projects` and `portal_id` come from the imported fixtures module.
    success = yield projects.projects.delete('123456789', portal_id=portal_id)
    assert success
|
[
"[email protected]"
] | |
1963788b916b4fec844fe1d1523a7cfee98a0955
|
bd109656f1ea18fe2eae9afffcc0074d75826bb9
|
/setup.py
|
55a44300afd9c9ab3918a9fb2a7ad146c8367a9b
|
[
"MIT"
] |
permissive
|
ffreemt/baidu-tr-async-free
|
4db5356e24e1ac818a6f641ccad7093113dd32ec
|
3bf422e8d8406123479c5bcdb679af795db0ba8f
|
refs/heads/master
| 2021-01-02T21:41:05.188476 | 2020-02-12T06:15:30 | 2020-02-12T06:15:30 | 239,812,578 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,407 |
py
|
''' setup '''
# pylint: disable=invalid-name
from pathlib import Path
import re
from setuptools import setup, find_packages

# Distribution name on PyPI.
name = """baidu-tr-async-free"""
description = 'baidu translate for free with async and proxy support'

# The first discovered package is assumed to contain __init__.py with
# __version__ — TODO confirm if more packages are ever added.
dir_name, *_ = find_packages()
# dir_name = 'bdtr_async'
curr_dir = Path(__file__).parent

# Extract __version__ = '...' from the package __init__ without importing it.
_ = Path(f'{dir_name}/__init__.py').read_text(encoding='utf-8')
version, *_ = re.findall(r"__version__\W*=\W*'([^']+)'", _)

# GitHub release tarball name: dots stripped from the version, e.g. v_123.tar.gz.
targz = 'v_' + version.replace('.', '') + '.tar.gz'

install_requires = ['httpx', 'loguru', 'google-sign']

# NOTE(review): variable name says rst but the file read is markdown.
README_rst = f'{curr_dir}/README.md'
long_description = (
    open(README_rst, encoding='utf-8').read() if Path(README_rst).exists() else ''
)

setup(
    name=name,
    packages=find_packages(),
    version=version,
    description=description,
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords=['machine translation', 'free', 'sign'],
    author="mikeee",
    url=fr'http://github.com/ffreemt/{name}',
    download_url=fr'https://github.com/ffreemt/{name}/archive/' + targz,
    install_requires=install_requires,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'License :: OSI Approved :: MIT License',
    ],
    license='MIT License',
)
|
[
"[email protected]"
] | |
c8e6f159a7813608572c6285f8a0b42c0a56fd09
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_213/61.py
|
cfe6796cc3346eddb991963bab839f4b05e02f73
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,296 |
py
|
# link: https://code.google.com/codejam/contest/5314486/dashboard#s=1
# Python 2 solution: reads one of the bundled input files, solves each case,
# and writes "Case #k: <rides> <promotions>" lines to the matching output.
import string
import time

# Which bundled input to run: 0=example, 1=small, 2=large.
testIndex=2
problemRoot="d:/prog/versenyek/googlejam"
problemDir="2017/round2"
problemName="B"
inputFiles= ["-example.in", "-small.in", "-large.in"]
outputFiles=["-example.out", "-small.out", "-large.out"]

time1=time.time()
fileName=string.strip(problemRoot)+"/"+problemDir+"/"+problemName+inputFiles[testIndex]
# Parse every non-blank line into a list of ints (line 0 is the case count).
inputData=[map(int,line.split()) for line in open(fileName,'r') if line.strip()]
fileName=string.strip(problemRoot)+"/"+problemDir+"/"+problemName+outputFiles[testIndex]
fileToWrite=open(fileName,'wb')
time2=time.time()

lineIdx=1
for case in xrange(inputData[0][0]):
    # Case header: n, c, m; then m lines of 1-based (tick index, cost index) pairs.
    n,c,m=inputData[lineIdx]
    tick=[0]*n
    cost=[0]*c
    lineIdx+=1
    for i in xrange(m):
        tick[inputData[lineIdx+i][0]-1]+=1
        cost[inputData[lineIdx+i][1]-1]+=1
    lineIdx+=m
    rides=max(cost) # the most ticket at one user
    fstk=0
    for i in xrange(n):
        fstk+=tick[i]
        # ceil(fstk / (i+1)) via Python 2 integer division.
        rides=max(rides,(fstk-1)/(i+1)+1)
    # Count the overflow above the ride budget — the promotions needed.
    pro=0
    for i in xrange(n):
        if tick[i]>rides:
            pro+=tick[i]-rides
    fileToWrite.write("Case #"+str(case+1)+": "+str(rides)+" "+str(pro)+"\n")
fileToWrite.close()
print 'Total time: ', time.time() - time1
print 'Solving time: ', time.time() - time2
|
[
"[email protected]"
] | |
2dea5afe2da38332a9e1ae100dcd6b3750a2efc4
|
c6374029bcba930ab37098e8e954067aeae252d8
|
/mla/svm/kernerls.py
|
da289a183af1d605a4fb65c9a4a197ad9621ecda
|
[
"MIT"
] |
permissive
|
bhumikadhingra/MLAlgorithms
|
ab6f20aa8a899ff265668155cb4083ec19535429
|
8f002d0804663854eaec41b4ead698caaaf11c69
|
refs/heads/master
| 2020-08-07T09:12:36.194453 | 2019-10-07T13:15:12 | 2019-10-07T13:15:12 | 213,387,107 | 1 | 0 |
MIT
| 2019-10-07T13:13:09 | 2019-10-07T13:13:09 | null |
UTF-8
|
Python
| false | false | 721 |
py
|
# coding:utf-8
import numpy as np
import scipy.spatial.distance as dist
class Linear(object):
    """Linear kernel: the plain inner product between samples."""

    def __call__(self, x, y):
        # <x, y> computed as a matrix product against y transposed.
        product = np.dot(x, y.T)
        return product

    def __repr__(self):
        return "Linear kernel"
class Poly(object):
    """Polynomial kernel: (x . y^T) raised to `degree` (default 2)."""

    def __init__(self, degree=2):
        self.degree = degree

    def __call__(self, x, y):
        base = np.dot(x, y.T)
        return base ** self.degree

    def __repr__(self):
        return "Poly kernel"
class RBF(object):
    """Gaussian (RBF) kernel: exp(-gamma * ||x - y||^2), flattened to 1-D."""

    def __init__(self, gamma=0.1):
        self.gamma = gamma

    def __call__(self, x, y):
        # Promote both inputs to 2-D sample matrices before the pairwise
        # Euclidean distance computation.
        left = np.atleast_2d(x)
        right = np.atleast_2d(y)
        sq_dist = dist.cdist(left, right) ** 2
        return np.exp(-self.gamma * sq_dist).flatten()

    def __repr__(self):
        return "RBF kernel"
|
[
"[email protected]"
] | |
387523e464797ebfe8e34406b339dc22c29b74c0
|
69099b95bb1507b30e6be8d4ad1d39f923833e97
|
/BIRL_optimal_demos.py
|
737cf7525eced539ad6a481bc67d6068cbee618d
|
[
"MIT"
] |
permissive
|
dsbrown1331/aima-based-irl
|
80d8cc2eafd751bd84bdcda6ad5a9a44060947c6
|
553550030240ae886f4260ece59dd252adb1fc6e
|
refs/heads/master
| 2021-01-17T23:12:03.038606 | 2016-09-27T15:30:27 | 2016-09-27T15:30:27 | 67,889,747 | 1 | 0 | null | 2016-09-10T19:16:56 | 2016-09-10T19:16:55 | null |
UTF-8
|
Python
| false | false | 4,300 |
py
|
import numpy as np
import mdp
from my_birl_batch import *
from my_birl import *
from halfspace_uncertainty import *
from mdp_feature_counts import *
from optimal_teaching import *
from activeLearning import chain_variance
import operator
# Experiment: for each grid size, compare Cakmak-style optimal teaching
# demonstrations against a BIRL reward-variance active-learning baseline,
# writing the rank of the optimal start state (within the variance-sorted
# query list) to a results file. (Python 2; indentation reconstructed.)
for size in range(3,4):
    print "^^^^^^", size, "^^^^^^^"
    f = open('active_results/optimalTest' + str(size)+ '2.txt','w')
    for iter in range(10):
        print "-----", iter, "------"
        #generate a random n by n world
        grid_width = size
        grid_height = size
        # Random negative rewards everywhere except the +10 goal at (0,0).
        rand_reward = []
        for row in range(grid_height):
            temp = []
            for col in range(grid_width):
                temp.append(np.random.randint(-10,0))
            rand_reward.append(temp)
        rand_reward[0][0] = 10
        ###for debugging
        #rand_reward = [[10.00, -5.00, -5.00],
        #[-1.00, -1.00, -1.00 ]]
        ###
        # Terminal at grid coordinate (0, height-1); every other cell is a
        # candidate initial state.
        terminals=[(0,grid_height-1)]
        init = []
        for row in range(grid_height):
            for col in range(grid_width):
                if row == grid_height-1 and col == 0:
                    continue
                init.append((col,row))
        print "init"
        print init
        expert_mdp = mdp.GridMDP(deepcopy(rand_reward), terminals, init)
        expert_mdp.print_rewards()
        expert_mdp.print_arrows()
        #try Cakmak's Task 1 with just one start to see if it gets the same demo
        #birlToy = DeterministicWeightGridMDP(
        #    features = ['f0', 'f1', 'f2'],
        #    weights = {'f0': 10, 'f1': -5, 'f2': -1, None: None},
        #    grid = [['f0', 'f1', 'f1'],
        #            ['f2', 'f2', 'f2']],
        #    terminals=[(0,1)],
        #    init = [(0,0),(1,0),(1,1),(2,0),(2,1)], gamma = 0.9)
        # One indicator feature per cell, named f0..f(n*n-1) in row order.
        features = []
        count = 0
        for row in range(grid_height):
            for col in range(grid_width):
                features.append('f' + str(count))
                count += 1
        #print "features"
        #print features
        # Feature weights mirror the random cell rewards.
        weights = {}
        count = 0
        for row in range(grid_height):
            for col in range(grid_width):
                #print row,col
                weights[features[count]] = rand_reward[row][col]
                count += 1
        weights[None] = None
        print "weights"
        print weights
        # Grid of feature labels laid out like the reward grid.
        grid = []
        count = 0
        for row in range(grid_height):
            temp = []
            for col in range(grid_width):
                temp.append(features[count])
                count += 1
            grid.append(temp)
        #print "grid"
        #print grid
        #select random init state
        demo_init = init[np.random.randint(0,len(init))]
        print "demo_init"
        print demo_init
        #generate random demo
        demo = []
        expert_policy = best_policy(expert_mdp, value_iteration(expert_mdp, 0.001))
        demo.append(mdp.generate_demonstration(demo_init, expert_policy, expert_mdp))
        print "demo"
        print demo
        rand_task = DeterministicWeightGridMDP(
            features, weights, grid, terminals, init, gamma = 0.95)
        #rand_task.print_rewards()
        #rand_task.print_arrows()
        # Optimal-teaching search seeded with the random demo.
        cakmak_optimal = seeded_optimal_teaching(demo,rand_task, 100000,10)
        #print("solution: ", cakmak_optimal)
        score, cakmak_demo = cakmak_optimal
        cakmak_init = cakmak_demo[0][0]
        print "cakmak", cakmak_init
        #compare to BIRL active learning reward variance approach
        chain_length = 12000
        chain_burn = 2000
        birl = BIRL_BATCH(demo, expert_mdp.get_grid_size(), expert_mdp.terminals, expert_mdp.init,
            step_size=1.0, birl_iteration = chain_length)
        chain, mapMDP = birl.run_birl()
        chain_var = chain_variance(chain, chain_burn)
        #find highest variance that's not start of demo or terminal state
        chain_var.pop(terminals[0])
        # States ranked by descending reward variance = the BIRL query order.
        sorted_var = sorted(chain_var.items(), key=operator.itemgetter(1))
        sorted_var.reverse()
        query_states = [state for state, var in sorted_var]
        print query_states
        # Record where the optimal-teaching start state lands in that order.
        indx = query_states.index(cakmak_init)
        print indx
        f.write(str(indx) + '\n') # python will convert \n to os.linesep
    f.close()
|
[
"[email protected]"
] | |
6e4e5e3c39abdfef03c473cadda68be2c7a10fa9
|
97072bdb023dd3916d0ced6aba1c98ec0893ee01
|
/tests/test_user.py
|
6c75c87d9a88a701e3f63fcc1efb7784b662cc2f
|
[
"MIT"
] |
permissive
|
AnumAsif/my-blogs
|
ed814d0559a1d84e138a02b846d2a2b85aacfebd
|
8dd6d8e9e84867582dad10265203d1219c00926c
|
refs/heads/master
| 2020-04-23T01:51:07.902859 | 2019-02-19T12:18:42 | 2019-02-19T12:18:42 | 170,826,439 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 482 |
py
|
import unittest
from app.models import User
class TestUserModel(unittest.TestCase):
    """Unit tests for password hashing behaviour on the User model."""

    def setUp(self):
        # Fresh user whose password setter should store only a hash.
        self.user = User(password="anum123")

    def test_password_setter(self):
        # Setting a password must populate password_hash.
        self.assertTrue(self.user.password_hash is not None)

    def test_no_access_password(self):
        # Reading the plaintext password attribute must be forbidden.
        with self.assertRaises(AttributeError):
            self.user.password

    def test_password_verification(self):
        # verify_password must accept the original plaintext.
        self.assertTrue(self.user.verify_password('anum123'))
|
[
"[email protected]"
] | |
6416d21e330f6923f19a06e51308eeb8b9f4168b
|
f26d67e3e9f8b90e5d6243279a1c2ce87fa41d46
|
/tests/api/test_prodstats.py
|
c06973ae4a39c46587d14ccdcf139af25afd3c4a
|
[
"MIT"
] |
permissive
|
OCB-DS/prodstats
|
cf554e3abee651463e9f81606d4b633f464658a7
|
4ff5a6e0b0d6152af2d7e1f3844ede2d33ad4824
|
refs/heads/master
| 2022-11-25T15:30:06.988683 | 2020-08-02T16:08:05 | 2020-08-02T16:08:05 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,722 |
py
|
# import logging
# import pandas as pd
# import pytest
# import starlette.status as codes
# from db.models import ProdStat as Model
# from tests.utils import rand_str
# logger = logging.getLogger(__name__)
# pytestmark = pytest.mark.asyncio
# @pytest.fixture(scope="session")
# def prodstat_records(json_fixture):
# yield json_fixture("prodstats.json")
# @pytest.fixture
# def prodstat_df(prodstat_records):
# yield pd.DataFrame(prodstat_records).set_index(["api10", "prod_date"])
# @pytest.fixture(autouse=True)
# async def seed_prodstats(bind, prodstat_records):
# await Model.bulk_insert(prodstat_records)
# class TestPlaceEndpoint:
# path: str = "/api/v1/prodstats"
# async def test_create_prodstat(self, client):
# prodstat_name = "test"
# response = await client.post(self.path, json=[{"name": prodstat_name}])
# assert response.status_code == codes.HTTP_202_ACCEPTED
# async def test_list_prodstats(self, client):
# expected_record_count = 25
# response = await client.get(self.path)
# assert response.status_code == codes.HTTP_200_OK
# data = response.json()
# assert len(data) == expected_record_count
# assert response.links["next"] is not None
# async def test_get_prodstat(self, client):
# id = 20
# response = await client.get(f"{self.path}/{id}")
# assert response.status_code == codes.HTTP_200_OK
# data = response.json()
# assert data["id"] == 20
# async def test_update_exising_prodstat(self, client):
# id = 10
# value = rand_str(length=8)
# response = await client.put(f"{self.path}/{id}", json={"state": value})
# assert response.status_code == codes.HTTP_200_OK
# data = response.json()
# assert data["id"] == id
# assert data["state"] == value
# async def test_update_prodstat_not_found(self, client):
# id = 99999
# value = rand_str(length=8)
# response = await client.put(f"{self.path}/{id}", json={"state": value})
# assert response.status_code == codes.HTTP_404_NOT_FOUND
# async def test_delete_existing_prodstat(self, client):
# id = 20
# response = await client.delete(f"{self.path}/{id}")
# assert response.status_code == codes.HTTP_200_OK
# data = response.json()
# assert data["id"] == id
# async def test_delete_prodstat_not_found(self, client):
# id = 99999
# response = await client.delete(f"{self.path}/{id}")
# assert response.status_code == codes.HTTP_404_NOT_FOUND
# data = response.json()
# assert data["detail"] == "prodstat not found"
|
[
"[email protected]"
] | |
8e3b3c81c0c614f310d3cacfaea2b523e16773bf
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_344/ch46_2019_03_19_20_17_36_654772.py
|
5d9a674902235b456d5687c756b4218f596434d0
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 212 |
py
|
# Collect words until the sentinel 'fim'; echo each new word starting with 'a'.
lista_palavras = []
palavra = input('Escreva uma palavra: ')
while palavra != 'fim':
    # BUGFIX: original appended to a misspelled name 'lista_palvras' (NameError).
    lista_palavras.append(palavra)
    palavra = input('Escreva outra palavra')
    # BUGFIX: original used assignment ('=') instead of comparison ('==');
    # startswith() also handles an empty answer without raising IndexError.
    if palavra.startswith('a'):
        print(palavra)
|
[
"[email protected]"
] | |
d5c5c47e59e9a5bc56b001df5aa50bcd31f4ad79
|
ab8117bc5b5040e5107fc59337fabc966cb062ba
|
/.history/twitter/engine_20200328094821.py
|
ee9b4bd9c70c405259d659015ba54699abc23b6b
|
[] |
no_license
|
mirfarzam/DownloaderBro
|
6019ab561c67a397135d0a1585d01d4c6f467df4
|
8e0a87dd1f768cfd22d24a7f8c223ce968e9ecb6
|
refs/heads/master
| 2022-04-16T15:31:38.551870 | 2020-04-15T17:36:26 | 2020-04-15T17:36:26 | 255,090,475 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,772 |
py
|
import tweepy
import datetime
import configparser
import time
# Read Twitter API credentials from the local INI-style config file.
config = configparser.ConfigParser()
config.read('credential.conf')
consumer_key = config['API']["API_key"]
consumer_secret = config['API']["API_secret_key"]
access_token = config['ACCESS']["Access_token"]
# NOTE(review): the key "Access_token_secert" is misspelled — it must match
# the spelling used inside credential.conf; confirm before renaming.
access_token_secret = config['ACCESS']["Access_token_secert"]
# OAuth 1.0a sign-in and authenticated API client.
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# api.verify_credentials()
def check_mentions(api, keywords, since_id):
    """Scan mentions newer than *since_id* and print video variant URLs.

    For every mention that is a reply, the replied-to tweet is looked up
    and each video variant's bitrate and URL are printed.

    Args:
        api: authenticated ``tweepy.API`` client.
        keywords: currently unused; kept for interface compatibility.
        since_id: only mentions with an id greater than this are fetched.

    Returns:
        The highest mention id seen — the new watermark — or *since_id*
        unchanged when no new mentions arrived.
    """
    new_since_id = since_id
    for tweet in tweepy.Cursor(api.mentions_timeline,
                               since_id=since_id).items():
        new_since_id = max(tweet.id, new_since_id)
        if tweet.in_reply_to_status_id is None:
            continue  # only replies reference a tweet we can inspect
        main = (api.statuses_lookup([tweet.in_reply_to_status_id], include_entities=True))[0]
        try:
            if 'media' in main.extended_entities:
                for video in main.extended_entities['media'][0]['video_info']['variants']:
                    try:
                        # Some variants (e.g. m3u8 playlists) have no bitrate key.
                        print(f"{video['bitrate']} and is {video['url']}")
                    except KeyError:
                        print(f"Error in finding video in tweet id : {main.id}")
        except Exception:
            # The replied-to tweet may carry no media/extended entities at all.
            print(f"Cannot get Tweet video and tweet id is : {main.id}")
    return new_since_id
# Start at id 1 so the first pass scans the whole mentions timeline.
since_id = 1
while True:
    # NOTE(review): the list argument looks like keyword filters, but
    # check_mentions currently ignores its second parameter — confirm intent.
    since_id = check_mentions(api, ["help", "support"], since_id)
    time.sleep(5)
|
[
"[email protected]"
] | |
9855860eeee26a97c487f030f08eba7c367d287f
|
4b4828d3c98d76d7bf38f90a015945acc408ddc5
|
/PythonAI/Practice/DAY03/src/URL_Parsing_02.py
|
0ef5fb5e164d8ee32d3ebfbbf7f7046114d8a105
|
[] |
no_license
|
Huh-jae-won/Study
|
cb5d32728e8dcded492e7edb054b500c91ec607c
|
e4dbc3fef69bb273b62b866fb5ef2a7250222f10
|
refs/heads/main
| 2023-06-20T13:06:26.691899 | 2021-07-11T07:43:41 | 2021-07-11T07:43:41 | 362,759,927 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 436 |
py
|
import sys
import urllib.request as req
import urllib.parse as parse

# Abort with usage help unless a region number was supplied on the command line.
if len(sys.argv) < 2:
    print("USAGE : download-forecast-argv <Region Number>")
    sys.exit()

regionNumber = sys.argv[1]

# URL-encode the region number as the 'stnid' query parameter.
API = "http://www.kma.go.kr/weather/forecast/mid-term-rss3.jsp"
query = parse.urlencode({'stnid': regionNumber})
url = "{}?{}".format(API, query)
print("url=", url)
|
[
"[email protected]"
] | |
66fb33b0030c894e919d60edb6cc528e910809b4
|
8cce0b5a4be09783016906a36192c52e9daa84aa
|
/equipment_engineering/meter_reader_4_pointer/main.py
|
d3851c2d25d932f0aa84ba3adc0c8e32b8fd3a3b
|
[
"MIT"
] |
permissive
|
Castrol68/opencv-practice
|
fcc9495553d3a10fb045c396697391a5d2a06f36
|
83d76132d004ebbc96d99d34a0fd3fc37a044f9f
|
refs/heads/master
| 2023-08-31T07:18:51.497902 | 2020-05-03T17:43:12 | 2020-05-03T17:43:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,288 |
py
|
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
import abc
from abc import ABC
import argparse
import os
class EquipmentRunner(ABC):
    """Base link in a chain-of-responsibility of runtime modes.

    CONSISTENCY FIX: the chain is wired and traversed via ``.next``
    (e.g. ``setting_mode.next = debug_mode`` and ``self.next.run(...)``),
    but the original class only exposed a property named ``mode`` that
    nothing used. ``next`` is now the real property and ``mode`` is kept
    as a backward-compatible alias sharing the same storage.
    """

    def __init__(self):
        self.__successor = None  # next handler in the chain, set via .next

    @property
    def next(self):
        """Return the next handler; exits (code 404) if the chain is exhausted."""
        if not self.__successor:
            exit(404)
        return self.__successor

    @next.setter
    def next(self, successor):
        self.__successor = successor

    # Backward-compatible alias for the historical property name.
    mode = next

    @abc.abstractmethod
    def run(self, request): ...
class RunSettingMode(EquipmentRunner):
    """Chain link: interactive gauge-calibration ("setting") mode."""

    def run(self, request):
        """Prompt for gauge parameters when 'set' is flagged, else defer."""
        if request["set"] is not True:
            self.next.run(request)
            return
        try:
            log("设置模式 ...")
            lo_angle = input_number_check("表盘最小值对应的刻度")
            hi_angle = input_number_check("表盘最大值对应的刻度")
            lo_value = input_number_check("表盘最小值")
            hi_value = input_number_check("表盘最大值")
            unit = input("仪表单位: ")
            set_detector_argument(lo_angle, hi_angle, lo_value, hi_value, unit)
        except Exception as err:
            log(err, ERROR)
class RunDebugMode(EquipmentRunner):
    """Chain link: one-shot debug run."""

    def run(self, request):
        """Start the debug pipeline when 'debug' is flagged, else defer."""
        if request["debug"] is not True:
            self.next.run(request)
            return
        try:
            log("调试模式 ...")
            start_with_debug()
        except Exception as err:
            log(err, ERROR)
class RunVisionMode(EquipmentRunner):
    """Chain link: windowed (visual) mode — terminal link of the chain."""

    def run(self, request):
        """Start the visual pipeline when 'windows' is flagged; otherwise no-op."""
        if request["windows"] is not True:
            return
        try:
            log("可视化模式 ...")
            start_with_vision()
        except Exception as err:
            log(err, ERROR)
class RunBackendMode(EquipmentRunner):
    """Chain link: headless background mode."""

    def run(self, request):
        """Start the backend pipeline when 'backend' is flagged, else defer."""
        if request["backend"] is not True:
            self.next.run(request)
            return
        try:
            log("后台模式 ...")
            start_with_backend()
        except Exception as err:
            log(err, ERROR)
def fork():
    """Wire the mode handlers into a chain and dispatch the parsed CLI flags.

    Chain order: setting -> debug -> backend -> vision. Relies on the
    module-level ``args`` dict created in ``__main__``.
    """
    setting_mode = RunSettingMode()
    debug_mode = RunDebugMode()
    vision_mode = RunVisionMode()
    backend_mode = RunBackendMode()
    setting_mode.next = debug_mode
    debug_mode.next = backend_mode
    backend_mode.next = vision_mode
    # Disabled daemonization scaffolding (kept for reference):
    # try:
    # os.chdir("/tmp")
    # os.setsid()
    # os.umask(0)
    setting_mode.run(args)
    # except OSError:
    # pass
if "__main__" == __name__:
    from reader_4_pointer import start_with_vision, start_with_debug, set_detector_argument, start_with_backend
    from reader_4_pointer import version, log, ERROR, input_number_check

    version()

    def _parse_bool(text):
        """Parse CLI boolean flags explicitly.

        BUGFIX: argparse's ``type=bool`` treats every non-empty string as
        True (so ``--debug False`` would *enable* debug); parse the common
        true spellings instead and treat anything else as False.
        """
        if isinstance(text, bool):
            return text
        return text.strip().lower() in ("1", "true", "t", "yes", "y")

    ap = argparse.ArgumentParser()
    ap.add_argument("-d", "--debug", type=_parse_bool, help=" debug模式", default=False)
    ap.add_argument("-s", "--set", type=_parse_bool, help="设置模式", default=False)
    ap.add_argument("-w", "--windows", type=_parse_bool, help="可视化模式", default=True)
    ap.add_argument("-b", "--backend", type=_parse_bool, help="后台模式", default=False)
    ap.add_argument("-p", "--path", help="日志存放位置")
    args = vars(ap.parse_args())
    fork()
|
[
"[email protected]"
] | |
0ecfd4ade7a4eb7a0332e62085aa20d4d147faea
|
0550c08cee19be891fde34fa109b5a4ad9f07e3a
|
/countingdnanucleotides/countingdnanucleotides.py
|
f3f714d93042c72c14090a87793d10895a8e4cca
|
[] |
no_license
|
bendavidsteel/rosalind-solutions
|
92653c49d8ef938306ac1289ccb4e4cfe4b8d3ae
|
0749f2662efcac62383a8476ce13fcdd039928b1
|
refs/heads/master
| 2020-03-28T04:17:00.959446 | 2018-09-06T21:32:06 | 2018-09-06T21:32:06 | 147,705,059 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 426 |
py
|
# Count A/C/G/T occurrences in the first line of rosalind_dna.txt and write
# the four counts, space-separated, to output.txt (Rosalind "DNA" problem).
from collections import Counter

with open('rosalind_dna.txt') as stringfile:
    # First line of the input, trailing newline removed.
    s = stringfile.readline().strip('\n')

# Counter returns 0 for absent bases, matching the original manual tallies.
counts = Counter(s)

with open('output.txt', 'w') as output:
    output.write(' '.join(str(counts[base]) for base in 'ACGT'))
|
[
"[email protected]"
] | |
7d2d02d8ebc5d63a3b86766ce7a466835da3c7fb
|
16caebb320bb10499d3712bf0bdc07539a4d0007
|
/objc/_SleepHealth.py
|
b5472aa777bd2319957d026862d02d97921f506b
|
[] |
no_license
|
swosnick/Apple-Frameworks-Python
|
876d30f308a7ac1471b98a9da2fabd22f30c0fa5
|
751510137e9fa35cc806543db4e4415861d4f252
|
refs/heads/master
| 2022-12-08T07:08:40.154553 | 2020-09-04T17:36:24 | 2020-09-04T17:36:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 725 |
py
|
'''
Classes from the 'SleepHealth' framework.
'''
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
# Lazily resolved handles to SleepHealth framework classes; each is None
# when the framework (or the rubicon-objc bridge) is unavailable.
HKSHSleepDaySummary = _Class('HKSHSleepDaySummary')
HKSleepHealthStore = _Class('HKSleepHealthStore')
HKSHSleepPeriod = _Class('HKSHSleepPeriod')
HKSHSleepPeriodSegment = _Class('HKSHSleepPeriodSegment')
HKSHGoalProgressEngine = _Class('HKSHGoalProgressEngine')
HKSHGoalProgress = _Class('HKSHGoalProgress')
HKSHSleepDaySummaryQuery = _Class('HKSHSleepDaySummaryQuery')
HKSHSleepDaySummaryQueryConfiguration = _Class('HKSHSleepDaySummaryQueryConfiguration')
|
[
"[email protected]"
] | |
da4dcc87474cb0400f18d2293569fa8d6e209747
|
1a9852fe468f18e1ac3042c09286ccda000a4135
|
/Specialist Certificate in Data Analytics Essentials/DataCamp/06-Writing_Functions_in_Python/e11_a_read-only_open_context_manager.py
|
956cc65e4b032f0ee852b2821b8fb559599e271b
|
[] |
no_license
|
sarmabhamidipati/UCD
|
452b2f1e166c1079ec06d78e473730e141f706b2
|
101ca3152207e2fe67cca118923896551d5fee1c
|
refs/heads/master
| 2023-08-14T15:41:24.312859 | 2021-09-22T17:33:01 | 2021-09-22T17:33:01 | 386,592,878 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 764 |
py
|
"""
A read-only open() context manager
Yield control from open_read_only() to the context block, ensuring that the read_only_file object
gets assigned to my_file.
Use read_only_file's .close() method to ensure that you don't leave open files lying around.
"""
from contextlib import contextmanager
import time
@contextmanager
def open_read_only(filename):
    """Open a file in read-only mode.

    BUGFIX: the yield is wrapped in try/finally so the handle is closed
    even when the ``with`` block raises; the original leaked the file
    object on exception.

    Args:
        filename (str): The location of the file to read

    Yields:
        file object
    """
    read_only_file = open(filename, mode='r')
    try:
        # Yield read_only_file so it can be assigned to my_file
        yield read_only_file
    finally:
        # Close read_only_file even if the with-block raised
        read_only_file.close()
# Demo: read and print my_file.txt through the read-only context manager.
with open_read_only('my_file.txt') as my_file:
    print(my_file.read())
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.