Dataset schema (one record per source file; ⌀ marks a nullable column):

| column | type | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 … 2.06M |
| ext | string | 11 classes |
| lang | string | 1 class |
| max_stars/issues/forks_repo_path | string | length 3 … 251 |
| max_stars/issues/forks_repo_name | string | length 4 … 130 |
| max_stars/issues/forks_repo_head_hexsha | string | length 40 … 78 |
| max_stars/issues/forks_repo_licenses | list | length 1 … 10 |
| max_stars_count ⌀ | int64 | 1 … 191k |
| max_issues_count ⌀ | int64 | 1 … 116k |
| max_forks_count ⌀ | int64 | 1 … 105k |
| max_*_event_min_datetime / _max_datetime ⌀ | string | length 24 |
| content | string | length 1 … 1.05M |
| avg_line_length | float64 | 1 … 1.02M |
| max_line_length | int64 | 3 … 1.04M |
| alphanum_fraction | float64 | 0 … 1 |

Each record below is rendered as a `=== hexsha ===` header with its metadata, followed by the file content and the per-file line statistics.
=== cd037892f04f2a0ec86c850579b001591ee90f88 ===
path: libkol/request/clan_rumpus.py | size: 1,381 | ext: py | lang: Python | licenses: [BSD-3-Clause] | head: bdc9aa8dbae64ead07e7dbc36f9d6ba802f65ddc
stars: 6 (danheath/pykol-lib, 2019-06-11T19:25:32Z … 2022-01-21T17:05:01Z)
issues: 8 (danheath/pykol-lib, 2019-06-17T11:41:14Z … 2019-08-07T17:28:50Z)
forks: 9 (python-kol/pykollib, 2019-06-09T22:23:06Z … 2021-07-10T00:49:00Z)
import re
from enum import Enum
from typing import List
import libkol
from .request import Request
furniture_pattern = re.compile(r"rump([0-9])_([0-9])\.gif")
avg_line_length: 23.40678 | max_line_length: 85 | alphanum_fraction: 0.569153

=== cd03952161db20fd79bc08d5412273256911f00a ===
path: utils/utils.py | size: 2,155 | ext: py | lang: Python | licenses: [MIT] | repo: ZhenqiSong/OCR_Pytorch | head: df4e8c53353b6c515509241d4c9af3b153224a10
stars: null | issues: null | forks: null
# -*- coding: utf-8 -*-
# __author__:Song Zhenqi
# 2021-01-20
import os
import sys
import yaml
import logging
import functools
logger_initialized = set()
def get_config(file):
    """
    Load a YAML configuration file.

    :param file: path to a .yaml/.yml configuration file
    :return: the parsed configuration as a dict
    """
    _, ext = os.path.splitext(file)
    assert ext in ['.yaml', '.yml'], "Only .yaml/.yml config files are supported"
    config = yaml.load(open(file, 'rb'), Loader=yaml.Loader)
    return config
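# Example usage (a sketch; assumes a local "config.yml" exists):
#     config = get_config("config.yml")
#     print(config.keys())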
avg_line_length: 27.278481 | max_line_length: 91 | alphanum_fraction: 0.645476

=== cd0601891b2dad5746ac7c08ac9655b6e8d13ab9 ===
path: monitoring/uss_qualifier/webapp/tasks.py | size: 2,130 | ext: py | lang: Python | licenses: [Apache-2.0] | repo: interuss/InterUSS-Platform | head: 099abaa1159c4c143f8f1fde6b88956c86608281
stars: null | issues: 1 (2021-11-29T21:53:39Z … 2021-11-29T21:53:39Z) | forks: null
from monitoring.uss_qualifier.test_data import test_report
from monitoring.uss_qualifier.utils import USSQualifierTestConfiguration
from monitoring.uss_qualifier.main import uss_test_executor
from monitoring.uss_qualifier.rid.simulator import flight_state_from_kml
from monitoring.uss_qualifier.rid.utils import FullFlightRecord
import json
from typing import List
import redis
import rq
import uuid
from . import resources
from monitoring.monitorlib.typing import ImplicitDict
def remove_rq_job(job_id):
    """Removes a job from the queue."""
    try:
        rq_job = resources.qualifier_queue.remove(job_id)
    except (redis.exceptions.RedisError, rq.exceptions.NoSuchJobError):
        return None
    return rq_job
avg_line_length: 30.869565 | max_line_length: 88 | alphanum_fraction: 0.746948

=== cd06e20fb3a5b8f7301bbddc6604a232ac3d8294 ===
path: grenades_services/modules/basket.py | size: 11,853 | ext: py | lang: Python | licenses: [Apache-2.0] | repo: Parveen3300/Reans | head: 6dfce046b01099284a8c945a04600ed83e5099a4
stars: null | issues: null | forks: null
"""
BasketManagementRelated modules
"""
# import basket models
from basket.models import Basket
from basket.models import BasketProductLine
# import configuration models
from grenades_services.all_configuration_data import get_currency_instance
from grenades_services.all_configuration_data import get_customer_instance_from_request_user
from grenades_services.all_configuration_data import product_price_calculator
# import home modules
from grenades_services.modules.home import Home
# import serializers modules
from grenades_services.separate_serializers.basket_serializers import \
BasketProductSerializer
def collect_basket_product_values(self):
"""collect_basket_product_values
This 'collect_basket_product_values' method used to collect the all basket related value data
to entered in basket table with customer and session maintain instance
"""
home_instance = self._use_common_module(dict(
product_get_data={
'product_alias_name': self.basket_data['product_alias_name']
}
)
)
product_instance = home_instance.get_product_instance()
if product_instance:
home_instance = self._use_common_module(
dict(filter_input_data={'mapped_products__id__in': [product_instance.id]}))
category_product_mapping_instance = \
home_instance.category_product_mapping_instance()
home_instance = self._use_common_module(
dict(filter_input_data={
'included_products__id__in': [product_instance.id],
'offer_type': 'offer'
})
)
product_offer_instance = home_instance.offer_products()
payable_amount = self.calculate_offer_value(
product_offer_instance,
product_instance.price) if product_offer_instance else product_instance.price
return (product_instance,
category_product_mapping_instance,
payable_amount)
def collect_basket_details(self, basket_instance):
"""
This 'collect_basket_details' method collect the basket common code details
"""
product_instance, category_product_mapping_instance, payable_amount = \
self.collect_basket_product_values()
return {
'basket': basket_instance,
'line_reference': str(product_instance.id),
'product': product_instance,
'category': category_product_mapping_instance.last(
).category if category_product_mapping_instance else None,
'quantity': self.basket_data.get('quantity', 1),
'price_currency': get_currency_instance(),
'price_excl_tax': None,
'price_incl_tax': None,
'payable_amount': payable_amount
}
def add_new_basket(self):
"""
This 'add_new_basket' method used to create a fresh basket for a customer or user
"""
if self.customer_instance:
self.filter_query_data['owner'] = self.customer_instance
create_basket = Basket.objects.create(**self.filter_query_data)
print("63546735435463543564", create_basket)
if create_basket:
if self.create_basket_product_line(self.collect_basket_details(create_basket)):
self._request.session['basket_id'] = create_basket.id
return True
return False
def update_product_basket(self):
"""
This 'update_product_basket' method used to update the product in the basket
"""
if self.basket_id:
self.filter_query_data['id'] = self.basket_id
if self.customer_instance:
self.filter_query_data['owner'] = self.customer_instance
basket_instance = self.get_basket_instance()
if basket_instance:
if self.create_basket_product_line(self.collect_basket_details(
basket_instance)):
return True
else:
return False
def add_to_basket(self):
"""
This 'add_to_basket' method used to add the product in the basket
"""
self.customer_instance = get_customer_instance_from_request_user(
self._request.user)
if 'basket_id' in self._request.session.keys():
self.basket_id = self._request.session['basket_id']
return self.update_product_basket()
else:
return self.add_new_basket()
class DisplayProductsBasket(UpdateProductsBasket):
"""
DisplayProductsBasket
return: {
'products_description': {
'id': 14,
'products_list': [],
'line_reference': '2',
'quantity': 1,
'price_currency': 'INR',
'price_excl_tax': None,
'price_incl_tax': None,
'payable_amount': '1000.00',
'date_created': '2021-11-01T10:29:50.091484Z',
'date_updated': '2021-11-01T10:29:50.091502Z',
'basket': 5,
'product': 2,
'category': 5,
'collection': None
},
'product_price_details': {'total_item': 0},
'random_products_list': <QuerySet [<Product: Instruments>]>
}
"""
def basket_product_description(self):
"""basket_product_description
This 'basket_product_description' method used to get the all product description with
all products details from baskets
"""
if self.basket_id:
self.filter_data['id'] = self.basket_id
if self.customer_instance:
self.filter_data['owner'] = self.customer_instance
basket_instance = self.get_basket_instance(self.filter_data)
if basket_instance:
product_line_last_obj = self.get_basket_product_lines(
{'basket': basket_instance}).last()
self.products_description_data = BasketProductSerializer(
product_line_last_obj).data
def create_product_order_summary_dict(self, order_summary_dict):
"""
This 'create_product_order_summary_dict' method used to create dict for product order summary
total_price, coupon_price, offer_price
"""
self.product_price_details['total'] = order_summary_dict['total_price']
self.product_price_details['sub_total'] = order_summary_dict['total_price']
self.product_price_details['estimate_tax'] = self.estimate_tax
self.product_price_details['coupon_name'] = self.coupon_name
self.product_price_details['coupon_price'] = order_summary_dict['coupon_price']
self.product_price_details['offer_name'] = self.offer_name
self.product_price_details['offer_price'] = order_summary_dict['offer_price']
def order_product_price_details(self):
"""order_product_price_details
This 'order_product_price_details' method used to get the all product order summary with price calculation
and manage the all coupon and offers
"""
self.product_price_details['total_item'] = len(
self.products_description_data['products_list'])
for _products_details in self.products_description_data['products_list']:
order_summary_dict = product_price_calculator(_products_details,
self.coupon_details,
self.offer_details)
# create product order summary
# return total_price, coupon_price, offer_price
self.create_product_order_summary_dict(order_summary_dict)
def display_products(self):
"""
This 'display_products' method used to get the all session and customer related
basket products for help on display
"""
if 'basket_id' in self._request.session.keys():
self.basket_id = self._request.session.get('basket_id')
else:
self.basket_id = None
self.customer_instance = get_customer_instance_from_request_user(
self._request.user)
self.basket_product_description()
self.order_product_price_details()
home_instance = Home()
random_products_list = home_instance.random_products_list()
return {
'products_description': self.products_description_data,
'product_price_details': self.product_price_details,
'random_products_list': random_products_list if random_products_list else []
}
avg_line_length: 40.731959 | max_line_length: 114 | alphanum_fraction: 0.651312

=== cd08e29c15d2756e6bc4a870585c434ad2c07d7a ===
path: plots.py | size: 2,935 | ext: py | lang: Python | licenses: [MIT] | repo: klowrey/speed_arch | head: edb002b6d57915fa5e2024b36eb66acf30a7130a
stars: null | issues: null | forks: null
import numpy as np
import matplotlib.pyplot as plt
acc = np.array([7.95549917, 7.46641684, 8.16141701, 8.80025005, 7.29208231,
7.73391724, 8.16333294, 9.02033329, 7.60566664, 7.88175011,
7.77574968, 8.79116631, 8.24524975, 8.98549938, 7.3717494 ,
7.32324982, 8.14583302, 8.53608322, 9.30125046, 8.53458309,
8.01708317, 8.36941624, 8.23241711, 8.93550014, 8.73683262,
8.05008316, 8.68758297, 8.59083271, 9.0852499 , 9.07924938,
7.3904171 , 8.82283497, 9.41650009, 8.45791626, 8.04416656,
7.70391607, 9.05191612, 7.78883314, 8.56858349, 9.07366657,
8.77991581, 7.94008255, 8.1746664 , 8.28074932, 7.91550064,
7.4872508 , 8.59158325, 9.33758259, 8.21591663, 8.64350033,
9.00899982, 9.26983356, 8.7885828 , 9.43066692, 9.09299946,
8.55266666, 8.73725033, 7.50575018, 7.99300003, 8.16366673,
8.97633266, 8.19683361, 7.71091652, 8.65974998, 8.97108364,
8.03375053, 8.99700069, 9.18599987, 8.26491737, 8.64508343,
8.00825024, 7.80483294, 7.45008326, 8.23791695, 8.90425014,
9.47108269, 8.0963335 , 8.88658333, 7.99116659, 7.48541689,
8.23633289, 8.61583424, 7.75775003, 8.10883331, 8.57058334,
7.72616577, 7.29199982, 8.26725006, 7.80841637, 8.8257494 ,
9.35824871, 8.85208321, 7.50433302, 8.03266716, 8.77825069,
8.94516659, 8.56558323, 8.64266682, 8.70541668, 8.4321661 ])
spd = np.array([-15.733922 , -17.69332123, -15.09789562, -14.98722076,
-19.22259712, -20.7837429 , -19.90324211, -13.48655987,
-13.42676544, -10.76375103, -18.15335083, -9.28313065,
-11.35249805, -12.09126663, -13.63445187, -17.17600822,
-11.39536953, -13.01688385, -14.5902586 , -9.40825558,
-11.72452641, -9.74875546, -15.47906494, -17.58286476,
-13.81764889, -15.5894928 , -9.33745289, -11.58790493,
-12.6633606 , -12.95300007, -6.5169816 , -15.54349899,
-9.18311691, -11.59814739, -11.74293232, -18.68121147,
-12.44590282, -13.20860291, -8.75187683, -23.9044342 ,
-10.90840054, -11.39770985, -14.83057499, -13.2543335 ,
-13.18600559, -13.31662369, -12.91320515, -9.9495573 ,
-10.87206936, -11.35480595, -13.06026745, -10.52530384,
-13.57276917, -13.95710754, -9.0244627 , -12.21132755,
-9.00012493, -9.07794476, -12.50325108, -9.44294643,
-12.86182499, -8.95974827, -10.34585476, -16.70100594,
-7.63287163, -11.60797215, -11.73308086, -10.89833736,
-11.40105438, -8.59499645, -11.1452837 , -11.61797333,
-9.25040531, -9.30110741, -8.68466759, -10.68533611,
-11.68466282, -10.05351353, -11.61765003, -9.72268772,
-9.05587578, -10.88561535, -11.85619068, -12.46191692,
-8.43530369, -6.79801893, -9.91088772, -9.89115238,
-16.34910393, -12.32227421, -13.36759472, -17.33267021,
-10.66337585, -10.35019398, -11.29328632, -9.45415211,
-10.61021137, -14.06766415, -8.31783295, -11.77228069])
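# The plotting code is cut off in this excerpt (matplotlib is imported but
# never used); a minimal sketch of a plot these two arrays suggest, with
# axis meanings as assumptions:
plt.scatter(spd, acc)
plt.xlabel("speed reward")
plt.ylabel("accuracy reward")
plt.show()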
avg_line_length: 56.442308 | max_line_length: 79 | alphanum_fraction: 0.65247

=== cd08e9d07146abb0712c59ab83d9a3d247ba38c2 ===
path: tool/klint/bpf/__init__.py | size: 27 | ext: py | lang: Python | licenses: [MIT] | repo: kylerky/klint | head: 77be216ec3f4315a835b7bcdaef1b66ed3144603
stars: 2 (2022-03-08T16:10:27Z … 2022-03-11T14:14:04Z) | issues: null | forks: 1 (2022-03-24T09:27:41Z … 2022-03-24T09:27:41Z)
"""
BPF-related stuff.
"""
avg_line_length: 6.75 | max_line_length: 18 | alphanum_fraction: 0.555556

=== cd094ee5dcfd76a9bf766f06eb8cdcb0b8027094 ===
path: tests/test_geojson.py | size: 4,485 | ext: py | lang: Python | licenses: [MIT] | repo: geographika/mappyfile-geojson | head: 9525bb840ae243a0f5072730f6432bf98dcacbe9
stars: 8 (2018-08-08T06:47:38Z … 2022-01-30T13:25:35Z) | issues: 4 (2020-09-24T05:28:19Z … 2022-03-29T22:18:13Z) | forks: 1 (2018-08-08T06:47:42Z … 2018-08-08T06:47:42Z)
import os
import json
import geojson
import mappyfile_geojson
import mappyfile
import pytest
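# get_geojson() and run_tests() are used below but missing from this excerpt;
# assumed minimal versions so the module is self-contained (the data-file
# location is a guess):
def get_geojson(fn):
    """Load a GeoJSON file that sits next to this test module."""
    fn = os.path.join(os.path.dirname(os.path.realpath(__file__)), fn)
    with open(fn) as f:
        return geojson.load(f)


def run_tests():
    pytest.main([__file__, "-v"])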
def test_pointZ():
    """
    Z-values are simply removed as they are not supported by inline
    MapServer Features
    """
    gj = get_geojson("PointZ.json")
    layer = mappyfile_geojson.convert(gj)
    s = mappyfile.dumps(layer)
    print(s)
    assert s == """LAYER
EXTENT 102 0.5 102 0.5
STATUS ON
TYPE POINT
PROCESSING "ITEMS=prop0"
FEATURE
POINTS
102.0 0.5
END
ITEMS "value0"
END
END"""


if __name__ == '__main__':
    # test_multipolygon()
    run_tests()
    print("Done!")
avg_line_length: 19.933333 | max_line_length: 68 | alphanum_fraction: 0.515942

=== cd0b619e6db23ae007998ba9f088e9c319778c9d ===
path: 230.py | size: 517 | ext: py | lang: Python | licenses: [MIT] | repo: BYOUINZAKA/LeetCodeNotes | head: 48e1b4522c1f769eeec4944cfbd57abf1281d09a
stars: null | issues: null | forks: null
'''
@Author: Hata
@Date: 2020-05-24 15:30:19
@LastEditors: Hata
@LastEditTime: 2020-05-24 15:32:04
@FilePath: \LeetCode\230.py
@Description: https://leetcode-cn.com/problems/kth-smallest-element-in-a-bst/
'''
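# The solution body is missing from this excerpt; a standard approach for
# this problem (in-order traversal of a BST yields keys in ascending order),
# sketched here as an assumption about what the file contained:
class Solution:
    def kthSmallest(self, root, k: int) -> int:
        stack = []
        node = root
        while stack or node:
            # walk down to the leftmost unvisited node
            while node:
                stack.append(node)
                node = node.left
            node = stack.pop()
            k -= 1
            if k == 0:
                return node.val
            node = node.right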
avg_line_length: 22.478261 | max_line_length: 77 | alphanum_fraction: 0.558994

=== cd0c0c186a507173da38fb9c91812fd94be9043a ===
path: Scripts/TestParsers/PyUnittestTestParser.py | size: 3,430 | ext: py | lang: Python | licenses: [BSL-1.0] | repo: davidbrownell/v3-Common_Environment | head: 8f42f256e573cbd83cbf9813db9958025ddf12f2
stars: null | issues: 1 (2018-06-08T06:45:16Z … 2018-06-08T06:45:16Z) | forks: 1 (2018-06-08T04:15:17Z … 2018-06-08T04:15:17Z)
# ----------------------------------------------------------------------
# |
# | PythonUnittestTestParser.py
# |
# | David Brownell <[email protected]>
# | 2018-05-22 07:59:46
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2018-22.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
"""Contains the TestParser object"""
import os
import re
import CommonEnvironment
from CommonEnvironment.Interface import staticderived, override, DerivedProperty
from CommonEnvironment.TestParserImpl import TestParserImpl
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
avg_line_length: 38.539326 | max_line_length: 159 | alphanum_fraction: 0.473178

=== cd0c8d9af792a61f23cb21cb4b226023ec5c2f1f ===
path: fairseq/models/transformer_xlm_iwslt_decoder.py | size: 7,116 | ext: py | lang: Python | licenses: [MIT] | repo: jm-glowienke/fairseq | head: ca45353322f92776e34a7308bf3fab75af9c1d50
stars: null | issues: null | forks: null
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Any, Dict
from fairseq import checkpoint_utils
from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import (
    TransformerDecoder,
    TransformerEncoder,
    TransformerModel,
    base_architecture as transformer_base_architecture,
)


def upgrade_state_dict_with_xlm_weights(
    state_dict: Dict[str, Any], pretrained_xlm_checkpoint: str
) -> Dict[str, Any]:
    """
    Load XLM weights into a Transformer encoder or decoder model.

    Args:
        state_dict: state dict for either TransformerEncoder or
            TransformerDecoder
        pretrained_xlm_checkpoint: checkpoint to load XLM weights from

    Raises:
        AssertionError: If architecture (num layers, attention heads, etc.)
            does not match between the current Transformer encoder or
            decoder and the pretrained_xlm_checkpoint
    """
    if not os.path.exists(pretrained_xlm_checkpoint):
        raise IOError(
            "Model file not found: {}".format(pretrained_xlm_checkpoint))

    state = checkpoint_utils.load_checkpoint_to_cpu(pretrained_xlm_checkpoint)
    xlm_state_dict = state["model"]
    for key in xlm_state_dict.keys():
        for search_key in ["embed_tokens", "embed_positions", "layers"]:
            if search_key in key:
                subkey = key[key.find(search_key):]
                if "in_proj_weight" in subkey or "in_proj_bias" in subkey:
                    continue
                assert subkey in state_dict, (
                    "{} \nTransformer encoder / decoder "
                    "state_dict does not contain {}. \nCannot "
                    "load {} from pretrained XLM checkpoint "
                    "{} into Transformer.".format(
                        str(state_dict.keys()), subkey, key,
                        pretrained_xlm_checkpoint
                    )
                )
                state_dict[subkey] = xlm_state_dict[key]
    return state_dict
# class TransformerDecoderFromPretrainedXLM(TransformerDecoder):
# def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
# super().__init__(args, dictionary, embed_tokens, no_encoder_attn)
# if getattr(args, "init_encoder_only", False):
# # Don't load XLM weights for decoder if --init-encoder-only
# return
# assert hasattr(args, "pretrained_xlm_checkpoint"), (
# "--pretrained-xlm-checkpoint must be specified to load Transformer "
# "decoder from pretrained XLM"
# )
#
# xlm_loaded_state_dict = upgrade_state_dict_with_xlm_weights(
# state_dict=self.state_dict(),
# pretrained_xlm_checkpoint=args.pretrained_xlm_checkpoint,
# )
# self.load_state_dict(xlm_loaded_state_dict, strict=True)
avg_line_length: 40.662857 | max_line_length: 82 | alphanum_fraction: 0.647274

=== cd0d1977c612b5942005c2d4eceddb8039516a10 ===
path: test/unit/mysql_db_admin/process_request.py | size: 7,249 | ext: py | lang: Python | licenses: [MIT] | repo: mjpernot/mysql-mysql-db-admin | head: 4821d6923155a48362869a6f2bf8c69fe3e533d4
stars: null | issues: null | forks: null
#!/usr/bin/python
# Classification (U)
"""Program: process_request.py
Description: Unit testing of process_request in mysql_db_admin.py.
Usage:
test/unit/mysql_db_admin/process_request.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
    import unittest2 as unittest
else:
    import unittest

# Third-party
import mock

# Local
sys.path.append(os.getcwd())
import mysql_db_admin
import lib.gen_libs as gen_libs
import version

__version__ = version.__version__


def func_holder(server, dbs, tbl):
    """Method: func_holder

    Description: Function stub holder for a generic function call.

    Arguments:
        server
        dbs
        tbl
    """
    status = True

    if server and dbs and tbl:
        status = True

    return status


if __name__ == "__main__":
    unittest.main()
avg_line_length: 26.36 | max_line_length: 76 | alphanum_fraction: 0.646986

=== cd0e89b4b693cd65319eaacf6298dcfed09dbd78 ===
path: fsttest/__init__.py | size: 594 | ext: py | lang: Python | licenses: [MIT] | repo: eddieantonio/fsttest | head: 8ff71a9aa41a70a30832fa219b72e7478872c16f
stars: null | issues: 1 (2020-01-27T21:43:04Z … 2020-01-28T15:57:05Z) | forks: 1 (2021-04-26T17:46:19Z … 2021-04-26T17:46:19Z)
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
FST test -- test your Foma finite-state transducers!
"""
from .__version__ import VERSION as __version__
from ._fst import FST
from ._results import FailedTestResult, PassedTestResult, TestResults
from ._run import execute_test_case, run_tests
from ._test_case import TestCase
from .exceptions import FSTTestError, TestCaseDefinitionError
__all__ = [
    "FST",
    "FSTTestError",
    "FailedTestResult",
    "PassedTestResult",
    "TestCase",
    "TestCaseDefinitionError",
    "TestResults",
    "execute_test_case",
    "run_tests",
]
avg_line_length: 22.846154 | max_line_length: 69 | alphanum_fraction: 0.725589

=== cd0ff0154f3a2ed2059c34dae1964cf271d9a2e1 ===
path: analysis/sharpness.py | size: 3,674 | ext: py | lang: Python | licenses: [Apache-2.0] | repo: sanketvmehta/lifelong-learning-pretraining-and-sam | head: 2fee18a4b13c918f6005f88c19089b86f4a8aae2
stars: null | issues: null | forks: null
import copy

import numpy as np
import torch
from scipy import optimize
import logging


def sharpness(model, criterion_fn, A, epsilon=1e-3, p=0, bounds=None):
    """Computes sharpness metric according to https://arxiv.org/abs/1609.04836.

    Args:
        model: Model on which to compute sharpness
        criterion_fn: Function that takes in a model and returns the loss
            value and gradients on the appropriate data that will be used in
            the loss maximization done in the sharpness calculation.
        A: Projection matrix that defines the subspace in which the loss
            maximization will be done. If A=1, no projection will be done.
        epsilon: Defines the size of the neighborhood that will be used in the
            loss maximization.
        p: The dimension of the random projection subspace in which maximization
            will be done. If 0, assumed to be the full parameter space.
    """
    run_fn = create_run_model(model, A, criterion_fn)
    if bounds is None:
        bounds = compute_bounds(model, A, epsilon)
    dim = flatten_parameters(model).shape[0] if p == 0 else p

    # Find the maximum loss in the neighborhood of the minima
    y = optimize.minimize(
        lambda x: run_fn(x),
        np.zeros(dim),
        method="L-BFGS-B",
        bounds=bounds,
        jac=True,
        options={"maxiter": 10},
    ).x.astype(np.float32)

    model_copy = copy.deepcopy(model)
    if np.isscalar(A):  # A == 1 means "no projection" (the original compared with `A is 1`)
        flat_diffs = y
    else:
        flat_diffs = A @ y
    apply_diffs(model_copy, flat_diffs)

    maximum = criterion_fn(model_copy)["loss"]
    loss_value = criterion_fn(model)["loss"]
    sharpness = 100 * (maximum - loss_value) / (1 + loss_value)
    return sharpness


def flatten_parameters(model):
    """Returns a flattened numpy array with the parameters of the model."""
    return np.concatenate(
        [
            param.detach().cpu().numpy().flatten()
            for param in model.parameters()
            if param.requires_grad
        ]
    )


def compute_bounds(model, A, epsilon):
    """Computes the bounds in which to search for the maximum loss."""
    x = flatten_parameters(model)
    if np.isscalar(A):
        bounds = epsilon * (np.abs(x) + 1)
    else:
        b, _, _, _ = np.linalg.lstsq(A, x, rcond=None)
        bounds = epsilon * (np.abs(b) + 1)
    return optimize.Bounds(-bounds, bounds)


def create_run_model(model, A, criterion_fn):
    """Creates a run function that takes in parameters in the subspace that loss
    maximization takes place in, and computes the loss and gradients
    corresponding to those parameters.
    """
    # The body of `run` is missing from this excerpt; the reconstruction below
    # is an assumption inferred from the docstring and from how `run_fn` is
    # used in `sharpness`: L-BFGS-B with jac=True expects (objective, gradient),
    # and the objective is the negated loss since scipy minimizes while we want
    # to maximize. The "gradients" key is likewise an assumption, mirroring the
    # documented "loss" key of criterion_fn's return value.
    def run(x):
        model_copy = copy.deepcopy(model)
        flat_diffs = x.astype(np.float32) if np.isscalar(A) else A @ x.astype(np.float32)
        apply_diffs(model_copy, flat_diffs)
        result = criterion_fn(model_copy)
        grad = np.asarray(result["gradients"], dtype=np.float64)
        if not np.isscalar(A):
            grad = A.T @ grad  # chain rule: project the gradient into the subspace
        return -result["loss"], -grad

    return run


def apply_diffs(model, diffs):
    """Adds deltas to the parameters in the model corresponding to diffs."""
    parameters = model.parameters()
    idx = 0
    for parameter in parameters:
        if parameter.requires_grad:
            n_elements = parameter.nelement()
            cur_diff = diffs[idx : idx + n_elements]
            parameter.data = parameter.data + torch.tensor(
                cur_diff.reshape(parameter.shape)
            ).to(device=parameter.device)
            idx += n_elements
avg_line_length: 32.803571 | max_line_length: 80 | alphanum_fraction: 0.631464

=== cd10ef939588bc49c75df0d3a2c4ba2f987aa04b ===
path: benchmark_runner.py | size: 1,164 | ext: py | lang: Python | licenses: [MIT] | repo: mamrehn/simplification | head: cb43ccadcbe011b89845142910d844b0bf7ca510
stars: null | issues: null | forks: null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Standalone benchmark runner
"""
import cProfile
import pstats
import profile
import numpy as np
print("Running Rust + Cython benchmarks")
# calibrate
pr = profile.Profile()
calibration = np.mean([pr.calibrate(100000) for x in range(5)])
# add the bias
profile.Profile.bias = calibration
cProfile.run(open('simplification/test/cprofile_rust_cython.py', 'rb').read(), 'simplification/test/output_stats_rust_cython')
rust_cython = pstats.Stats('simplification/test/output_stats_rust_cython')
cProfile.run(open('simplification/test/cprofile_rust_cython_complex.py', 'rb').read(), 'simplification/test/output_stats_rust_cython_complex')
rust_cython_c = pstats.Stats('simplification/test/output_stats_rust_cython_complex')
cProfile.run(open('simplification/test/cprofile_rust_cython_shapely.py', 'rb').read(), 'simplification/test/output_stats_rust_cython_shapely')
shapely = pstats.Stats('simplification/test/output_stats_rust_cython_shapely')
print("Rust Cython Benchmarks\n")
rust_cython.sort_stats('cumulative').print_stats(5)
rust_cython_c.sort_stats('cumulative').print_stats(5)
shapely.sort_stats('cumulative').print_stats(5)
avg_line_length: 35.272727 | max_line_length: 135 | alphanum_fraction: 0.803265

=== cd13a01142ccf63d717a89caf8e588ed9c337f8d ===
path: D_QuickS.py | size: 850 | ext: py | lang: Python | licenses: [MIT] | repo: rut999/Algo | head: 9180f66452597a758a31073cb2b8fa4a3e6a93fe
stars: null | issues: null | forks: null
import time
from random import randint
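# `random_int` and `Quick_sort` are called below but missing from this
# excerpt; the following are assumed minimal implementations (names and
# behavior guessed from usage) so the timing benchmark actually runs.
def random_int(n):
    """Return a list of n random integers."""
    return [randint(0, n) for _ in range(n)]


def Quick_sort(list1):
    """In-place quicksort using a Lomuto partition."""
    def partition(lo, hi):
        pivot = list1[hi]
        i = lo - 1
        for j in range(lo, hi):
            if list1[j] <= pivot:
                i += 1
                list1[i], list1[j] = list1[j], list1[i]
        list1[i + 1], list1[hi] = list1[hi], list1[i + 1]
        return i + 1

    def quicksort(lo, hi):
        while lo < hi:
            p = partition(lo, hi)
            # recurse into the smaller half, loop on the larger one,
            # keeping the recursion depth at O(log n)
            if p - lo < hi - p:
                quicksort(lo, p - 1)
                lo = p + 1
            else:
                quicksort(p + 1, hi)
                hi = p - 1

    quicksort(0, len(list1) - 1)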
random_list = random_int(100000)
#list2 = [0,0,99,34,56,54,-1,-1,32,2.5,-1.1,1000,1000,-2,30,21,24,15,10,6]
t1 = time.time()
Quick_sort(random_list)
t2 = time.time()
print(t2-t1)
# def Quick_Sort(list1):
# if (list1[0]<list1[-1]):
# partition_index =partition(list1)
# quicksort(list1,)
# quicksort()
avg_line_length: 22.368421 | max_line_length: 74 | alphanum_fraction: 0.583529

=== cd18f52d3cd9d807fe305ade001766cc89245405 ===
path: cride/circles/views/circles.py | size: 1,929 | ext: py | lang: Python | licenses: [MIT] | repo: LhernerRemon/Rider | head: 30783cf58513698d23730f5fa477dfeddda8ee6b
stars: null | issues: null | forks: null
# REST
from rest_framework import viewsets, mixins
from rest_framework.permissions import IsAuthenticated

# Filters
from rest_framework.filters import SearchFilter, OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend

# Models, serializers
from cride.circles.models import Circle, Membership
from cride.circles.serializers import CircleModelSerializer

# Permission
from cride.circles.permissions import IsCircleAdmin
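# The viewset itself is cut off in this excerpt; a minimal sketch of the class
# these imports suggest (the queryset filter, mixins and search fields are
# assumptions):
class CircleViewSet(mixins.ListModelMixin,
                    mixins.RetrieveModelMixin,
                    mixins.UpdateModelMixin,
                    viewsets.GenericViewSet):
    """Circle view set."""

    serializer_class = CircleModelSerializer
    lookup_field = 'slug_name'
    filter_backends = (SearchFilter, OrderingFilter, DjangoFilterBackend)
    search_fields = ('slug_name', 'name')

    def get_queryset(self):
        """Restrict the list action to public circles only."""
        queryset = Circle.objects.all()
        if self.action == 'list':
            return queryset.filter(is_public=True)
        return queryset

    def get_permissions(self):
        """Assign permissions based on action."""
        permissions = [IsAuthenticated]
        if self.action in ['update', 'partial_update']:
            permissions.append(IsCircleAdmin)
        return [p() for p in permissions]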
avg_line_length: 32.694915 | max_line_length: 83 | alphanum_fraction: 0.690513

=== cd18f82e759c1f805c2c156a96b2d6d4fe352c3d ===
path: api/service/cidades_atendimento_service.py | size: 780 | ext: py | lang: Python | licenses: [MIT] | repo: FinotelliCarlos/ewipesimple-adminweb-python | head: 3bf779250efeb9f85b4283ffbf210bf227aa8e8c
stars: 1 (2021-06-17T06:13:33Z … 2021-06-17T06:13:33Z) | issues: null | forks: null
from adminweb.services import cep_service
from adminweb.models import Profissional
from rest_framework import serializers
import json
avg_line_length: 32.5 | max_line_length: 91 | alphanum_fraction: 0.75

=== cd190e09b3c36d0f4700cf8693b8dfde027f164e ===
path: 초보를 위한 셀레니움/#1 Google Screenshots Scrapping/main.py | size: 6,680 | ext: py | lang: Python | licenses: [MIT] | repo: donddog/Nomad_Academy_Online_Course_Codes | head: 391fde26052a67f7b533219ab0de6096830697b6
stars: 1 (2021-02-11T16:45:22Z … 2021-02-11T16:45:22Z) | issues: null | forks: null
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
import shutil
import os
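# The GoogleKeywordScreenshooter class used below is missing from this
# excerpt; a minimal sketch of what it plausibly looks like, inferred from
# the imports and the calls below (method behavior is an assumption):
class GoogleKeywordScreenshooter:
    def __init__(self, keyword, screenshots_dir):
        self.browser = webdriver.Chrome()
        self.keyword = keyword
        self.screenshots_dir = screenshots_dir

    def start(self):
        # search Google for the keyword and screenshot the results page
        self.browser.get(f"https://www.google.com/search?q={self.keyword}")
        WebDriverWait(self.browser, 10).until(
            EC.presence_of_element_located((By.ID, "search"))
        )
        os.makedirs(self.screenshots_dir, exist_ok=True)
        self.browser.save_screenshot(
            os.path.join(self.screenshots_dir, f"{self.keyword}.png")
        )

    def finish(self):
        self.browser.quit()

    def tozipfile(self):
        # bundle all screenshots into a single zip archive
        shutil.make_archive(self.screenshots_dir, "zip", self.screenshots_dir)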
domain_competitors = GoogleKeywordScreenshooter("buy domain", "screenshots")
domain_competitors.start()
domain_competitors.finish()
domain_competitors.tozipfile()
# python_competitors = GoogleKeywordScreenshooter("python book", "screenshots")
# python_competitors.start()
# python_competitors.finish()
avg_line_length: 38.171429 | max_line_length: 107 | alphanum_fraction: 0.572156

=== cd1a66acf2cfd6c3c481c4c94e53d436215cbbe7 ===
path: omicron/core/numpy_extensions.py | size: 9,414 | ext: py | lang: Python | licenses: [MIT] | repo: evimacs/omicron | head: abe77fd25a93cf3d0d17661ae957373474724535
stars: 4 (2020-11-09T02:23:51Z … 2021-01-24T00:45:21Z) | issues: 14 (2020-11-09T02:31:34Z … 2021-12-22T10:15:47Z) | forks: 2 (2021-01-24T00:45:25Z … 2021-12-24T06:18:37Z)
"""Extension function related to numpy
"""
from __future__ import annotations
from typing import List, Tuple
import numpy as np
import pandas
from numpy.typing import ArrayLike
def dict_to_numpy_array(d: dict, dtype: List[Tuple]) -> np.array:
"""convert dictionary to numpy array
Examples:
>>> d = {"aaron": 5, "jack": 6}
>>> dtype = [("name", "S8"), ("score", "<i4")]
>>> dict_to_numpy_array(d, dtype)
array([(b'aaron', 5), (b'jack', 6)],
dtype=[('name', 'S8'), ('score', '<i4')])
Args:
d (dict): [description]
dtype (List[Tuple]): [description]
Returns:
np.array: [description]
"""
return np.fromiter(d.items(), dtype=dtype, count=len(d))
def dataframe_to_structured_array(
df: pandas.DataFrame, dtypes: List[Tuple] = None
) -> ArrayLike:
"""convert dataframe (with all columns, and index possibly) to numpy structured arrays
`len(dtypes)` should be either equal to `len(df.columns)` or `len(df.columns) + 1`. In the later case, it implies to include `df.index` into converted array.
Args:
df: the one needs to be converted
dtypes: Defaults to None. If it's `None`, then dtypes of `df` is used, in such case, the `index` of `df` will not be converted.
Returns:
ArrayLike: [description]
"""
v = df
if dtypes is not None:
dtypes_in_dict = {key: value for key, value in dtypes}
col_len = len(df.columns)
if len(dtypes) == col_len + 1:
v = df.reset_index()
rename_index_to = set(dtypes_in_dict.keys()).difference(set(df.columns))
v.rename(columns={"index": list(rename_index_to)[0]}, inplace=True)
elif col_len != len(dtypes):
raise ValueError(
f"length of dtypes should be either {col_len} or {col_len + 1}, is {len(dtypes)}"
)
# re-arrange order of dtypes, in order to align with df.columns
dtypes = []
for name in v.columns:
dtypes.append((name, dtypes_in_dict[name]))
else:
dtypes = df.dtypes
return np.array(np.rec.fromrecords(v.values), dtype=dtypes)
def find_runs(x):
"""Find runs of consecutive items in an array."""
# ensure array
x = np.asanyarray(x)
if x.ndim != 1:
raise ValueError("only 1D array supported")
n = x.shape[0]
# handle empty array
if n == 0:
return np.array([]), np.array([]), np.array([])
else:
# find run starts
loc_run_start = np.empty(n, dtype=bool)
loc_run_start[0] = True
np.not_equal(x[:-1], x[1:], out=loc_run_start[1:])
run_starts = np.nonzero(loc_run_start)[0]
# find run values
run_values = x[loc_run_start]
# find run lengths
run_lengths = np.diff(np.append(run_starts, n))
return run_values, run_starts, run_lengths
def count_between(arr, start, end):
"""`start``end`
arr
Examples:
>>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111]
>>> count_between(arr, 20050104, 20050111)
6
>>> count_between(arr, 20050104, 20050109)
4
"""
pos_start = np.searchsorted(arr, start, side="right")
pos_end = np.searchsorted(arr, end, side="right")
counter = pos_end - pos_start + 1
if start < arr[0]:
counter -= 1
if end > arr[-1]:
counter -= 1
return counter
def shift(arr, start, offset):
"""numpyarrstart(offset
`arr``offset``offset`
Examples:
>>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111]
>>> shift(arr, 20050104, 1)
20050105
>>> shift(arr, 20050105, -1)
20050104
>>> # shift
>>> shift(arr, 20050120, 1)
20050120
Args:
arr :
start : numpy
offset (int): [description]
Returns:
"""
pos = np.searchsorted(arr, start, side="right")
if pos + offset - 1 >= len(arr):
return start
else:
return arr[pos + offset - 1]
def floor(arr, item):
"""
arritemitemarrarr[0];item
arrarr[-1]
`minute_frames_floor`.
Examples:
>>> a = [3, 6, 9]
>>> floor(a, -1)
3
>>> floor(a, 9)
9
>>> floor(a, 10)
9
>>> floor(a, 4)
3
>>> floor(a,10)
9
Args:
arr:
item:
Returns:
"""
if item < arr[0]:
return arr[0]
index = np.searchsorted(arr, item, side="right")
return arr[index - 1]
def join_by_left(key, r1, r2, mask=True):
""" `r1`, `r2` by `key`
`r1``r2``r2``fill`
same as numpy.lib.recfunctions.join_by(key, r1, r2, jointype='leftouter'), but allows
r1 have duplicat keys
[Reference: stackoverflow](https://stackoverflow.com/a/53261882/13395693)
Examples:
>>> # to join the following
>>> # [[ 1, 2],
>>> # [ 1, 3], x [[1, 5],
>>> # [ 2, 3]] [4, 7]]
>>> # only first two rows in left will be joined
>>> r1 = np.array([(1, 2), (1,3), (2,3)], dtype=[('seq', 'i4'), ('score', 'i4')])
>>> r2 = np.array([(1, 5), (4,7)], dtype=[('seq', 'i4'), ('age', 'i4')])
>>> joined = join_by_left('seq', r1, r2)
>>> print(joined)
[(1, 2, 5) (1, 3, 5) (2, 3, --)]
>>> print(joined.dtype)
(numpy.record, [('seq', '<i4'), ('score', '<i4'), ('age', '<i4')])
>>> joined[2][2]
masked
>>> joined.tolist()[2][2] == None
True
Args:
key : join
r1 : 1
r2 : 2
fill : cell
Returns:
a numpy array
"""
# figure out the dtype of the result array
descr1 = r1.dtype.descr
descr2 = [d for d in r2.dtype.descr if d[0] not in r1.dtype.names]
descrm = descr1 + descr2
# figure out the fields we'll need from each array
f1 = [d[0] for d in descr1]
f2 = [d[0] for d in descr2]
# cache the number of columns in f1
ncol1 = len(f1)
# get a dict of the rows of r2 grouped by key
rows2 = {}
for row2 in r2:
rows2.setdefault(row2[key], []).append(row2)
# figure out how many rows will be in the result
nrowm = 0
for k1 in r1[key]:
if k1 in rows2:
nrowm += len(rows2[k1])
else:
nrowm += 1
# allocate the return array
# ret = np.full((nrowm, ), fill, dtype=descrm)
_ret = np.recarray(nrowm, dtype=descrm)
if mask:
ret = np.ma.array(_ret, mask=True)
else:
ret = _ret
# merge the data into the return array
i = 0
for row1 in r1:
if row1[key] in rows2:
for row2 in rows2[row1[key]]:
ret[i] = tuple(row1[f1]) + tuple(row2[f2])
i += 1
else:
for j in range(ncol1):
ret[i][j] = row1[j]
i += 1
return ret
def numpy_append_fields(base, names, data, dtypes):
"""`base`
`numpy.lib.recfunctions.rec_append_fields``rec_append_fields`
`data`Object
Example:
>>> #
>>> import numpy
>>> old = np.array([i for i in range(3)], dtype=[('col1', '<f4')])
>>> new_list = [2 * i for i in range(3)]
>>> res = numpy_append_fields(old, 'new_col', new_list, [('new_col', '<f4')])
>>> print(res)
... # doctest: +NORMALIZE_WHITESPACE
[(0., 0.) (1., 2.) (2., 4.)]
>>> #
>>> data = [res['col1'].tolist(), res['new_col'].tolist()]
>>> print(numpy_append_fields(old, ('col3', 'col4'), data, [('col3', '<f4'), ('col4', '<f4')]))
... # doctest: +NORMALIZE_WHITESPACE
[(0., 0., 0.) (1., 1., 2.) (2., 2., 4.)]
Args:
base ([numpy.array]):
name ([type]):
data (list): list
dtypes ([type]): dtype
"""
if isinstance(names, str):
names = [
names,
]
data = [
data,
]
result = np.empty(base.shape, dtype=base.dtype.descr + dtypes)
for col in base.dtype.names:
result[col] = base[col]
for i in range(len(names)):
result[names[i]] = data[i]
return result
def ffill_na(s: np.array) -> np.array:
"""np.NaN
snp.NaNnp.NaN
Examples:
>>> arr = np.arange(6, dtype=np.float32)
>>> arr[3:5] = np.NaN
>>> ffill_na(arr)
... # doctest: +NORMALIZE_WHITESPACE
array([0., 1., 2., 2., 2., 5.], dtype=float32)
>>> arr[0:2] = np.nan
>>> ffill_na(arr)
... # doctest: +NORMALIZE_WHITESPACE
array([nan, nan, 2., 2., 2., 5.], dtype=float32)
Args:
s (np.array): [description]
Returns:
np.array: [description]
"""
mask = np.isnan(s)
idx = np.where(~mask, np.arange(len(mask)), 0)
np.maximum.accumulate(idx, out=idx)
return s[idx]
avg_line_length: 26.222841 | max_line_length: 161 | alphanum_fraction: 0.546633

=== cd1bfaec0e66cc493fec447100454ceabadeff14 ===
path: pepy/domain/read_model.py | size: 838 | ext: py | lang: Python | licenses: [MIT] | repo: daghan/pepy | head: 11e15e0a7af922cf72647dde95b6cc26760ee8ab
stars: null | issues: null | forks: null
from datetime import date
from typing import List
from attr import attrs, attrib
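# The class definitions are cut off in this excerpt; a minimal sketch of an
# attrs-based read model these imports suggest (class and field names are
# assumptions):
@attrs(frozen=True)
class DayDownloads:
    day: date = attrib()
    downloads: int = attrib()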
avg_line_length: 22.052632 | max_line_length: 85 | alphanum_fraction: 0.657518

=== cd1c390db89d68211aa13e58ba3a2a89676c5247 ===
path: finetuning/pretrain_scripts/create_sentiment_mask.py | size: 3,039 | ext: py | lang: Python | licenses: [MIT] | repo: tatsu-lab/mlm_inductive_bias | head: 2d99e2477293036949ba356c88513729244dc1f9
stars: 10 (2021-04-14T22:06:19Z … 2022-01-12T19:41:12Z) | issues: null | forks: 3 (2021-06-06T09:43:14Z … 2022-02-20T00:40:42Z)
"""
This script computes word masks based on sentiment lexicons
"""
import os
import torch
import argparse
from tqdm import tqdm
from transformers import AutoTokenizer
from transformers import GlueDataTrainingArguments as DataTrainingArguments
from transformers import GlueDataset as Dataset
parser = argparse.ArgumentParser()
parser.add_argument("--data-dir", type=str, default="./data/SST-2", help="path to the dir containing lm data.")
parser.add_argument("--lexicon-dir", type=str, default="./data/sentiment_lexicon", help="path to the dir containing sentiment lexicon.")
parser.add_argument("--tokenizer-name", type=str, default="bert-base-uncased", help="name of the tokenizer to use.")
parser.add_argument("--block_size", type=int, default=72, help="maximum length of the mask")
args = parser.parse_args()
positive_words = set()
with open(os.path.join(args.lexicon_dir, "positive-words.txt"), "r", encoding="ISO-8859-1") as f:
for line in f:
line = line.strip()
# skip the initial comments with ; and empty lines
if not line.startswith(";") and len(line) > 0:
positive_words.add(line.lower())
negative_words = set()
with open(os.path.join(args.lexicon_dir, "negative-words.txt"), "r", encoding="ISO-8859-1") as f:
for line in f:
line = line.strip()
# skip the initial comments with ; and empty lines
if not line.startswith(";") and len(line) > 0:
negative_words.add(line.lower())
salient_words = positive_words | negative_words
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
splits = ["train", "dev", "test"]
for split in splits:
with open(os.path.join(args.data_dir, f"{split}.lm"), "r") as f:
all_sens = [s.strip() for s in f.readlines()]
salient_word_masks = torch.zeros(len(all_sens), args.block_size, dtype=torch.bool)
total_word_count = 0
salient_word_count = 0
# Main loop that handles subword tokenization
for i, sen in tqdm(enumerate(all_sens), total=len(all_sens)):
words = sen.split()
curr_idx = 1 # skip the [CLS] token
total_word_count += len(words)
for word in words:
tokens = tokenizer.tokenize(word)
# Need to truncate SQuAD
if curr_idx + len(tokens) > args.block_size:
raise ValueError("Encountered examples longer than block size.")
if word in salient_words:
salient_word_count += 1
for j in range(len(tokens)):
salient_word_masks[i, curr_idx + j] = 1
curr_idx += len(tokens)
print(f"{(salient_word_count/total_word_count):.2%} salient words")
salient_pct = salient_word_masks.any(dim=1).sum().float() / len(all_sens)
print(f"{split} {salient_pct:.2%} documents have salient words")
torch.save(
salient_word_masks,
os.path.join(
args.data_dir,
f"cached_{split}_{args.tokenizer_name.replace('-', '_')}_{args.block_size}.sentiment_mask",
),
)
avg_line_length: 37.518519 | max_line_length: 136 | alphanum_fraction: 0.66535

=== cd26104c6eb130ab45214eda4f1934869ef8a4f2 ===
path: src/data_reader.py | size: 3,524 | ext: py | lang: Python | licenses: [MIT] | repo: jazzsewera/mops-projekt | head: 75924546eb73c266ba81e8e22c68ad939dea19d6
stars: null | issues: null | forks: null
from logger import Logger
from numpy import average
log = Logger(None)
avg_line_length: 24.136986 | max_line_length: 86 | alphanum_fraction: 0.638763

=== cd2798a9ad4d90fcc9bb40c5df39c9d1117edd80 ===
path: fetch.py | size: 5,946 | ext: py | lang: Python | licenses: [MIT] | repo: kirillvarn/grocerycomparator-stat | head: 861f90a2d5b4c2b52d89b6cdb574b722eae2327d
stars: null | issues: null | forks: null
import repo
import export.csv as csv
# CONSTANTS
milk_q = "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%1l%%' OR name ILIKE '%%1 l%%') AND (name ILIKE '%%piim %%' OR name ILIKE '%%piim,%%') AND name NOT ILIKE '%%juust%%' AND name NOT ILIKE '%%kohupiim%%' AND name NOT ILIKE '%%laktoos%%' AND name NOT ILIKE '%%tis%%' AND name NOT ILIKE '%%kookos%%' AND name NOT ILIKE '%%latte%%'"
wheat_kilos = 1
query_to_parse: dict = {
"milk": milk_q,
"cookies": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%kpsised %%' OR name ILIKE '%%kpsis %%') AND name NOT ILIKE '%%koer%%';",
"sugar": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND name ILIKE '%%suhkur%%'",
#"rimi milk": f"{milk_q} AND shop ILIKE '%%rimi%%'",
#"other shop milk": f"{milk_q} AND shop NOT ILIKE '%%rimi%%'",
#"eggs": f"SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%munad %%' OR name ILIKE '%%munad, %%' OR name ILIKE '%%muna,%%') AND name NOT ilike '%%salvrt%%' AND name NOT ILIKE '%%okolaad%%' AND name NOT ILIKE '%%Martsipani%%' AND name NOT ILIKE '%%SELVERI KK%%' AND name NOT ILIKE '%%kitkat%%'" ,
"wheat": f"SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%{wheat_kilos}kg%%' OR name ILIKE '%%{wheat_kilos} kg%%') AND (name ILIKE '%%nisujahu %%' OR name ILIKE '%%nisujahu,%%')",
"beef": f"SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%veise %%' OR name ILIKE '%%veisepraad%%' OR name ILIKE '%%lihaveise%%') AND name NOT ILIKE '%%koera%%' AND name NOT ILIKE '%%pelmeen%%' AND name NOT ILIKE '%%pltsama%%' AND name NOT ILIKE '%%sink%%'",
"tomatoes": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%tomat %%' OR name ILIKE '%%tomat, %%') AND name NOT ILIKE '%%pasta%%' AND name NOT ILIKE '%%0g%%' AND name NOT ILIKE '%%0 g%%' AND name NOT ILIKE '%%harilik%%' AND name NOT ILIKE '%%krpsud%%' AND name NOT ILIKE '%%marinaad%%' AND name NOT ILIKE '%%eine%%'",
#"cucumber": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND name ILIKE '%%kg%%' AND (name ILIKE '%%kurk %%' OR name ILIKE '%%kurk,%%')",
#"banana": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%kg%%' OR name ILIKE '%%chiq%%') AND (name ILIKE '%%banaan %%' OR name ILIKE '%%banaan,%%')",
"apple": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND name ILIKE '%%kg%%' AND (name ILIKE '%%un %%' OR name ILIKE '%%un,%%')",
"pear": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND name ILIKE '%%kg%%' AND (name ILIKE '%%pirn %%' OR name ILIKE '%%pirn,%%')",
"pizza": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%pizza%%' OR name ILIKE '%%pitsa%%' AND name NOT ILIKE '%%pitsamaitseline%%')",
"pig meat": f"SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%sea kaela%%' OR name ILIKE '%%sea vlisfilee%%' OR name ILIKE '%%sea sisefilee%%')",
"cake": f"SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%kook,%%' OR name ILIKE '%%kook%%') AND name NOT ILIKE '%%van kook%%' AND name NOT ILIKE '%%selveri kk%%' AND name NOT ILIKE '%%kookos%%' AND name NOT LIKE '%%smuuti%%' AND name NOT ILIKE '%%pannkook%%'",
"chicken": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%broileri rinnafilee%%' OR name ILIKE '%%pooltiivad%%' OR name ILIKE '%%poolkoivad%%' OR name ILIKE '%%kanafilee%%' OR name ILIKE '%%broilerifilee%%') AND name NOT ILIKE '%%HAU-HAU%%'"
}
# def save_to_excel(dataset, sheet_name: str = "Sheet") -> None:
# tables = [i[0] for i in main.get_tables(main.connect(db="naive_products"))]
# # tables.remove("initial_products")
# header = ["Product name", "Shop name"] + tables
# data = []
# for item in dataset:
# prices = get_normalized_price(
# [dataset[item]["prices"][value]
# for value in dataset[item]["prices"]]
# )
# prices = get_trend(prices)
# value = [item, dataset[item]["shop"]] + prices
# data.append(value)
# table.append_header(header, sheet_name)
# table.put_data(data, sheet_name)
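# get_products_by_name() and save_to_csv() are called below but missing from
# this excerpt; assumed minimal stand-ins. The `repo` and `export.csv` APIs
# are project-local and unknown, so `repo.fetch` and `csv.save` are
# hypothetical names; `repo.connect`/`get_tables` mirror the commented-out
# excel helper above. Each query's "%s" placeholder takes a table name.
def get_products_by_name(query: str) -> dict:
    conn = repo.connect(db="naive_products")
    tables = [t[0] for t in repo.get_tables(conn)]
    return {table: repo.fetch(conn, query % table) for table in tables}


def save_to_csv(name: str, products: dict) -> None:
    csv.save(f"{name}.csv", products)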
for i in query_to_parse:
    products = get_products_by_name(query=query_to_parse[i])
    save_to_csv(i, products)
avg_line_length: 58.871287 | max_line_length: 365 | alphanum_fraction: 0.601245

=== cd27a3a7d166518d8d7678101792de0e23b578ef ===
path: code1.py | size: 1,755 | ext: py | lang: Python | licenses: [MIT] | repo: roshangol/executed-path-visualize | head: 1759c12b0048fe117205990b151d2f5f57ad9616
stars: null | issues: null | forks: null
# EX1
# if x < y:
# y = 0
# x = x + 1
# else:
# x = y
max(30, 28, 18)
# def triangleType(a, b, c):
# isATriangle = False
# if (a < b + c) and\
# (b < a + c) and\
# (c < a + b):
# isATriangle = True
# if isATriangle:
# if (a == b) and (b == c):
# print("the triangle was a EQUILATERAL")
# elif (a != b) and \
# (a != c) and \
# (b != c):
# print("the triangle was a SCALENE")
# else:
# print("invalid")
#
# triangleType(3, 5, 8)
# def testfunc(x, y):
# if x >= 0 and y >= 0:
# if y*y >= x*10 and y <= math.sin(math.radians(x*30))*25:
# if y >= math.cos(math.radians(x*40))*15:
# print('oooookk')
# testfunc(2, 3)
# EX2
# if (x < y):
# y = 0
# x = x + 1
# EX3
# if x < y:
# return
# print(x)
# return
# EX4
# x = 0
# while (x < y):
# y = f(x,y)
# x = x + 1
# EX5
# for x in range(10):
# y = f(x,y)
# a = [2 * x for x in y if x > 0 for y in z if y[0] < 3]
#
# digits = [0, 1, 5]
# a = 0
#
# for i in digits:
# a += i
# if i == 5:
# print("5 in list")
# break
# else:
# print("out of the loop")
# try:
# b = b + 5
# except KeyError:
# a += 1
# except ZeroDivisionError:
# a += 2
# else:
# a += 3
# finally:
# b += 1
# a = a - b
#
# x = 0
# while(x < y):
# y = f(x, y)
# if(y == 0):
# break
# elif(y < 0):
# y = y * 2
# continue
# x = x + 1
avg_line_length: 16.25 | max_line_length: 66 | alphanum_fraction: 0.4

=== cd28f531641b97aa10ded06e3c6b7fdb2de0d2e7 ===
path: GameProject/dice.py | size: 1,193 | ext: py | lang: Python | licenses: [CC0-1.0] | repo: CreativeUsernameThatWontInsultAnyone/GameProject | head: 998274e4587d93ff0564af174f4fc1e3a3e60174
stars: 1 (2021-11-13T17:14:03Z … 2021-11-13T17:14:03Z) | issues: null | forks: null
import random
import time
while True:
    wmsg = "Good morning!"
    events = {
        1: "calm",
        2: "calm",
        3: "rainy",
        4: "rainy",
        5: "rainy",
        6: "thunder",
    }
    array = [1, 2, 3, 4, 5, 6]  ## Array used to get events or smth
    output = random.choice(array)
    defevent = events[output]
    if defevent == "calm":
        print(wmsg, "It's a sunny day outside.")
        clear()
    elif defevent == "rainy":
        print(wmsg, "You can hear the droplets falling onto your tent.")
        clear()
    else:
        print(wmsg, "You hear thunder rumbling outside")
        clear()
    # remove the rolled value; the original `del array[output - 1]` breaks
    # once the list has shrunk, since the value no longer matches its index
    array.remove(output)
    if len(array) == 0:  ## Array reset
        # the original called array.append('1','2','3','4','5','6'), but
        # append takes a single argument and the values should stay ints
        array.extend([1, 2, 3, 4, 5, 6])
    ## Actually, we could throw out them specifics outta window and use it's skelly as
    ## our primary dice. def could take out the variables from other files and juggle them to our delight
    break
avg_line_length: 28.404762 | max_line_length: 105 | alphanum_fraction: 0.506287

=== cd2c1598eaae27b2b8504f6e96bc81711b260dde ===
path: multivision/oa_image_io.py | size: 774 | ext: py | lang: Python | licenses: [MIT] | repo: olaals/tpktools | head: 50416ca554809e3d2f364b25531c78cf4751311c
stars: null | issues: null | forks: null
import numpy as np
import OpenEXR as exr
import cv2
import Imath
import matplotlib.pyplot as plt
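# The module body is cut off in this excerpt; a minimal sketch of the kind of
# EXR loader these imports suggest (the function name and RGB channel layout
# are assumptions):
def read_exr_as_array(path):
    exr_file = exr.InputFile(path)
    dw = exr_file.header()["dataWindow"]
    height = dw.max.y - dw.min.y + 1
    width = dw.max.x - dw.min.x + 1
    pt = Imath.PixelType(Imath.PixelType.FLOAT)
    # read each channel as raw float32 bytes and reshape to the image size
    channels = [
        np.frombuffer(exr_file.channel(c, pt), dtype=np.float32).reshape(height, width)
        for c in ("R", "G", "B")
    ]
    return np.stack(channels, axis=-1)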
avg_line_length: 29.769231 | max_line_length: 90 | alphanum_fraction: 0.630491

=== cd314573d937025d1a50953b27cb47b89f485e85 ===
path: yggdrasil/serialize/FunctionalSerialize.py | size: 2,972 | ext: py | lang: Python | licenses: [BSD-3-Clause] | repo: astro-friedel/yggdrasil | head: 5ecbfd083240965c20c502b4795b6dc93d94b020
stars: 22 (2019-02-05T15:20:07Z … 2022-02-25T09:00:40Z) | issues: 48 (2019-02-15T20:41:24Z … 2022-03-16T20:52:02Z) | forks: 16 (2019-04-27T03:36:40Z … 2021-12-02T09:47:06Z)
from yggdrasil.serialize.SerializeBase import SerializeBase
avg_line_length: 43.072464 | max_line_length: 84 | alphanum_fraction: 0.654778

=== cd3156dd0e4a0a15e50945d8d1d506c7eefae69c ===
path: wordy_chat.py | size: 2,046 | ext: py | lang: Python | licenses: [Apache-2.0] | repo: thecodingchannel/wordy-discord-bot-tutorial | head: be70d237abcb302b6516f985ae900c61b598296a
stars: null | issues: null | forks: 1 (2022-03-09T04:55:56Z … 2022-03-09T04:55:56Z)
'''
This file is the glue between the Discord bot and the game logic.
'''
from wordle_logic import evaluate_guess, generate_new_word
from wordy_types import ActiveGame, EndResult, LetterState


def begin_game() -> ActiveGame:
    """
    Begin a game for a user.
    """
    # Select a word
    answer = generate_new_word()

    # Create and store new game state
    new_game = ActiveGame(answer=answer)
    return new_game


# NOTE: the colored-square characters in the doctests and string literals
# below were stripped in this dump; they are restored assuming the standard
# Wordle squares (⬛ absent, 🟨 present, 🟩 correct).
def enter_guess(guess: str, game: ActiveGame) -> EndResult:
    """
    Enter a guess for a user's game, updating the game state.

    >>> game=ActiveGame(answer="abcd")
    >>> enter_guess("aaaa", game) == EndResult.PLAYING
    True
    >>> render_result(game.results[-1])
    '🟩⬛⬛⬛'

    >>> game=ActiveGame(answer="abca")
    >>> enter_guess("aaaz", game) == EndResult.PLAYING
    True
    >>> render_result(game.results[-1])
    '🟩🟨⬛⬛'

    >>> game=ActiveGame(answer="abca")
    >>> enter_guess("aaab", game) == EndResult.PLAYING
    True
    >>> render_result(game.results[-1])
    '🟩🟨⬛🟨'
    """
    if game.state != EndResult.PLAYING:
        return game.state

    # Evaluate guess
    result = tuple(evaluate_guess(guess, game.answer))

    # Update game state
    game.board_state.append(guess)
    game.results.append(result)

    # Check if game is over
    if result == (LetterState.CORRECT,)*len(game.answer):
        game.state = EndResult.WIN
    elif len(game.board_state) > len(game.answer):
        game.state = EndResult.LOSE

    return game.state


def render_result(result: tuple[LetterState]) -> str:
    """
    Render a result to a string.

    >>> render_result((LetterState.ABSENT, LetterState.PRESENT, LetterState.CORRECT))
    '⬛🟨🟩'
    >>> render_result((LetterState.ABSENT,)*4)
    '⬛⬛⬛⬛'
    """
    absent, present, correct = '⬛', '🟨', '🟩'
    return "".join(
        absent if state == LetterState.ABSENT else
        present if state == LetterState.PRESENT else correct
        for state in result
    )
avg_line_length: 26.230769 | max_line_length: 86 | alphanum_fraction: 0.608504

=== cd318b68f4231a08be74b1a2c64d0b4969b29c51 ===
path: NNet/utils/readNNet.py | size: 2,422 | ext: py | lang: Python | licenses: [BSD-3-Clause] | repo: noyahoch/Marabou | head: 03eb551498287e5372d462e3c2ad4fcc3210a5fa
stars: 7 (2020-01-27T21:25:49Z … 2022-01-07T04:37:37Z) | issues: 1 (2022-01-25T17:41:54Z … 2022-01-26T02:27:51Z) | forks: 3 (2020-03-14T17:12:17Z … 2022-03-16T09:50:46Z)
import numpy as np
def readNNet(nnetFile, withNorm=False):
'''
Read a .nnet file and return list of weight matrices and bias vectors
Inputs:
nnetFile: (string) .nnet file to read
withNorm: (bool) If true, return normalization parameters
Returns:
weights: List of weight matrices for fully connected network
biases: List of bias vectors for fully connected network
'''
# Open NNet file
f = open(nnetFile,'r')
# Skip header lines
line = f.readline()
while line[:2]=="//":
line = f.readline()
# Extract information about network architecture
record = line.split(',')
numLayers = int(record[0])
inputSize = int(record[1])
line = f.readline()
record = line.split(',')
layerSizes = np.zeros(numLayers+1,'int')
for i in range(numLayers+1):
layerSizes[i]=int(record[i])
# Skip extra obsolete parameter line
f.readline()
# Read the normalization information
line = f.readline()
inputMins = [float(x) for x in line.strip().split(",")[:-1]]
line = f.readline()
inputMaxes = [float(x) for x in line.strip().split(",")[:-1]]
line = f.readline()
means = [float(x) for x in line.strip().split(",")[:-1]]
line = f.readline()
ranges = [float(x) for x in line.strip().split(",")[:-1]]
# Initialize list of weights and biases
weights = [np.zeros((layerSizes[i],layerSizes[i+1])) for i in range(numLayers)]
biases = [np.zeros(layerSizes[i+1]) for i in range(numLayers)]
# Read remainder of file and place each value in the correct spot in a weight matrix or bias vector
layer=0
i=0
j=0
line = f.readline()
record = line.split(',')
while layer+1 < len(layerSizes):
while i<layerSizes[layer+1]:
while record[j]!="\n":
weights[layer][j,i] = float(record[j])
j+=1
j=0
i+=1
line = f.readline()
record = line.split(',')
i=0
while i<layerSizes[layer+1]:
biases[layer][i] = float(record[0])
i+=1
line = f.readline()
record = line.split(',')
layer+=1
i=0
j=0
f.close()
if withNorm:
return weights, biases, inputMins, inputMaxes, means, ranges
return weights, biases
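if __name__ == '__main__':
    # Usage sketch; 'network.nnet' is a placeholder path, not shipped here.
    weights, biases = readNNet('network.nnet')
    print([w.shape for w in weights], [b.shape for b in biases])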
| 27.83908 | 103 | 0.562758 |
cd336f08882633e139c7b8cf8e6bbf9503123d24
| 13,668 |
py
|
Python
|
models/model.py
|
hearai/hearai
|
2f2bc2923fa2bb170d9ed895c3f638e99811442f
|
[
"MIT"
] | 16 |
2021-12-16T20:19:31.000Z
|
2022-03-19T15:59:23.000Z
|
models/model.py
|
hearai/hearai
|
2f2bc2923fa2bb170d9ed895c3f638e99811442f
|
[
"MIT"
] | 34 |
2021-12-21T19:33:31.000Z
|
2022-03-31T19:04:39.000Z
|
models/model.py
|
hearai/hearai
|
2f2bc2923fa2bb170d9ed895c3f638e99811442f
|
[
"MIT"
] | 5 |
2021-12-18T22:35:20.000Z
|
2022-02-20T12:26:39.000Z
|
from typing import Dict
import neptune.new as neptune
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
from config import NEPTUNE_API_TOKEN, NEPTUNE_PROJECT_NAME
from sklearn.metrics import classification_report, f1_score
from utils.summary_loss import SummaryLoss
from math import ceil
from models.feature_extractors.multi_frame_feature_extractor import (
MultiFrameFeatureExtractor,
)
from models.model_loader import ModelLoader
from models.common.simple_sequential_model import SimpleSequentialModel
from models.landmarks_models.lanmdarks_sequential_model import LandmarksSequentialModel
from models.head_models.head_sequential_model import HeadClassificationSequentialModel
# initialize neptune logging
| 41.92638 | 136 | 0.592479 |
cd33abe036b992ac7ac194a0541c5439617437c4
| 2,305 |
py
|
Python
|
solutions/day09/solution.py
|
dbjohnson/advent-of-code-2021
|
2ed1d30362afa0a73c890730cea46de3291be21f
|
[
"MIT"
] | null | null | null |
solutions/day09/solution.py
|
dbjohnson/advent-of-code-2021
|
2ed1d30362afa0a73c890730cea46de3291be21f
|
[
"MIT"
] | null | null | null |
solutions/day09/solution.py
|
dbjohnson/advent-of-code-2021
|
2ed1d30362afa0a73c890730cea46de3291be21f
|
[
"MIT"
] | null | null | null |
from functools import lru_cache
from collections import defaultdict
import pandas as pd
import numpy as np
with open('input.txt') as fh:
depthmap = pd.DataFrame([{
'row': row,
'col': col,
'height': int(d)
}
for row, line in enumerate(fh)
for col, d in enumerate(line.strip())
]).pivot_table(
index='row',
columns='col',
values='height'
).values
idx = (
# right neighbor
np.pad(
depthmap[:, :-1] < depthmap[:, 1:],
((0, 0), (0, 1)),
'constant',
constant_values=1
) &
# left neighbor
np.pad(
depthmap[:, 1:] < depthmap[:, :-1],
((0, 0), (1, 0)),
'constant',
constant_values=1
) &
# lower neighbor
np.pad(
depthmap[:-1, :] < depthmap[1:, :],
((0, 1), (0, 0)),
'constant',
constant_values=1
) &
# upper neighbor
np.pad(
depthmap[1:, :] < depthmap[:-1, :],
((1, 0), (0, 0)),
'constant',
constant_values=1
)
)
print('part 1', (depthmap[np.where(idx)] + 1).sum())
# lru_cache here is essentially cheap DP - once we've calculated
# the basin for any point A, we know the basin for any point B that
# flows through point A
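# Reconstruction (hedged) of the helper the loops below rely on; the original
# definition is missing from this excerpt. It walks downhill from (r, c) to the
# basin's low point; height-9 cells belong to no basin, so they map to None.
@lru_cache(maxsize=None)
def lowpoint(r, c):
    h = depthmap[r, c]
    if h == 9:
        return None
    # in-bounds 4-neighbors
    neighbors = [
        (rr, cc)
        for rr, cc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1))
        if 0 <= rr < depthmap.shape[0] and 0 <= cc < depthmap.shape[1]
    ]
    best = min(neighbors, key=lambda p: depthmap[p])
    if depthmap[best] >= h:
        return (r, c)  # no lower neighbor: this cell is the basin's low point
    return lowpoint(*best)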
lowpoint_to_basin = defaultdict(list)
for r in range(depthmap.shape[0]):
for c in range(depthmap.shape[1]):
lowpoint_to_basin[lowpoint(r, c)].append((r, c))
print(
'part 2',
np.prod(sorted([
len(points)
for basin, points in lowpoint_to_basin.items()
if basin
])[-3:])
)
# part 1 now that we solved part 2...
print(
'part 1 redux',
sum([
depthmap[lowpoint] + 1
for lowpoint in lowpoint_to_basin
if lowpoint
])
)
| 21.342593 | 67 | 0.516269 |
cd3470135bfe7a2b8866c6a268c9e629dad7a8b7
| 3,467 |
py
|
Python
|
docs/conf.py
|
ocefpaf/pystac-client
|
ddf0e0566b2b1783a4d32d3d77f9f51b80270df3
|
[
"Apache-2.0"
] | 52 |
2021-04-15T23:24:12.000Z
|
2022-03-09T23:02:27.000Z
|
docs/conf.py
|
ocefpaf/pystac-client
|
ddf0e0566b2b1783a4d32d3d77f9f51b80270df3
|
[
"Apache-2.0"
] | 119 |
2021-04-13T11:42:01.000Z
|
2022-02-24T10:02:35.000Z
|
docs/conf.py
|
ocefpaf/pystac-client
|
ddf0e0566b2b1783a4d32d3d77f9f51b80270df3
|
[
"Apache-2.0"
] | 14 |
2021-04-13T19:00:19.000Z
|
2022-02-23T09:17:30.000Z
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import re
import subprocess
import sys
from pathlib import Path
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
sys.path.insert(0, str(Path(__file__).parent.parent.parent.resolve()))
from pystac_client import __version__ # noqa: E402
git_branch = (
subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
.decode("utf-8")
.strip()
)
# -- Project information -----------------------------------------------------
project = 'pystac-client'
copyright = '2021, Jon Duckworth'
author = 'Matthew Hanson, Jon Duckworth'
github_user = 'stac-utils'
github_repo = 'pystac-client'
package_description = 'A Python client for the STAC and STAC-API specs'
# The full version, including alpha/beta/rc tags
version = re.fullmatch(r'^(\d+\.\d+\.\d).*$', __version__).group(1)
release = __version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.intersphinx', 'sphinx.ext.napoleon',
'sphinx.ext.extlinks', 'sphinxcontrib.fulltoc', 'nbsphinx', 'myst_parser'
]
extlinks = {
"tutorial": (
"https://github.com/stac-utils/pystac-client/"
"tree/{}/docs/tutorials/%s".format(git_branch),
"tutorial",
)
}
nbsphinx_allow_errors = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
source_suffix = [".rst", ".md", ".ipynb"]
exclude_patterns = ['build/*']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
html_theme_options = {
# 'sidebar_collapse': False,
'fixed_sidebar': True,
'github_button': True,
'github_user': github_user,
'github_repo': github_repo,
'description': package_description
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# -- Options for intersphinx extension ---------------------------------------
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'requests': ('https://requests.readthedocs.io/en/master', None),
'pystac': ('https://pystac.readthedocs.io/en/latest', None),
'dateutil': ('https://dateutil.readthedocs.io/en/stable/', None),
}
# -- Options for autodoc extension -------------------------------------------
autodoc_typehints = "none"
| 33.660194 | 97 | 0.654168 |
cd358b914861d3a881968cfd805aae9c0f7bed42
| 1,345 |
py
|
Python
|
modules/lexer/token.py
|
DavidMacDonald11/sea-to-c-transpiler-python-based
|
20c41931346b13d4bf2a12e96037f44b1add8145
|
[
"MIT"
] | null | null | null |
modules/lexer/token.py
|
DavidMacDonald11/sea-to-c-transpiler-python-based
|
20c41931346b13d4bf2a12e96037f44b1add8145
|
[
"MIT"
] | 11 |
2021-04-22T13:09:34.000Z
|
2022-01-29T22:53:58.000Z
|
modules/lexer/token.py
|
DavidMacDonald11/sea-to-c-transpiler-python-based
|
20c41931346b13d4bf2a12e96037f44b1add8145
|
[
"MIT"
] | null | null | null |
from .token_types import TT
from .token_types import BadTT
from .position import Position
from .keywords import is_keyword
from .keywords import keyword_declared_type
from ..lexer import errors
| 33.625 | 80 | 0.665428 |
cd36eb6513428b0c0f981f91eaea0aa21154992a
| 689 |
py
|
Python
|
cb_scripts/nums_square_cube.py
|
christopher-burke/python-scripts
|
bdbea2456130e0958b6a6ab8d138f4f19b39b934
|
[
"MIT"
] | 1 |
2022-02-05T06:39:05.000Z
|
2022-02-05T06:39:05.000Z
|
cb_scripts/nums_square_cube.py
|
christopher-burke/python-scripts
|
bdbea2456130e0958b6a6ab8d138f4f19b39b934
|
[
"MIT"
] | null | null | null |
cb_scripts/nums_square_cube.py
|
christopher-burke/python-scripts
|
bdbea2456130e0958b6a6ab8d138f4f19b39b934
|
[
"MIT"
] | 1 |
2021-06-10T22:04:35.000Z
|
2021-06-10T22:04:35.000Z
|
#!/usr/bin/env python3
"""Squares and Cubes for a range of numbers.
Given a start and end, calculate the Square x**2 and
the Cube x**3 for all numbers.
Example of generator and functools.partial.
"""
from functools import partial
def power(base, exponent):
"""Raise a base to the exponent."""
return base ** exponent
square = partial(power, exponent=2)
cube = partial(power, exponent=3)
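def main(start, end):
    """Yield (number, square, cube) for start..end inclusive.

    Reconstructed generator (the original definition is missing from this
    excerpt); it matches the docstring's generator example and the loop below.
    """
    for x in range(start, end + 1):
        yield x, square(x), cube(x)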
if __name__ == "__main__":
print("number\tsquare\tcube")
for x in main(1, 10):
print("{}\t{}\t{}".format(*x))
| 20.264706 | 63 | 0.651669 |
cd36ecd76329e8d74ce6fdd1bc24ac05a02cc921
| 101 |
py
|
Python
|
Darlington/phase2/LIST/day 41 solution/qtn2.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 6 |
2020-05-23T19:53:25.000Z
|
2021-05-08T20:21:30.000Z
|
Darlington/phase2/LIST/day 41 solution/qtn2.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 8 |
2020-05-14T18:53:12.000Z
|
2020-07-03T00:06:20.000Z
|
Darlington/phase2/LIST/day 41 solution/qtn2.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 39 |
2020-05-10T20:55:02.000Z
|
2020-09-12T17:40:59.000Z
|
#program to find the index of an item in a specified list.
num =[10, 30, 4, -6]
print(num.index(30))
| 25.25 | 58 | 0.683168 |
cd36fd075f7cd95707b64e346e7a7db96e365eac
| 1,748 |
py
|
Python
|
mozdns/txt/tests.py
|
jlin/inventory
|
c098c98e570c3bf9fadfd811eb75e1213f6ea428
|
[
"BSD-3-Clause"
] | 22 |
2015-01-16T01:36:32.000Z
|
2020-06-08T00:46:18.000Z
|
mozdns/txt/tests.py
|
jlin/inventory
|
c098c98e570c3bf9fadfd811eb75e1213f6ea428
|
[
"BSD-3-Clause"
] | 8 |
2015-12-28T18:56:19.000Z
|
2019-04-01T17:33:48.000Z
|
mozdns/txt/tests.py
|
jlin/inventory
|
c098c98e570c3bf9fadfd811eb75e1213f6ea428
|
[
"BSD-3-Clause"
] | 13 |
2015-01-13T20:56:22.000Z
|
2022-02-23T06:01:17.000Z
|
from django.test import TestCase
from django.core.exceptions import ValidationError
from mozdns.txt.models import TXT
from mozdns.domain.models import Domain
| 29.627119 | 69 | 0.587529 |
cd39f1397ad328542fed8bb62d6c47dc4c191597
| 6,698 |
py
|
Python
|
xtesting/tests/unit/core/test_behaveframework.py
|
collivier/functest-xtesting
|
17739d718901a10f7ec0aaf9a6d53141294a347d
|
[
"Apache-2.0"
] | 1 |
2020-05-15T12:58:58.000Z
|
2020-05-15T12:58:58.000Z
|
xtesting/tests/unit/core/test_behaveframework.py
|
collivier/functest-xtesting
|
17739d718901a10f7ec0aaf9a6d53141294a347d
|
[
"Apache-2.0"
] | null | null | null |
xtesting/tests/unit/core/test_behaveframework.py
|
collivier/functest-xtesting
|
17739d718901a10f7ec0aaf9a6d53141294a347d
|
[
"Apache-2.0"
] | 3 |
2018-02-28T15:55:14.000Z
|
2022-02-24T15:46:12.000Z
|
#!/usr/bin/env python
# Copyright (c) 2019 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
"""Define the classes required to fully cover behave."""
import logging
import os
import unittest
import mock
from xtesting.core import behaveframework
__author__ = "Deepak Chandella <[email protected]>"
def test_parse_results_exc_console(self):
self.test_parse_results_exc(console=True)
if __name__ == "__main__":
logging.disable(logging.CRITICAL)
unittest.main(verbosity=2)
| 37.418994 | 77 | 0.640042 |
cd3a28ba018f4c08dd5b0ec2fb2ba69c859e803c
| 963 |
py
|
Python
|
data/test/python/cd3a28ba018f4c08dd5b0ec2fb2ba69c859e803cdjango.py
|
harshp8l/deep-learning-lang-detection
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
[
"MIT"
] | 84 |
2017-10-25T15:49:21.000Z
|
2021-11-28T21:25:54.000Z
|
data/test/python/cd3a28ba018f4c08dd5b0ec2fb2ba69c859e803cdjango.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 5 |
2018-03-29T11:50:46.000Z
|
2021-04-26T13:33:18.000Z
|
data/test/python/cd3a28ba018f4c08dd5b0ec2fb2ba69c859e803cdjango.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 24 |
2017-11-22T08:31:00.000Z
|
2022-03-27T01:22:31.000Z
|
# coding=utf-8
from fabric.api import env, run
COMMAND_COLLECTSTATIC = 'collectstatic'
COMMAND_SYNCDB = 'syncdb'
COMMAND_MIGRATE = 'migrate'
_default_command = '{python} {manage} {command}'
_commands_list = {
COMMAND_COLLECTSTATIC: 'yes yes | {python} {manage} {command}',
COMMAND_MIGRATE: '{python} {manage} {command} --noinput',
}
| 24.075 | 67 | 0.677051 |
cd3da08c421072d75aa5562437930fcd09889489
| 8,820 |
py
|
Python
|
commercialoperator/components/bookings/utils.py
|
wilsonc86/ledger
|
a60a681e547f37e4ac81cb93dffaf90aea8c8151
|
[
"Apache-2.0"
] | null | null | null |
commercialoperator/components/bookings/utils.py
|
wilsonc86/ledger
|
a60a681e547f37e4ac81cb93dffaf90aea8c8151
|
[
"Apache-2.0"
] | null | null | null |
commercialoperator/components/bookings/utils.py
|
wilsonc86/ledger
|
a60a681e547f37e4ac81cb93dffaf90aea8c8151
|
[
"Apache-2.0"
] | null | null | null |
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.conf import settings
from django.core.exceptions import ValidationError
from datetime import datetime, timedelta
from commercialoperator.components.main.models import Park
from commercialoperator.components.proposals.models import Proposal
from ledger.checkout.utils import create_basket_session, create_checkout_session, calculate_excl_gst
from ledger.payments.models import Invoice
from ledger.payments.utils import oracle_parser
import json
from decimal import Decimal
from commercialoperator.components.bookings.models import Booking, ParkBooking, ApplicationFee
import logging
logger = logging.getLogger('payment_checkout')
def create_booking(request, proposal_id):
""" Create the ledger lines - line items for invoice sent to payment system """
#import ipdb; ipdb.set_trace()
booking = Booking.objects.create(proposal_id=proposal_id)
    tbody = json.loads(request.POST['payment'])['tbody']
    park_booking = None  # guard: stays None if no row creates a booking below
    for row in tbody:
park_id = row[0]['value']
arrival = row[1]
no_adults = int(row[2]) if row[2] else 0
no_children = int(row[3]) if row[3] else 0
no_free_of_charge = int(row[4]) if row[4] else 0
park = Park.objects.get(id=park_id)
        if any([no_adults, no_children, no_free_of_charge]):
park_booking = ParkBooking.objects.create(
booking = booking,
park_id = park_id,
arrival = datetime.strptime(arrival, '%Y-%m-%d').date(),
no_adults = no_adults,
no_children = no_children,
no_free_of_charge = no_free_of_charge,
cost = no_adults*park.adult_price + no_children*park.child_price
)
if not park_booking:
raise ValidationError('Must have at least one person visiting the park')
return booking
def get_session_application_invoice(session):
""" Application Fee session ID """
if 'cols_app_invoice' in session:
application_fee_id = session['cols_app_invoice']
else:
raise Exception('Application not in Session')
try:
#return Invoice.objects.get(id=application_invoice_id)
#return Proposal.objects.get(id=proposal_id)
return ApplicationFee.objects.get(id=application_fee_id)
except Invoice.DoesNotExist:
raise Exception('Application not found for application {}'.format(application_fee_id))
def set_session_application_invoice(session, application_fee):
""" Application Fee session ID """
session['cols_app_invoice'] = application_fee.id
session.modified = True
def delete_session_application_invoice(session):
""" Application Fee session ID """
if 'cols_app_invoice' in session:
del session['cols_app_invoice']
session.modified = True
def create_fee_lines(proposal, invoice_text=None, vouchers=[], internal=False):
""" Create the ledger lines - line item for application fee sent to payment system """
#import ipdb; ipdb.set_trace()
now = datetime.now().strftime('%Y-%m-%d %H:%M')
price = proposal.application_type.application_fee
line_items = [{
'ledger_description': 'Application Fee - {} - {}'.format(now, proposal.lodgement_number),
'oracle_code': proposal.application_type.oracle_code,
'price_incl_tax': price,
'price_excl_tax': price if proposal.application_type.is_gst_exempt else calculate_excl_gst(price),
'quantity': 1,
}]
logger.info('{}'.format(line_items))
return line_items
def create_lines(request, invoice_text=None, vouchers=[], internal=False):
""" Create the ledger lines - line items for invoice sent to payment system """
#import ipdb; ipdb.set_trace()
lines = []
tbody = json.loads(request.POST['payment'])['tbody']
for row in tbody:
park_id = row[0]['value']
arrival = row[1]
no_adults = int(row[2]) if row[2] else 0
no_children = int(row[3]) if row[3] else 0
no_free_of_charge = int(row[4]) if row[4] else 0
park= Park.objects.get(id=park_id)
if no_adults > 0:
lines.append(add_line_item(park, arrival, 'Adult', price=park.adult_price, no_persons=no_adults))
if no_children > 0:
lines.append(add_line_item(park, arrival, 'Child', price=park.child_price, no_persons=no_children))
if no_free_of_charge > 0:
lines.append(add_line_item(park, arrival, 'Free', price=0.0, no_persons=no_free_of_charge))
return lines
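def add_line_item(park, arrival, admission_type, price, no_persons):
    """Hedged sketch of the ledger-line helper called above; its real body is
    not in this excerpt. The attribute names used on `park` (name, oracle_code)
    are assumptions inferred from the models imported at the top."""
    total = Decimal(price) * no_persons
    return {
        'ledger_description': '{} - {} - {} (x {})'.format(park.name, admission_type, arrival, no_persons),
        'oracle_code': park.oracle_code,
        'price_incl_tax': total,
        'price_excl_tax': calculate_excl_gst(total),
        'quantity': 1,
    }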
| 40.090909 | 175 | 0.675057 |
cd3eb5a68afae376fb0cdba9c6455dc19c74e74f
| 3,552 |
py
|
Python
|
src/application/models.py
|
Chitrank-Dixit/WikiG
|
74d99a16afc635a991c17de8d237eb4f6eccbe86
|
[
"CNRI-Python"
] | 1 |
2015-11-05T03:51:44.000Z
|
2015-11-05T03:51:44.000Z
|
src/application/models.py
|
Chitrank-Dixit/WikiG
|
74d99a16afc635a991c17de8d237eb4f6eccbe86
|
[
"CNRI-Python"
] | null | null | null |
src/application/models.py
|
Chitrank-Dixit/WikiG
|
74d99a16afc635a991c17de8d237eb4f6eccbe86
|
[
"CNRI-Python"
] | null | null | null |
"""
models.py
App Engine datastore models
Documentation: https://developers.google.com/appengine/docs/python/ndb/entities
"""
from google.appengine.ext import ndb
from google.appengine.ext import blobstore
from google.appengine.api import users
import functools
import flask
from flaskext import login
from flaskext.login import current_user
from flaskext import oauth
from hashlib import md5
import util
import model
import config
from application import app
import urls
# from application.metadata import Session, Base
################################################################################
# Flaskext Login
################################################################################
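class AnonymousUser(object):
    """Minimal stand-in for the anonymous-user class referenced below; the
    original definition is not in this excerpt. It implements the interface
    the old flask-login extension expects."""
    def is_authenticated(self):
        return False

    def is_active(self):
        return False

    def is_anonymous(self):
        return True

    def get_id(self):
        return None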
login_manager = login.LoginManager()
login_manager.anonymous_user = AnonymousUser
login_manager.init_app(app)
login_manager.login_view = 'signin'
| 23.368421 | 155 | 0.677928 |
cd3ebb35376a9ad6bb35907b043a70f74ff3d06d
| 2,488 |
py
|
Python
|
driver.py
|
Nobregaigor/Robot-path-tracking-and-obstacle-avoidance-simulation
|
23ab060316c5978724b3f109d851ea33206d0e10
|
[
"MIT"
] | 6 |
2020-05-01T23:33:13.000Z
|
2021-12-18T08:13:50.000Z
|
driver.py
|
Nobregaigor/Robot-path-tracking-and-obstacle-avoidance-simulation--Python
|
23ab060316c5978724b3f109d851ea33206d0e10
|
[
"MIT"
] | null | null | null |
driver.py
|
Nobregaigor/Robot-path-tracking-and-obstacle-avoidance-simulation--Python
|
23ab060316c5978724b3f109d851ea33206d0e10
|
[
"MIT"
] | 2 |
2020-05-06T11:54:10.000Z
|
2020-07-30T01:58:06.000Z
|
import pygame
import math
import path_planning as pp
| 35.542857 | 123 | 0.63545 |
cd423af6c5271daa0eac7f6a8ca5e2cf87ffc2fe
| 2,752 |
py
|
Python
|
test/test_api_v1_module.py
|
feizhihui/deepnlp
|
cc6647d65ec39aadd35e4a4748da92df5b79bd48
|
[
"MIT"
] | null | null | null |
test/test_api_v1_module.py
|
feizhihui/deepnlp
|
cc6647d65ec39aadd35e4a4748da92df5b79bd48
|
[
"MIT"
] | null | null | null |
test/test_api_v1_module.py
|
feizhihui/deepnlp
|
cc6647d65ec39aadd35e4a4748da92df5b79bd48
|
[
"MIT"
] | 1 |
2019-05-13T14:24:15.000Z
|
2019-05-13T14:24:15.000Z
|
#coding:utf-8
'''
Demo for calling the API of the deepnlp.org web service.
Anonymous users of this package have limited access: 100 API calls per day.
Please register and log in to your account on deepnlp.org to get unlimited,
fully supported access. The api_service API module now supports both Windows
and Linux platforms.
from __future__ import unicode_literals
import json, requests, sys, os
if (sys.version_info>(3,0)): from urllib.parse import quote
else : from urllib import quote
from deepnlp import api_service
login = api_service.init() # registration, if failed, load default empty login {} with limited access
login = {} # use your personal login {'username': 'your_user_name' , 'password': 'your_password'}
conn = api_service.connect(login) # save the connection with login cookies
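def json_to_str(obj):
    # Helper assumed by the pipeline section below (it is not defined in this
    # excerpt); plain json.dumps keeps non-ASCII output readable.
    return json.dumps(obj, ensure_ascii=False)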
# API Setting
text = ("").encode('utf-8') # convert text from unicode to utf-8 bytes, quote() function
# Segmentation
url_segment = "http://www.deepnlp.org/api/v1.0/segment/?" + "lang=zh" + "&text=" + quote(text)
web = requests.get(url_segment, cookies = conn)
tuples = json.loads(web.text)
wordsList = tuples['words'] # segmentation json {'words', [w1, w2,...]} return list
print ("Segmentation API:")
print (" ".join(wordsList).encode("utf-8"))
# POS tagging
url_pos = "http://www.deepnlp.org/api/v1.0/pos/?"+ "lang=zh" + "&text=" + quote(text)
web = requests.get(url_pos, cookies = conn)
tuples = json.loads(web.text)
pos_str = tuples['pos_str'] # POS json {'pos_str', 'w1/t1 w2/t2'} return string
print ("POS API:")
print (pos_str.encode("utf-8"))
# NER tagging
url_ner = "http://www.deepnlp.org/api/v1.0/ner/?" + "lang=zh" + "&text=" + quote(text)
web = requests.get(url_ner, cookies = conn)
tuples = json.loads(web.text)
ner_str = tuples['ner_str'] # NER json {'ner_str', 'w1/t1 w2/t2'} return list
print ("NER API:")
print (ner_str.encode("utf-8"))
# Pipeline
annotators = "segment,pos,ner"
url_pipeline = "http://www.deepnlp.org/api/v1.0/pipeline/?" + "lang=zh" + "&text=" + quote(text) + "&annotators=" + quote(annotators)
web = requests.get(url_pipeline, cookies = conn)
tuples = json.loads(web.text)
segment_str = tuples['segment_str'] # segment module
pos_str = tuples['pos_str'] # pos module
ner_str = tuples['ner_str'] # ner module
ner_json = tuples['ner_json'] # ner result in json
# output
print ("Pipeline API:")
print (segment_str.encode("utf-8"))
print (pos_str.encode("utf-8"))
print (ner_str.encode("utf-8"))
print ("NER JSON:")
print (json_to_str(ner_json).encode("utf-8"))
| 38.222222 | 133 | 0.682776 |
cd448e3854b74fee56a6672cdae1ce1e148e593d
| 1,195 |
py
|
Python
|
spring_cloud/commons/client/loadbalancer/round_robin.py
|
haribo0915/Spring-Cloud-in-Python
|
0bcd7093869c797df14428bf2d1b0a779f96e573
|
[
"Apache-2.0"
] | 5 |
2020-10-06T09:48:23.000Z
|
2020-10-07T13:19:46.000Z
|
spring_cloud/commons/client/loadbalancer/round_robin.py
|
haribo0915/Spring-Cloud-in-Python
|
0bcd7093869c797df14428bf2d1b0a779f96e573
|
[
"Apache-2.0"
] | 5 |
2020-10-05T09:57:01.000Z
|
2020-10-12T19:52:48.000Z
|
spring_cloud/commons/client/loadbalancer/round_robin.py
|
haribo0915/Spring-Cloud-in-Python
|
0bcd7093869c797df14428bf2d1b0a779f96e573
|
[
"Apache-2.0"
] | 8 |
2020-10-05T06:34:49.000Z
|
2020-10-07T13:19:46.000Z
|
# -*- coding: utf-8 -*-
"""
The built-in Round-Robin algorithm.
"""
# standard library
from typing import Union
# scip plugin
from spring_cloud.commons.client.service_instance import ServiceInstance
from spring_cloud.utils.atomic import AtomicInteger
from .loadbalancer import LoadBalancer
from .supplier import ServiceInstanceListSupplier
__author__ = "Waterball ([email protected])"
__license__ = "Apache 2.0"
| 30.641026 | 88 | 0.738075 |
cd46541bba89d45678808a7b911ed3c9f61dd510
| 4,245 |
py
|
Python
|
utils/dataset_utils.py
|
dpaiton/DeepSparseCoding
|
5ea01fa8770794df5e13743aa3f2d85297c27eb1
|
[
"MIT"
] | 12 |
2017-04-27T17:19:31.000Z
|
2021-11-07T03:37:59.000Z
|
utils/dataset_utils.py
|
dpaiton/DeepSparseCoding
|
5ea01fa8770794df5e13743aa3f2d85297c27eb1
|
[
"MIT"
] | 12 |
2018-03-21T01:16:25.000Z
|
2022-02-10T00:21:58.000Z
|
utils/dataset_utils.py
|
dpaiton/DeepSparseCoding
|
5ea01fa8770794df5e13743aa3f2d85297c27eb1
|
[
"MIT"
] | 12 |
2017-02-01T19:49:57.000Z
|
2021-12-08T03:16:58.000Z
|
import os
import sys
import numpy as np
import torch
from torchvision import datasets, transforms
ROOT_DIR = os.path.dirname(os.getcwd())
if ROOT_DIR not in sys.path: sys.path.append(ROOT_DIR)
import DeepSparseCoding.utils.data_processing as dp
import DeepSparseCoding.datasets.synthetic as synthetic
| 42.878788 | 107 | 0.660306 |
cd4856841cf209c6c31d8cf4b1d4a02e1669fe87
| 1,051 |
py
|
Python
|
adi_study_watch/nrf5_sdk_15.2.0/adi_study_watch/cli/m2m2/inc/master_definitions/led_interface.py
|
ArrowElectronics/Vital-Signs-Monitoring
|
ba43fe9a116d94170561433910fd7bffba5726e7
|
[
"Unlicense"
] | 5 |
2021-06-13T17:11:19.000Z
|
2021-12-01T18:20:38.000Z
|
adi_study_watch/nrf5_sdk_15.2.0/adi_study_watch/cli/m2m2/inc/master_definitions/led_interface.py
|
ArrowElectronics/Vital-Signs-Monitoring
|
ba43fe9a116d94170561433910fd7bffba5726e7
|
[
"Unlicense"
] | null | null | null |
adi_study_watch/nrf5_sdk_15.2.0/adi_study_watch/cli/m2m2/inc/master_definitions/led_interface.py
|
ArrowElectronics/Vital-Signs-Monitoring
|
ba43fe9a116d94170561433910fd7bffba5726e7
|
[
"Unlicense"
] | 1 |
2022-01-08T15:01:44.000Z
|
2022-01-08T15:01:44.000Z
|
#!/usr/bin/env python3
from ctypes import *
import m2m2_core
| 30.028571 | 53 | 0.726927 |
cd485ea8847607e1b8262b17b33a7d95c7b05c48
| 2,327 |
py
|
Python
|
src/empirical_study.py
|
arshajithwolverine/Recommentation-System_KGNN-LS
|
82ad10633a56794bbc38dc7e6c40a3636c7d570a
|
[
"MIT"
] | 133 |
2019-06-20T08:38:04.000Z
|
2022-03-30T07:57:14.000Z
|
src/empirical_study.py
|
piaofu110/KGNN-LS
|
3afd76361b623e9e38b822861c79bcd61dae41aa
|
[
"MIT"
] | 10 |
2019-07-06T12:53:01.000Z
|
2021-11-10T12:58:50.000Z
|
src/empirical_study.py
|
piaofu110/KGNN-LS
|
3afd76361b623e9e38b822861c79bcd61dae41aa
|
[
"MIT"
] | 40 |
2019-08-07T06:02:31.000Z
|
2022-01-05T15:19:29.000Z
|
import networkx as nx
import numpy as np
import argparse
if __name__ == '__main__':
np.random.seed(555)
NUM = 10000
parser = argparse.ArgumentParser()
parser.add_argument('-d', type=str, default='music')
args = parser.parse_args()
DATASET = args.d
kg_np = np.load('../data/' + DATASET + '/kg_final.npy')
kg = nx.Graph()
kg.add_edges_from([(triple[0], triple[2]) for triple in kg_np]) # construct knowledge graph
rating_np = np.load('../data/' + DATASET + '/ratings_final.npy')
item_history = dict()
item_set = set()
for record in rating_np:
user = record[0]
item = record[1]
rating = record[2]
if rating == 1:
if item not in item_history:
item_history[item] = set()
item_history[item].add(user)
item_set.add(item)
item_pair_num_no_common_rater = 0
item_pair_num_with_common_rater = 0
sp_no_common_rater = dict()
sp_with_common_rater = dict()
while True:
item1, item2 = np.random.choice(list(item_set), size=2, replace=False)
if item_pair_num_no_common_rater == NUM and item_pair_num_with_common_rater == NUM:
break
if item_pair_num_no_common_rater < NUM and len(item_history[item1] & item_history[item2]) == 0:
item_pair_num_no_common_rater += 1
if not nx.has_path(kg, item1, item2):
sp = 'infinity'
else:
sp = nx.shortest_path_length(kg, item1, item2)
if sp not in sp_no_common_rater:
sp_no_common_rater[sp] = 0
sp_no_common_rater[sp] += 1
print(item_pair_num_no_common_rater, item_pair_num_with_common_rater)
if item_pair_num_with_common_rater < NUM and len(item_history[item1] & item_history[item2]) > 0:
item_pair_num_with_common_rater += 1
if not nx.has_path(kg, item1, item2):
sp = 'infinity'
else:
sp = nx.shortest_path_length(kg, item1, item2)
if sp not in sp_with_common_rater:
sp_with_common_rater[sp] = 0
sp_with_common_rater[sp] += 1
print(item_pair_num_no_common_rater, item_pair_num_with_common_rater)
print(sp_no_common_rater)
print(sp_with_common_rater)
| 36.359375 | 104 | 0.621401 |
cd48bacc37dd1b8304c3c30daa2f346ee7aa4309
| 6,317 |
py
|
Python
|
Survival_Pygame/data.py
|
Lily-Li828/effective-octo-fiesta
|
4dbfeaec6158141bb03005aa25240dd337694ee3
|
[
"Apache-2.0"
] | null | null | null |
Survival_Pygame/data.py
|
Lily-Li828/effective-octo-fiesta
|
4dbfeaec6158141bb03005aa25240dd337694ee3
|
[
"Apache-2.0"
] | null | null | null |
Survival_Pygame/data.py
|
Lily-Li828/effective-octo-fiesta
|
4dbfeaec6158141bb03005aa25240dd337694ee3
|
[
"Apache-2.0"
] | null | null | null |
import pygame
from pygame.locals import*
from pygame import mixer
pygame.init()
# loading in background image
backgroundClassic_image=pygame.image.load('image/WallPaper.png')
backgroundAncient_image=pygame.image.load('image/WallPaper2.png')
# loading in player image
player_imageClassic=pygame.image.load('image/player.png')
player_imageAncient=pygame.image.load('image/player2.png')
player_imageClassicR=pygame.image.load('image/playerR.png')
player_imageAncientR=pygame.image.load('image/player2R.png')
#loading sound for bullet
BulletSound=mixer.Sound('sound/bullet.wav')
#Loading sound for collision with enemy:
CollidewithEnemy=mixer.Sound('sound/Collide.wav')
#Loading sound for opening of game:
Opening_Sound=mixer.Sound('sound/opening.wav')
Mouse_Sound=mixer.Sound('sound/mouseclick.wav')
Selection_Sound=mixer.Sound('sound/selection.wav')
#loading sound for end of game:
End_GameSound=mixer.Sound('sound/gameover.wav')
#loading sound for win game:
Win_GameSound=mixer.Sound('sound/wingame.wav')
Door_GameSound=mixer.Sound('sound/doorappear.wav')
#Loading in image for opening animation:
Opening_Image= [pygame.image.load('image/opening.png'),pygame.image.load('image/opening.png'),
pygame.image.load('image/opening.png'),pygame.image.load('image/opening.png'),
pygame.image.load('image/opening.png'),pygame.image.load('image/opening.png'),
pygame.image.load('image/opening.png'),pygame.image.load('image/opening.png'),
pygame.image.load('image/opening.png'),pygame.image.load('image/opening.png'),
pygame.image.load('image/opening.png'),pygame.image.load('image/opening.png'),
pygame.image.load('image/opening.png'),pygame.image.load('image/opening.png'),
pygame.image.load('image/opening.png'),pygame.image.load('image/opening.png'),
pygame.image.load('image/opening.png'),pygame.image.load('image/opening1.png'),
pygame.image.load('image/opening1.png'),pygame.image.load('image/opening1.png'),
pygame.image.load('image/opening.png')]
#loading in image for opening game mode selection:
OpeningSelect_BG=pygame.image.load('image/ModeSelection.png')
ClassicMode_image=pygame.image.load('image/ClassicMode.png')
AncientMode_image=pygame.image.load('image/AncientMode.png')
Glow_image=pygame.image.load('image/glow.png')
#Loading image for win game:
Won_Light=pygame.image.load('image/light.png')
Won_Door=pygame.image.load('image/door.png')
#Loading win game page:
Escape_image=pygame.image.load('image/Wingame.png')
#loading in image:
direction_key=pygame.image.load('image/direction1.png')
direction_arrow=pygame.image.load('image/direction2.png')
#loading in endgame page:
End_image=pygame.image.load('image/gameover.png')
# load in image of platform
platformClassic_img= pygame.image.load('image/icicle.png')
platformAncient_img=pygame.image.load('image/brickwall.png')
#Game map for two different game modes:
Classic_map = [[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0],
[1,1,1,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,1,1,1,0,0,0,0,0],
[0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]
Ancient_map=[[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[1,1,0,0,1,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,1,1,1,0,0,0],
[1,1,0,0,1,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,0,0,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,0,1,1,1,1,0,0,0],
[1,1,0,0,1,1,1,0,0,1,1,0,0,1,0,0,1,0,0,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,0,1,1,0,0,0,0,0],
[1,1,0,0,1,1,1,0,0,1,1,0,0,1,0,0,1,0,0,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,0,0,1,0,1,0,0,1,1,0,0,0,0,0],
[1,1,0,0,1,1,1,0,0,1,1,0,0,1,0,0,1,0,0,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,0,0,1,0,1,0,0,1,1,0,0,0,0,0]]
#Upload font type:
fontd1= pygame.font.Font('font/Pieces.ttf',32)
fontd2= pygame.font.Font('font/OldeEnglish.ttf',18)
fontdO= pygame.font.Font('font/Opening.ttf',28) # Font (Opening)
fontdS= pygame.font.Font('font/Pieces.ttf',30) # Font (For Game Mode Selection)
| 54.930435 | 114 | 0.625139 |
cd4c852bd50c3ecb65653b4479673255f18bc5fa
| 8,170 |
py
|
Python
|
test/filters_iso_test.py
|
todofixthis/filters-iso
|
da6052b49a2f71a3b3d6b66e2633debbb64f5b16
|
[
"MIT"
] | null | null | null |
test/filters_iso_test.py
|
todofixthis/filters-iso
|
da6052b49a2f71a3b3d6b66e2633debbb64f5b16
|
[
"MIT"
] | null | null | null |
test/filters_iso_test.py
|
todofixthis/filters-iso
|
da6052b49a2f71a3b3d6b66e2633debbb64f5b16
|
[
"MIT"
] | null | null | null |
import filters as f
from filters.test import BaseFilterTestCase
# noinspection PyProtectedMember
from iso3166 import Country, countries_by_alpha3
from language_tags import tags
from language_tags.Tag import Tag
from moneyed import Currency, get_currency
| 31.914063 | 75 | 0.623745 |
cd4d5dd7883050a254679a4b1f93de18a8465561
| 1,179 |
py
|
Python
|
datacamp-master/22-introduction-to-time-series-analysis-in-python/04-moving-average-ma-and-arma-models/08-equivalance-of-ar(1)-and-ma(infinity).py
|
vitthal10/datacamp
|
522d2b192656f7f6563bf6fc33471b048f1cf029
|
[
"MIT"
] | 1 |
2020-06-11T01:32:36.000Z
|
2020-06-11T01:32:36.000Z
|
22-introduction-to-time-series-analysis-in-python/04-moving-average-ma-and-arma-models/08-equivalance-of-ar(1)-and-ma(infinity).py
|
AndreasFerox/DataCamp
|
41525d7252f574111f4929158da1498ee1e73a84
|
[
"MIT"
] | null | null | null |
22-introduction-to-time-series-analysis-in-python/04-moving-average-ma-and-arma-models/08-equivalance-of-ar(1)-and-ma(infinity).py
|
AndreasFerox/DataCamp
|
41525d7252f574111f4929158da1498ee1e73a84
|
[
"MIT"
] | 1 |
2021-08-08T05:09:52.000Z
|
2021-08-08T05:09:52.000Z
|
'''
Equivalence of AR(1) and MA(infinity)
To better understand the relationship between MA models and AR models, you will
demonstrate that an AR(1) model is equivalent to an MA(∞) model with the
appropriate parameters.
You will simulate an MA model with parameters 0.8, 0.8^2, 0.8^3, ... for a large
number (30) of lags and show that it has the same Autocorrelation Function as an
AR(1) model with ϕ = 0.8.
INSTRUCTIONS
100XP
Import the modules for simulating data and plotting the ACF from statsmodels
Use a list comprehension to build a list with exponentially decaying MA
parameters: 1, 0.8, 0.8^2, 0.8^3, ...
Simulate 5000 observations of the MA(30) model
Plot the ACF of the simulated series
'''
# import the modules for simulating data and plotting the ACF
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.arima_process import ArmaProcess
from statsmodels.graphics.tsaplots import plot_acf
# Build a list of MA parameters
ma = [0.8**i for i in range(30)]
# Simulate the MA(30) model
ar = np.array([1])
AR_object = ArmaProcess(ar, ma)
simulated_data = AR_object.generate_sample(nsample=5000)
# Plot the ACF
plot_acf(simulated_data, lags=30)
plt.show()
| 19.983051 | 136 | 0.74894 |
cd4e4c3a86cc4a31b024c46ddddde1fa3e66e93b
| 3,752 |
py
|
Python
|
imutils.py
|
shimoda-uec/ssdd
|
564c3e08fae7a158516cdbd9f3599a74dc748aff
|
[
"MIT"
] | 33 |
2019-11-05T07:15:36.000Z
|
2021-04-27T06:33:47.000Z
|
imutils.py
|
shimoda-uec/ssdd
|
564c3e08fae7a158516cdbd9f3599a74dc748aff
|
[
"MIT"
] | 1 |
2019-11-18T13:02:40.000Z
|
2019-11-18T13:02:54.000Z
|
imutils.py
|
shimoda-uec/ssdd
|
564c3e08fae7a158516cdbd9f3599a74dc748aff
|
[
"MIT"
] | 3 |
2019-11-25T11:00:39.000Z
|
2021-03-27T06:53:21.000Z
|
import PIL.Image
import random
import numpy as np
import cv2
def HWC_to_CHW(img):
return np.transpose(img, (2, 0, 1))
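if __name__ == '__main__':
    # Quick illustrative self-check: HWC (height, width, channel) -> CHW.
    img = np.zeros((224, 224, 3), dtype=np.uint8)
    assert HWC_to_CHW(img).shape == (3, 224, 224)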
| 32.068376 | 91 | 0.567697 |
cd503144da89b34c7f7e0c6f7d30f63249106454
| 398 |
py
|
Python
|
dfmt/svg/run.py
|
wangrl2016/coding
|
fd6cd342cade42379c4a0447d83e17c6596fd3a3
|
[
"MIT"
] | 4 |
2021-02-20T03:47:48.000Z
|
2021-11-09T17:25:43.000Z
|
dfmt/svg/run.py
|
wangrl2016/coding
|
fd6cd342cade42379c4a0447d83e17c6596fd3a3
|
[
"MIT"
] | null | null | null |
dfmt/svg/run.py
|
wangrl2016/coding
|
fd6cd342cade42379c4a0447d83e17c6596fd3a3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import subprocess
if __name__ == '__main__':
out_dir = 'out'
if not os.path.exists(out_dir):
os.mkdir(out_dir)
subprocess.run(['cargo', 'build', '--release'])
exe = 'target/release/svg'
subprocess.run([exe, '-i', 'test/simple-text.svg', '-o', 'out/simple-text.png', '--perf',
'--dump-svg', 'out/simple-text.svg'])
| 26.533333 | 93 | 0.585427 |
cd517441104b9bb84c95422e46c5a618c55415fc
| 79 |
py
|
Python
|
project/enums/string_types_enum.py
|
vinibiavatti1/PythonFlaskCms
|
e43a4db84d1f77a5f66b1f8fcb9dc96e05e6c023
|
[
"MIT"
] | null | null | null |
project/enums/string_types_enum.py
|
vinibiavatti1/PythonFlaskCms
|
e43a4db84d1f77a5f66b1f8fcb9dc96e05e6c023
|
[
"MIT"
] | null | null | null |
project/enums/string_types_enum.py
|
vinibiavatti1/PythonFlaskCms
|
e43a4db84d1f77a5f66b1f8fcb9dc96e05e6c023
|
[
"MIT"
] | null | null | null |
"""
String format type value enumeration.
"""
TRUE = '1'
FALSE = '0'
NONE = ''
| 11.285714 | 37 | 0.594937 |
cd52a473787d5199c37a49a98543ea8b45caa074
| 90 |
py
|
Python
|
LTA/museums/admin.py
|
valeriimartsyshyn/lviv_tourist_adviser
|
e8ce0c7ba97262b2d181e3373eb806f4dcc9bbf1
|
[
"MIT"
] | null | null | null |
LTA/museums/admin.py
|
valeriimartsyshyn/lviv_tourist_adviser
|
e8ce0c7ba97262b2d181e3373eb806f4dcc9bbf1
|
[
"MIT"
] | 1 |
2021-09-27T06:33:26.000Z
|
2021-09-27T06:33:26.000Z
|
LTA/museums/admin.py
|
valeriimartsyshyn/lviv_tourist_adviser
|
e8ce0c7ba97262b2d181e3373eb806f4dcc9bbf1
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Museums
admin.site.register(Museums)
| 22.5 | 32 | 0.833333 |
cd537e30b909d9612963bfa8b8f1c4d920b60f98
| 1,584 |
py
|
Python
|
grailapp-osc-6a60f9376f69/setup.py
|
yuan7407/TD_OpenCV_PythonOSC
|
f4424b1f7155f7942397212b97183cb749612f50
|
[
"MIT"
] | 20 |
2018-12-06T21:35:10.000Z
|
2022-02-08T23:22:35.000Z
|
grailapp-osc-6a60f9376f69/setup.py
|
phoebezhung/TD_OpenCV_PythonOSC
|
f4424b1f7155f7942397212b97183cb749612f50
|
[
"MIT"
] | null | null | null |
grailapp-osc-6a60f9376f69/setup.py
|
phoebezhung/TD_OpenCV_PythonOSC
|
f4424b1f7155f7942397212b97183cb749612f50
|
[
"MIT"
] | 4 |
2019-02-27T08:13:45.000Z
|
2021-11-02T15:14:41.000Z
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
setup
~~~~~
Setup Script
Run the build process by running the command 'python setup.py build'
:copyright: (c) 2018 by Oleksii Lytvyn.
:license: MIT, see LICENSE for more details.
"""
import osc.osc as osc
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='osc',
version=osc.__version__,
author='Oleksii Lytvyn',
author_email='[email protected]',
description='OSC implementation in pure Python',
long_description=open('README.rst').read(),
url='https://bitbucket.org/grailapp/osc',
download_url='https://bitbucket.org/grailapp/osc/get/default.zip',
platforms='any',
packages=['osc'],
keywords=['osc', 'protocol', 'utilities', 'osc-1.0', 'network', 'communication', 'udp'],
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: System :: Networking',
'License :: OSI Approved :: MIT License'
],
install_requires=[]
)
| 31.058824 | 92 | 0.621212 |
cd5534b9b393b4ca6ad72c44a3438fcc6e74b3d0
| 2,501 |
py
|
Python
|
socketshark/utils.py
|
Play2Live/socketshark
|
9b1e40654bf629c593079fb44c548911d4c864af
|
[
"MIT"
] | null | null | null |
socketshark/utils.py
|
Play2Live/socketshark
|
9b1e40654bf629c593079fb44c548911d4c864af
|
[
"MIT"
] | null | null | null |
socketshark/utils.py
|
Play2Live/socketshark
|
9b1e40654bf629c593079fb44c548911d4c864af
|
[
"MIT"
] | null | null | null |
import asyncio
import ssl
import aiohttp
from . import constants as c
def _get_rate_limit_wait(log, resp, opts):
"""
Returns the number of seconds we should wait given a 429 HTTP response and
HTTP options.
"""
max_wait = 3600
wait = opts['wait']
header_name = opts['rate_limit_reset_header_name']
if header_name and header_name in resp.headers:
header_value = resp.headers[header_name]
try:
new_wait = float(header_value)
# Make sure we have a valid value (not negative, NaN, or Inf)
if 0 <= new_wait <= max_wait:
wait = new_wait
elif new_wait > max_wait:
log.warn('rate reset value too high',
name=header_name, value=header_value)
wait = max_wait
else:
log.warn('invalid rate reset value',
name=header_name, value=header_value)
except ValueError:
log.warn('invalid rate reset value',
name=header_name, value=header_value)
return wait
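# Illustrative usage only (this module uses relative imports, so it cannot be
# run directly). `_FakeResp` is a stand-in; the real `log` is presumably a
# structlog-style logger, since `log.warn` is called with keyword context:
#
#   class _FakeResp:
#       headers = {'X-RateLimit-Reset': '2.5'}
#
#   opts = {'wait': 1.0, 'rate_limit_reset_header_name': 'X-RateLimit-Reset'}
#   _get_rate_limit_wait(log, _FakeResp(), opts)  # -> 2.5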
| 35.225352 | 78 | 0.551779 |
cd592812165ebec71f40378868573e5f9eda72b9
| 252 |
py
|
Python
|
download_and_create_reference_datasets/v02/create_ht__clinvar.py
|
NLSVTN/hail-elasticsearch-pipelines
|
8b895a2e46a33d347dd2a1024101a6d515027a03
|
[
"MIT"
] | 15 |
2017-11-22T14:48:04.000Z
|
2020-10-05T18:22:24.000Z
|
download_and_create_reference_datasets/v02/create_ht__clinvar.py
|
NLSVTN/hail-elasticsearch-pipelines
|
8b895a2e46a33d347dd2a1024101a6d515027a03
|
[
"MIT"
] | 94 |
2020-10-21T17:37:57.000Z
|
2022-03-29T14:59:46.000Z
|
download_and_create_reference_datasets/v02/create_ht__clinvar.py
|
NLSVTN/hail-elasticsearch-pipelines
|
8b895a2e46a33d347dd2a1024101a6d515027a03
|
[
"MIT"
] | 7 |
2019-01-29T09:08:10.000Z
|
2020-02-25T16:22:57.000Z
|
#!/usr/bin/env python3
from kubernetes.shell_utils import simple_run as run
run((
"python3 gcloud_dataproc/v02/run_script.py "
"--cluster create-ht-clinvar "
"download_and_create_reference_datasets/v02/hail_scripts/write_clinvar_ht.py"))
| 28 | 83 | 0.77381 |
cd597b04327e251c7079f983fdc1e98e38cf4a8a
| 4,324 |
py
|
Python
|
cogs/member_.py
|
himo1101/NFlegel
|
7621f5d71b41b71faaf44d142f3b903b0471873a
|
[
"MIT"
] | null | null | null |
cogs/member_.py
|
himo1101/NFlegel
|
7621f5d71b41b71faaf44d142f3b903b0471873a
|
[
"MIT"
] | null | null | null |
cogs/member_.py
|
himo1101/NFlegel
|
7621f5d71b41b71faaf44d142f3b903b0471873a
|
[
"MIT"
] | null | null | null |
from discord.ext import commands
from flegelapi.pg import default, server
from distutils.util import strtobool
import discord
member_table = """ member_(
            id serial PRIMARY KEY,
            server_id integer NOT NULL,
            role_id integer,
            channel_id integer,
            custom_mes character varying DEFAULT '',
            on_off boolean DEFAULT False)"""
| 32.757576 | 102 | 0.56568 |
cd59d9b93bd906d8d50478926274bfcb5696cb98
| 4,388 |
py
|
Python
|
old/policy_grads2.py
|
DarkElement75/cartpole-policy-gradients
|
ca6b7fb826fa023e2d845408d3d16d8032b07508
|
[
"MIT"
] | null | null | null |
old/policy_grads2.py
|
DarkElement75/cartpole-policy-gradients
|
ca6b7fb826fa023e2d845408d3d16d8032b07508
|
[
"MIT"
] | null | null | null |
old/policy_grads2.py
|
DarkElement75/cartpole-policy-gradients
|
ca6b7fb826fa023e2d845408d3d16d8032b07508
|
[
"MIT"
] | null | null | null |
import gym
import numpy as np
import sys
import theano
import theano.tensor as T
import layers
from layers import FullyConnectedLayer, SoftmaxLayer
env = gym.make('CartPole-v0')
#Number of actions
action_n = env.action_space.n
#Number of features observed
feature_n = env.observation_space.shape[0]
epochs = 100
mini_batch_size = 10
timesteps = 100
learning_rate = 1.0
epsilon_decay_rate = -0.04
initial_epsilon = 1.0
#avg_solved_perc = 97.5
#avg_solved_threshold = (avg_solved_perc/100*timesteps)
render = False
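# The two helpers below are used later in this script but are missing from the
# excerpt; these are plausible minimal reconstructions.
def exp_decay(initial, rate, step):
    # exponential decay: initial * e^(rate * step); a negative rate decays toward 0
    return initial * np.exp(rate * step)

def epsilon_greedy(epsilon):
    # True -> take a random action with probability epsilon
    return np.random.rand() < epsilon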
#Initialize network
layers = [
FullyConnectedLayer(n_in=4, n_out=10),
FullyConnectedLayer(n_in=10, n_out=10),
SoftmaxLayer(n_in=10, n_out=2)
]
params = [param for layer in layers for param in layer.params]
iterations = mini_batch_size
x = T.vector("x")
y = T.ivector("y")
init_layer = layers[0]
init_layer.set_inpt(x, 1)
for j in xrange(1, len(layers)):
prev_layer, layer = layers[j-1], layers[j]
layer.set_inpt(
prev_layer.output, 1)
cost = T.argmax(T.log(layers[-1].output))
R = 0
#iter_grads = [theano.shared([np.zeros(shape=param.get_value().shape, dtype=theano.config.floatX) for param in params])]
#grads = [theano.shared([np.zeros(shape=param.get_value().shape, dtype=theano.config.floatX) for param in params])]
grads = T.grad(cost, params)
iter_grads = [T.zeros_like(grad) for grad in grads]
t_updates = []
iter_updates = []
mb_updates = []
#t_updates.append((iter_grads, iter_grads+T.grad(cost, params)))
#iter_updates.append((iter_grads, T.dot(T.dot(iter_grads, R), 1/mini_batch_size)))
#iter_updates.append((grads, grads+iter_grads))
#mb_updates.append((params, params+learning_rate*grads))
for param, grad in zip(params, grads):
mb_updates.append((param, param+learning_rate*grad))#Update our params as we were
#To execute our updates when necessary
exec_t_updates = theano.function([], None, updates=t_updates)
exec_iter_updates = theano.function([], None, updates=iter_updates)
#exec_mb_updates = theano.function([], None, updates=mb_updates)
"""
mb = T.iscalar()
train_mb = theano.function(
[], cost, updates=mb_updates)
"""
#To get our action a possibilities from state s
s = T.vector()
NN_output = theano.function(
[s], layers[-1].output,
givens={
x: s
})
for e in range(epochs):
    #grads = T.set_subtensor(grads, T.zeros_like(grads))
    grads = [g * 0 for g in grads]  # reset per-param accumulators (grads is a list, so `grads * 0` would empty it)
epsilon = exp_decay(initial_epsilon, epsilon_decay_rate, e)
for mb in range(mini_batch_size):
s = env.reset()
R = 0
        #iter_grads = T.set_subtensor(iter_grads, T.zeros_like(iter_grads))
        iter_grads = [g * 0 for g in grads]  # fresh per-episode accumulators
for t in range(timesteps):
if render:
env.render()
if epsilon_greedy(epsilon):
#Random action
action = env.action_space.sample()
tmp = T.scalar("tmp")
max_action = T.ones_like(tmp)
else:
#Policy Action
a = NN_output(s)
action = np.argmax(a, axis=1)[0]
max_action = T.max(a)
            #exec_t_update()
            iter_grads = [ig + g for ig, g in zip(iter_grads, T.grad(max_action, params))]  # element-wise, not list concat
s, r, done, info = env.step(action)
R += r
if done:
break
#exec_iter_update()
iter_grads = [iter_grad * R / mini_batch_size for iter_grad in iter_grads]
        grads = [g + ig for g, ig in zip(grads, iter_grads)]  # accumulate element-wise
print "Epoch: %i, Reward: %i, Epsilon: %f" % (e, R, epsilon)
#exec_mb_updates()
#cost_asdf = train_mb()
#print "Updating params..."
for param, grad in zip(params, grads):
param = param + learning_rate * grad
| 26.756098 | 120 | 0.635369 |
cd5a19f0cbafdf639c273ea9eebb620d7cbc509e
| 7,720 |
py
|
Python
|
client.py
|
andreidorin13/cs544-messaging-protocol
|
40d26cb20234a4ad58095150795946aceaf9e4d4
|
[
"MIT"
] | null | null | null |
client.py
|
andreidorin13/cs544-messaging-protocol
|
40d26cb20234a4ad58095150795946aceaf9e4d4
|
[
"MIT"
] | null | null | null |
client.py
|
andreidorin13/cs544-messaging-protocol
|
40d26cb20234a4ad58095150795946aceaf9e4d4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
'''
Andrei Dorin
06/10/2018
User interface for WISP chat implementation
'''
import argparse
import logging
import signal
import sys
import time
import queue
import select
import getpass
from wisp_client import WispClient
from wisp_common import State, WispRequest, WispResponse, WispMessage, WISP_DEFAULT_PORT
def signal_sigint(_, __):
'''
Signal handler for KeyboardInterrupt or SIGINT
'''
print('SIGINT Received, shutting down')
sys.exit(0)
def main():
'''
Main entry point of client
Argument parsing and initializing client
'''
parser = argparse.ArgumentParser(description='WISP protocol chat client')
parser.add_argument('-H', '--host', type=str,
help='IP of server, if none is specified, service discovery will be attempted')
parser.add_argument('-p', '--port', type=int, default=32500,
help='Port of server to connect, if none is specified, protocol default 32500 will be used')
parser.add_argument('-v', '--verbosity', type=int, default=4, choices=[4, 3, 2, 1],
help='Verbosity of logger, 4: Error, 3: Warning, 2: Info, 1: Debug')
args = parser.parse_args()
logging.basicConfig()
logging.getLogger().setLevel(args.verbosity * 10)
signal.signal(signal.SIGINT, signal_sigint)
# CLIENT
client = Client()
if args.host:
client.connect(args.host, args.port)
else:
client.discover()
client.start()
if __name__ == '__main__':
main()
| 32.166667 | 116 | 0.584197 |
cd5a2073c9ceff87b49af728a52895c0f1961f0b
| 61 |
py
|
Python
|
newpy/loggers/__init__.py
|
janithPet/newpy
|
feb264f4e3da371c3f2ddc7633f3fdd5a25db661
|
[
"MIT"
] | null | null | null |
newpy/loggers/__init__.py
|
janithPet/newpy
|
feb264f4e3da371c3f2ddc7633f3fdd5a25db661
|
[
"MIT"
] | 4 |
2021-09-03T06:18:29.000Z
|
2021-09-03T08:36:25.000Z
|
newpy/loggers/__init__.py
|
janithPet/newpy
|
feb264f4e3da371c3f2ddc7633f3fdd5a25db661
|
[
"MIT"
] | null | null | null |
from newpy.loggers.colored_formatter import ColoredFormatter
| 30.5 | 60 | 0.901639 |
cd5d8710df3d01c40879c8b39d50c6ffb79da254
| 181 |
py
|
Python
|
putao/source/__init__.py
|
ongyx/putao
|
e901402308b9b4c3c9acf8dae15eb4781ddfcede
|
[
"MIT"
] | 7 |
2021-06-29T00:50:46.000Z
|
2021-10-14T23:31:12.000Z
|
putao/source/__init__.py
|
ongyx/putao
|
e901402308b9b4c3c9acf8dae15eb4781ddfcede
|
[
"MIT"
] | 2 |
2021-08-28T05:34:01.000Z
|
2021-08-29T05:03:34.000Z
|
putao/source/__init__.py
|
ongyx/putao
|
e901402308b9b4c3c9acf8dae15eb4781ddfcede
|
[
"MIT"
] | null | null | null |
# coding: utf8
"""Sources provide an abstraction between a source of music notes and putao projects."""
from . import mml # noqa
from .reg import formats, loads, register # noqa
| 30.166667 | 88 | 0.734807 |
cd5dd7dda160122dc7f0149e4f5abf4d8e95ebe4
| 206 |
py
|
Python
|
parser/src/test/test-data/math_csc.py
|
luciansmith/sedml-script
|
d891645d0b3f89ff190fc7e719659c8e229c07da
|
[
"MIT"
] | null | null | null |
parser/src/test/test-data/math_csc.py
|
luciansmith/sedml-script
|
d891645d0b3f89ff190fc7e719659c8e229c07da
|
[
"MIT"
] | null | null | null |
parser/src/test/test-data/math_csc.py
|
luciansmith/sedml-script
|
d891645d0b3f89ff190fc7e719659c8e229c07da
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created by libsedmlscript v0.0.1
"""
from sed_roadrunner import model, task, plot
from mpmath import csc
#----------------------------------------------
csc(0.5)
| 15.846154 | 48 | 0.475728 |
cd5e1e26e39c56d3ae62b8fd2032ab324293acc8
| 526 |
py
|
Python
|
lib/redis_set_get.py
|
InformaticsResearchCenter/ITeung
|
2e3f76294c3affca07934293cdeb46d6d618180a
|
[
"MIT"
] | null | null | null |
lib/redis_set_get.py
|
InformaticsResearchCenter/ITeung
|
2e3f76294c3affca07934293cdeb46d6d618180a
|
[
"MIT"
] | 37 |
2020-03-22T23:21:14.000Z
|
2020-09-16T15:07:06.000Z
|
lib/redis_set_get.py
|
InformaticsResearchCenter/ITeung
|
2e3f76294c3affca07934293cdeb46d6d618180a
|
[
"MIT"
] | 1 |
2020-09-08T11:31:30.000Z
|
2020-09-08T11:31:30.000Z
|
import redis
| 21.04 | 48 | 0.579848 |
cd5e82adedde50cba3e364b3ccb25d0a6e80401a
| 18,185 |
py
|
Python
|
FTDISPI.py
|
g-i-wilson/spi-tools
|
1c961a97572a366235f9f3b0517d8201fa8be371
|
[
"MIT"
] | 1 |
2022-03-22T20:44:01.000Z
|
2022-03-22T20:44:01.000Z
|
FTDISPI.py
|
g-i-wilson/spi-tools
|
1c961a97572a366235f9f3b0517d8201fa8be371
|
[
"MIT"
] | null | null | null |
FTDISPI.py
|
g-i-wilson/spi-tools
|
1c961a97572a366235f9f3b0517d8201fa8be371
|
[
"MIT"
] | null | null | null |
from pyftdi.spi import SpiController
from pyftdi.gpio import GpioSyncController
import serial
import time
import sys
import JSONFile
dbg = False
def ui_hex(str):
return int(str,16)
def uiLoopHelp():
print()
print("Command set:")
print()
print("write <REG_NAME> XXXX1010 1XXXXXX0 | Write bits (any char not 0 or 1 is a don't-care)")
print("writeRaw 0xXX 0xXX 0xXX | Write a raw sequence of bytes")
print("read <REG_NAME> | Read register")
print("all | Read all registers")
print("save <fileName> | Save registers to JSON file")
print("load <fileName> | Load and write registers from JSON file")
print("loadCSV <fileName> | Write bytes from CSV file (each line is one transaction)")
print("loadDefault | Load datasheet default JSON configuration")
print("help | Print this command set")
print("exit | Exit the program")
def uiLoop(spiObject, printHelp=True):
if printHelp:
uiLoopHelp()
jsonObject = None
ui = [""]
while (ui[0] != "exit"):
print("\n> ", end='')
ui = sys.stdin.readline().rstrip().split(' ')
if (ui[0] == "read"):
spiObject.readStruct({ ui[1] : {} }, display=True)
if (ui[0] == "write"):
dataRegs = []
for i in range(2,len(ui)):
dataRegs.append( ui[i] )
spiObject.writeBits( ui[1], dataRegs )
if (ui[0] == "all"):
spiObject.readState()
if (ui[0] == "compare"):
spiObject.compare()
if (ui[0] == "trigger"):
while(1):
spiObject.trigger(pre_display=chr(27)+"[2J")
time.sleep(1)
if (ui[0] == "save"):
if jsonObject is None:
if len(ui) > 1:
jsonObject = JSONFile.new(ui[1])
else:
jsonObject = JSONFile.new(input("\nSave as: "))
jsonObject.write( spiObject.readState() )
if (ui[0] == "load"):
if jsonObject is None:
jsonObject = JSONFile.load(ui[1])
spiObject.writeStruct(jsonObject.read())
spiObject.readState()
if (ui[0] == "loadCSV"):
spiObject.writeCSV(ui[1])
print("Comparing changes...")
spiObject.compare()
if (ui[0] == "writeRaw"):
print("Writing raw bytes...")
byteList = []
for i in range(1,len(ui)):
byteList.append( int(ui[i],16) )
print(byteList)
spiObject.writeRaw( byteList )
if (ui[0] == "loadDefault"):
spiObject.writeDefault()
if (ui[0] == "help"):
uiLoopHelp()
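# uiLoop() drives a SPI wrapper object whose class definition sits in a truncated
# part of this record. The stub below only documents the interface uiLoop relies
# on (method names taken from the calls above); the bodies are placeholders, not
# the original implementation.
class SPIDevice:
    def readStruct(self, struct_dict, display=False): raise NotImplementedError
    def writeStruct(self, struct_dict): raise NotImplementedError
    def writeBits(self, reg_name, data_regs): raise NotImplementedError
    def writeRaw(self, byte_list): raise NotImplementedError
    def writeCSV(self, file_name): raise NotImplementedError
    def writeDefault(self): raise NotImplementedError
    def readState(self): raise NotImplementedError
    def compare(self): raise NotImplementedError
    def trigger(self, pre_display=""): raise NotImplementedError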
| 36.081349 | 144 | 0.534451 |
cd63c34fbdfbd183f707a4b54997655b51643809
| 3,417 |
py
|
Python
|
src/onegov/gazette/views/groups.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/gazette/views/groups.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/gazette/views/groups.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from morepath import redirect
from onegov.core.security import Private
from onegov.gazette import _
from onegov.gazette import GazetteApp
from onegov.gazette.forms import EmptyForm
from onegov.gazette.layout import Layout
from onegov.user import UserGroup
from onegov.user import UserGroupCollection
from onegov.user.forms import UserGroupForm
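# The view functions of this module are in a truncated part of this record. A
# hedged sketch of one of them, assuming the usual onegov/morepath view pattern
# (the decorator arguments, template name and query ordering are assumptions):
@GazetteApp.html(model=UserGroupCollection, template='groups.pt', permission=Private)
def view_groups(self, request):
    layout = Layout(self, request)
    return {
        'layout': layout,
        'title': _('Groups'),
        'groups': self.query().order_by(UserGroup.name).all()
    }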
| 23.244898 | 66 | 0.605502 |
cd6412162ab8b14b43aaaa41358897ddfe50fe48
| 136 |
py
|
Python
|
dlpt/__init__.py
|
damogranlabs/dlpt
|
e4cdbaf4b5496ed985eb255b17294aa7cf3d35e4
|
[
"MIT"
] | 5 |
2021-08-09T19:39:06.000Z
|
2022-03-22T11:21:29.000Z
|
dlpt/__init__.py
|
damogranlabs/dlpt
|
e4cdbaf4b5496ed985eb255b17294aa7cf3d35e4
|
[
"MIT"
] | null | null | null |
dlpt/__init__.py
|
damogranlabs/dlpt
|
e4cdbaf4b5496ed985eb255b17294aa7cf3d35e4
|
[
"MIT"
] | null | null | null |
from . import utils
from . import pth
from . import proc
from . import log
from . import json
from . import time
from . import importer
| 17 | 22 | 0.742647 |
cd64ffc5e28a3c1d060e7cdf2e73c1f3c1f202dd
| 1,466 |
py
|
Python
|
personal_utilities/fourier_filters.py
|
dbstein/personal_utilities
|
3a4c7d2416b13a87f88fc0e400b299d648e1e541
|
[
"Apache-2.0"
] | null | null | null |
personal_utilities/fourier_filters.py
|
dbstein/personal_utilities
|
3a4c7d2416b13a87f88fc0e400b299d648e1e541
|
[
"Apache-2.0"
] | null | null | null |
personal_utilities/fourier_filters.py
|
dbstein/personal_utilities
|
3a4c7d2416b13a87f88fc0e400b299d648e1e541
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
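# The filter implementations are in a truncated part of this record. As a hedged
# sketch of the general idea (not the original code): a sharp low-pass filter
# that zeroes all Fourier modes above a cutoff fraction of the spectrum.
def low_pass_filter(signal, cutoff_fraction=0.5):
    coeffs = np.fft.rfft(signal)
    cut = int(len(coeffs) * cutoff_fraction)
    coeffs[cut:] = 0.0  # kill the high-frequency modes
    return np.fft.irfft(coeffs, n=len(signal))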
| 34.093023 | 78 | 0.563438 |
cd67c54b1e46edcb715070c6ab83abb9ea55fa6d
| 1,178 |
py
|
Python
|
sloth/simple.py
|
codacy-badger/sloth
|
a4f2118b2f19e55271613d43c785aaf4ab030b5e
|
[
"MIT"
] | 1 |
2021-02-11T12:14:23.000Z
|
2021-02-11T12:14:23.000Z
|
src/sloth/simple.py
|
Legorooj/sloth
|
47f6358349f8545fc475efab19edd6efda3ffbcd
|
[
"MIT"
] | null | null | null |
src/sloth/simple.py
|
Legorooj/sloth
|
47f6358349f8545fc475efab19edd6efda3ffbcd
|
[
"MIT"
] | null | null | null |
# ----------------------------------------------------------------------------
# Copyright (c) 2020 Legorooj <[email protected]>
# Copyright (c) 2020 FluffyKoalas <github.com/fluffykoalas>
# This file and all others in this project are licensed under the MIT license.
# Please see the LICENSE file in the root of this repository for more details.
# ----------------------------------------------------------------------------
from .timers import Timer
from .raw import tests, runners
__all__ = [
'call_after', 'time_callable', 'time_eval', 'time_exec'
]
| 32.722222 | 78 | 0.616299 |
cd682359aededb5fca5a5b75e857cce2e964a4f3
| 1,385 |
py
|
Python
|
Final/P2Pchat.py
|
cainanBlack/csc321
|
9cebf9c3b61befda932732316b7406f1462c0bee
|
[
"MIT"
] | null | null | null |
Final/P2Pchat.py
|
cainanBlack/csc321
|
9cebf9c3b61befda932732316b7406f1462c0bee
|
[
"MIT"
] | null | null | null |
Final/P2Pchat.py
|
cainanBlack/csc321
|
9cebf9c3b61befda932732316b7406f1462c0bee
|
[
"MIT"
] | null | null | null |
import netifaces
import argparse
import os
import zmq
import threading
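# The body of main() is in a truncated part of this record. A hedged sketch of a
# minimal ZeroMQ peer-to-peer chat loop (the port, CLI flags and PUSH/PULL socket
# pattern are assumptions, not the original design):
def main():
    parser = argparse.ArgumentParser(description='P2P chat')
    parser.add_argument('--port', default='9000')
    parser.add_argument('--peer', default=None, help='host:port of the peer')
    args = parser.parse_args()
    context = zmq.Context()
    receiver = context.socket(zmq.PULL)
    receiver.bind(f'tcp://*:{args.port}')
    def listen():
        while True:
            print(receiver.recv_string())
    threading.Thread(target=listen, daemon=True).start()
    if args.peer:
        sender = context.socket(zmq.PUSH)
        sender.connect(f'tcp://{args.peer}')
        while True:
            sender.send_string(input())
    else:
        threading.Event().wait()  # receive-only: just keep listening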
if __name__ == '__main__':
main()
| 26.634615 | 102 | 0.626715 |
cd68c50658ac006c458874597809bf7939658dff
| 83 |
py
|
Python
|
examples/object/reference/change_list.py
|
zqmillet/kinopico_python_book
|
0db4b0a904a1ba1b7e90cf971871e134941aeb65
|
[
"MIT"
] | null | null | null |
examples/object/reference/change_list.py
|
zqmillet/kinopico_python_book
|
0db4b0a904a1ba1b7e90cf971871e134941aeb65
|
[
"MIT"
] | null | null | null |
examples/object/reference/change_list.py
|
zqmillet/kinopico_python_book
|
0db4b0a904a1ba1b7e90cf971871e134941aeb65
|
[
"MIT"
] | null | null | null |
a = [1, 2, 3]
b = a
a[0] = 'gouliguojiashengsiyi'
print('a =', a)
print('b =', b)
| 11.857143 | 29 | 0.506024 |
cd6940cd949b8d012c79a302492e17dd59770ba1
| 2,267 |
py
|
Python
|
source/CTRW.py
|
tangxiangong/ClassTop
|
fdafdafd165672ae464210fb8c66c70256d50956
|
[
"MIT"
] | null | null | null |
source/CTRW.py
|
tangxiangong/ClassTop
|
fdafdafd165672ae464210fb8c66c70256d50956
|
[
"MIT"
] | null | null | null |
source/CTRW.py
|
tangxiangong/ClassTop
|
fdafdafd165672ae464210fb8c66c70256d50956
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# @Time : 2021/12/1 13:27
import numpy as np
from numpy import random
import matplotlib.pyplot as plt
from trajectory import Trajectory
from rnd import stable_rnd, skewed_stable_rnd
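# The CTRW class itself is in a truncated part of this record. A hedged sketch
# consistent with the calls below: CTRW(T, alpha, beta) draws waiting times
# (exponential for alpha = 1, heavy-tailed otherwise) and jumps (Gaussian for
# beta = 2, symmetric stable otherwise), and get() returns the event times and
# walk positions. The one-argument signatures of stable_rnd/skewed_stable_rnd
# are assumptions about the local rnd module, and the original presumably
# subclasses the imported Trajectory.
class CTRW:
    def __init__(self, T, alpha, beta):
        self.T, self.alpha, self.beta = T, alpha, beta
    def get(self):
        t, x = [0.0], [0.0]
        while t[-1] < self.T:
            tau = random.exponential() if self.alpha == 1 else skewed_stable_rnd(self.alpha)
            jump = random.randn() if self.beta == 2 else stable_rnd(self.beta)
            t.append(t[-1] + tau)
            x.append(x[-1] + jump)
        return np.array(t), np.array(x)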
if __name__ == "__main__":
m1 = CTRW(100, 1, 2)
t1, x1 = m1.get()
fig1 = plt.figure(1)
plt.step(t1, x1, where="post")
plt.xlabel("t")
plt.ylabel("x")
fig1.savefig("../figures/ctrw1.png")
m2 = CTRW(100, 0.7, 2)
t2, x2 = m2.get()
fig2 = plt.figure(2)
plt.step(t2, x2, where="post")
plt.xlabel("t")
plt.ylabel("x")
fig2.savefig("../figures/ctrw2.png")
m3 = CTRW(100, 1, 1.5)
t3, x3 = m3.get()
fig3 = plt.figure(3)
plt.step(t3, x3, where="post")
plt.xlabel("t")
plt.ylabel("x")
fig3.savefig("../figures/ctrw3.png")
m4 = CTRW(100, 0.7, 1.5)
t4, x4 = m4.get()
fig4 = plt.figure(4)
plt.step(t4, x4, where="post")
plt.xlabel("t")
plt.ylabel("x")
fig4.savefig("../figures/ctrw4.png")
| 27.313253 | 70 | 0.549625 |
cd6b149fb6473adbe7fd7149968a3e8e9f36d8bd
| 901 |
py
|
Python
|
src/conversion_spec_file_reader.py
|
Eldar1205/exchanger-python-demo-app
|
6733ff6044555f8c4639dc6e25baf4ef51401fd9
|
[
"MIT"
] | 7 |
2021-08-31T09:03:39.000Z
|
2021-09-27T13:45:58.000Z
|
src/conversion_spec_file_reader.py
|
Eldar1205/exchanger-python-demo-app
|
6733ff6044555f8c4639dc6e25baf4ef51401fd9
|
[
"MIT"
] | null | null | null |
src/conversion_spec_file_reader.py
|
Eldar1205/exchanger-python-demo-app
|
6733ff6044555f8c4639dc6e25baf4ef51401fd9
|
[
"MIT"
] | null | null | null |
import aiofiles
import asyncstdlib
from pydantic.types import PositiveFloat
from conversion_parameters import ConversionParameters
from conversion_spec import ConversionSpec, Currency
from conversion_spec_provider import ConversionSpecProvider
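# The reader class is in a truncated part of this record. A hedged sketch of how
# a file-backed ConversionSpecProvider might look given the imports above (the
# file format and the ConversionSpec/Currency constructors are assumptions):
class ConversionSpecFileReader(ConversionSpecProvider):
    def __init__(self, file_path: str) -> None:
        self._file_path = file_path

    async def provide_conversion_spec(self, conversion_parameters: ConversionParameters) -> ConversionSpec:
        async with aiofiles.open(self._file_path) as f:
            # assumed format: "<source_currency> <target_currency>"
            source, target = (await f.read()).split()[:2]
        return ConversionSpec(Currency(source), Currency(target))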
| 37.541667 | 109 | 0.783574 |
cd6b1d33d27551aa6e7a920f48a0b7633b6280b3
| 3,931 |
py
|
Python
|
Paris_G_1-2-3_v2.py
|
Gaspe-R/Rendez-vous-prefecture-Paris
|
e24d1bf0ae6ca5860ad858957c5e923c0ac3d85a
|
[
"MIT"
] | null | null | null |
Paris_G_1-2-3_v2.py
|
Gaspe-R/Rendez-vous-prefecture-Paris
|
e24d1bf0ae6ca5860ad858957c5e923c0ac3d85a
|
[
"MIT"
] | null | null | null |
Paris_G_1-2-3_v2.py
|
Gaspe-R/Rendez-vous-prefecture-Paris
|
e24d1bf0ae6ca5860ad858957c5e923c0ac3d85a
|
[
"MIT"
] | null | null | null |
from sqlite3 import Date
from twilio.rest import Client
from datetime import datetime
from playsound import playsound
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
import csv
import time
################################ "PREFCTURE DE PARIS" #####################################
######################## "Remise d'un titre de sjour tranger" ###########################
while True:
# New instance for Chrome
browser = webdriver.Chrome(ChromeDriverManager().install())
# Open the webpage
try:
browser.get('https://pprdv.interieur.gouv.fr/booking/create/989')
time.sleep(3)
# Save the window opener (current window, do not mistaken with tab... not the same)
main_window = browser.current_window_handle
# Accepter les cookies :
browser.find_element_by_xpath("//a[@onclick='javascript:accepter()']").click()
time.sleep(2)
# Click in checkbox "Veuillez cocher la case pour..." :
browser.find_element_by_xpath("//input[@name='condition']").click()
time.sleep(3)
# Click in the submit button :
browser.find_element_by_xpath("//input[@name='nextButton']").click()
time.sleep(3)
# Click in the radio button "Guichets 1-2 &3" :
browser.find_element_by_xpath("//input[@id='planning990']").click()
time.sleep(3)
# Click in the submit button 1 :
browser.find_element_by_xpath("//input[@type='submit']").click()
time.sleep(4)
##################################################
# Variables :
textNo = "Il n'existe plus de plage horaire libre pour votre demande de rendez-vous"
textOui = "Choix d'une plage horaire"
son = "./alert.wav" # ajouter le chemin de votre fichier audio pour l'alerte
url = browser.current_url
now = datetime.now()
Heure = now.strftime("%H:%M:%S")
Date = datetime.now().strftime("%d/%m/%Y")
#account Twilio :
account_sid = 'SID' # ajouter le SID Twilio
auth_token = 'token' # ajouter le Token Twilio
client = Client(account_sid, auth_token)
#log CSV:
header = ['Date', 'Heure', 'Prfecture', 'Disponibilit']
DataNo = [Date, Heure,'Paris G 1-2 et 3', 'Pas de Rendez-vous']
DataOui = [Date, Heure, 'Paris G 1-2 et 3', 'Rendez-vous Disponible']
##################################################
#Conditions :
if (textOui in browser.page_source):
browser.find_element_by_xpath("//input[@type='submit']").click()
print("")
print("RDV Disponible")
print("")
with open('./log.csv', 'a', newline='') as f: #ajouter le chemin de votre fichier log
writer = csv.writer(f)
writer.writerow(DataOui)
"""
# Send SMS Alert :
message = client.messages.create(
from_='votre numero twilio',
body = 'Rendez-vous prefecture disponible, https://pprdv.interieur.gouv.fr/booking/create/989',
to ='votre numero perso'
)
print(message.sid)
"""
#alert sound :
playsound(son)
time.sleep(900)
break
elif (textNo in browser.page_source):
playsound(son)
print("")
print("Pas de RDV")
print("")
with open('./log.csv', 'a', newline='') as f: #ajouter le chemin de votre fichier log
writer = csv.writer(f)
writer.writerow(DataNo)
time.sleep(30)
browser.quit()
except:
browser.quit()
time.sleep(60)
| 36.738318 | 126 | 0.522768 |
cd6d9b2b982fd93ff60ad7ad2c61547c26a40708
| 3,022 |
py
|
Python
|
movo_common/si_utils/src/si_utils/my_tf_listener.py
|
ALAN-NUS/kinova_movo
|
05a0451f5c563359ae0ffe3280e1df85caec9e55
|
[
"BSD-3-Clause"
] | 1 |
2021-03-26T06:33:28.000Z
|
2021-03-26T06:33:28.000Z
|
movo_common/si_utils/src/si_utils/my_tf_listener.py
|
ALAN-NUS/kinova_movo
|
05a0451f5c563359ae0ffe3280e1df85caec9e55
|
[
"BSD-3-Clause"
] | null | null | null |
movo_common/si_utils/src/si_utils/my_tf_listener.py
|
ALAN-NUS/kinova_movo
|
05a0451f5c563359ae0ffe3280e1df85caec9e55
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import rospy
import math
import tf
import geometry_msgs.msg
from geometry_msgs.msg import PoseStamped
from si_utils.lx_transformerROS import my_transformer
if __name__ == '__main__':
rospy.init_node('my_tf_listener')
listener = tf.TransformListener()
# my_trans = my_transformer()
rate = rospy.Rate(10.0)
while not rospy.is_shutdown():
try:
# look1 = listener.lookupTransform('/left_ee_link', '/link1', rospy.Time(0))
# look2 = listener.lookupTransform('/base_link', '/left_ee_link', rospy.Time(0))
# look3 = listener.lookupTransform('/base_link', '/link1', rospy.Time(0))
# rospy.loginfo(look3)
# rospy.loginfo(look2)
pose = PoseStamped()
pose.header.frame_id = '/link1'
pose2 = listener.transformPose('/base_link', pose)
rospy.loginfo(pose2)
# (trans,rot) = listener.lookupTransform('/base_link', '/ar_marker_1', rospy.Time(0))
# (trans,rot) = listener.lookupTransform('/base_link', '/left_ee_link', rospy.Time(0))
# (trans1,rot1) = listener.lookupTransform('/movo_camera_color_optical_frame', '/ar_marker_17', rospy.Time(0))
# (trans,rot) = listener.lookupTransform('/base_link', '/movo_camera_color_optical_frame', rospy.Time(0))
# (trans,rot) = listener.lookupTransform('/movo_camera_color_optical_frame', '/base_link', rospy.Time(0))
# (trans,rot) = listener.lookupTransform('/base_link', '/ar_marker_1', rospy.Time(0))
# pose = PoseStamped()
# pose.header.frame_id = 'ar_marker_1'
# rospy.loginfo("========== First trans ===========")
# pose1 = listener.transformPose('/movo_camera_color_optical_frame', pose)
# rospy.loginfo(pose1)
# rospy.loginfo("========== Second trans ===========")
# rospy.loginfo(listener.transformPose('/base_link', pose1))
# print(trans)
# print(rot)
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
print('test')
# rate.sleep()
'''
pose = PoseStamped()
pose.header.frame_id = '/ar_marker_17'
rospy.loginfo("========== First trans ===========")
listener.waitForTransform("/ar_marker_17", "/movo_camera_color_optical_frame", rospy.Time(), rospy.Duration(4.0))
pose1 = listener.transformPose('/movo_camera_color_optical_frame', pose)
rospy.loginfo(pose1)
rospy.loginfo("========== Second trans ===========")
rospy.loginfo(listener.transformPose('/base_link', pose1))
pose_nutStart_nut = PoseStamped()
pose_nutStart_nut.header.frame_id = '/nutStart'
pose_nutStart_ar = my_trans.tf.transformPose('/ar_marker_17', pose_nutStart_nut)
rospy.loginfo(pose_nutStart_ar)
pose_nutStart_ca = listener.transformPose('/movo_camera_color_optical_frame', pose_nutStart_ar)
rospy.loginfo(pose_nutStart_ca)
'''
| 35.139535 | 122 | 0.634348 |
cd6e8efff351684ee42b6f8c78aec9644cacd755
| 8,661 |
py
|
Python
|
acme_tiny.py
|
dennydai/docker-letsencrypt
|
898fa70665d321e527c7fcc463a57a66dbbdab26
|
[
"MIT"
] | 22 |
2015-12-06T06:19:43.000Z
|
2016-03-10T06:44:34.000Z
|
acme_tiny.py
|
dennydai/docker-letsencrypt
|
898fa70665d321e527c7fcc463a57a66dbbdab26
|
[
"MIT"
] | 1 |
2016-09-11T07:38:45.000Z
|
2016-09-11T10:50:26.000Z
|
acme_tiny.py
|
dennydai/docker-letsencrypt
|
898fa70665d321e527c7fcc463a57a66dbbdab26
|
[
"MIT"
] | 4 |
2015-12-22T01:25:16.000Z
|
2016-01-14T13:24:27.000Z
|
#!/usr/bin/env python
import argparse, subprocess, json, os, os.path, urllib2, sys, base64, binascii, time, \
hashlib, re, copy, textwrap
#CA = "https://acme-staging.api.letsencrypt.org"
CA = "https://acme-v01.api.letsencrypt.org"
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""\
This script automates the process of getting a signed TLS certificate from
Let's Encrypt using the ACME protocol. It will need to be run on your server
and have access to your private account key, so PLEASE READ THROUGH IT! It's
only ~200 lines, so it won't take long.
===Example Usage===
python acme_tiny.py --account-key ./account.key --csr ./domain.csr --acme-dir /usr/share/nginx/html/.well-known/acme-challenge/ > signed.crt
===================
===Example Crontab Renewal (once per month)===
0 0 1 * * python /path/to/acme_tiny.py --account-key /path/to/account.key --csr /path/to/domain.csr --acme-dir /usr/share/nginx/html/.well-known/acme-challenge/ > /path/to/signed.crt 2>> /var/log/acme_tiny.log
==============================================
""")
)
parser.add_argument("--account-key", required=True, help="path to your Let's Encrypt account private key")
parser.add_argument("--csr", required=True, help="path to your certificate signing request")
parser.add_argument("--acme-dir", required=True, help="path to the .well-known/acme-challenge/ directory")
args = parser.parse_args()
signed_crt = get_crt(args.account_key, args.csr, args.acme_dir)
sys.stdout.write(signed_crt)
| 43.522613 | 221 | 0.592772 |
cd6f36cb0dc0dd674280cb84b43ef766b0e9c395
| 14,691 |
py
|
Python
|
Draft/08_compare_original_GBS_Exome_biallelic_variation_alle_count.py
|
peipeiwang6/Genomic_prediction_in_Switchgrass
|
1fba3508c0d81d16e0629e3cf94ff4d174a85b13
|
[
"MIT"
] | null | null | null |
Draft/08_compare_original_GBS_Exome_biallelic_variation_alle_count.py
|
peipeiwang6/Genomic_prediction_in_Switchgrass
|
1fba3508c0d81d16e0629e3cf94ff4d174a85b13
|
[
"MIT"
] | null | null | null |
Draft/08_compare_original_GBS_Exome_biallelic_variation_alle_count.py
|
peipeiwang6/Genomic_prediction_in_Switchgrass
|
1fba3508c0d81d16e0629e3cf94ff4d174a85b13
|
[
"MIT"
] | null | null | null |
'''
input1: exome capture, biallelic indel matrix
input2: exome capture, biallelic SNP matrix
input3: GBS, biallelic indel matrix
input4: GBS, biallelic SNP matrix
input5: allele count file for exome homozygous or heterozygous genotype
input6: allele count file for GBS homozygous or heterozygous genotype
input7: tetraploid or octaploid
'''
import sys,os
import numpy as np
exome_indel = open(sys.argv[1],'r').readlines()
exome_snp = open(sys.argv[2],'r').readlines()
gbs_indel = open(sys.argv[3],'r').readlines()
gbs_snp = open(sys.argv[4],'r').readlines()
EP = {} #EP[pos] = 1
for inl in exome_indel[1:]:
tem = inl.split('\t')
EP[tem[0] + '_' + tem[1]] = 1
for inl in exome_snp[1:]:
tem = inl.split('\t')
EP[tem[0] + '_' + tem[1]] = 1
S = {} #shared position, S[pos] = 1
for inl in gbs_indel[1:]:
tem = inl.split('\t')
if tem[0] + '_' + tem[1] in EP:
S[tem[0] + '_' + tem[1]] = 1
for inl in gbs_snp[1:]:
tem = inl.split('\t')
if tem[0] + '_' + tem[1] in EP:
S[tem[0] + '_' + tem[1]] = 1
E = {} # E[pos][ind] = A/T
G = {} # G[pos][ind] = A/T
EN = {} # EN[i] = ind
GN = {} # GN[i] = ind
IND = {} # IND[ind] = 1
tem = exome_indel[0].strip().split('\t')
for i in range(4,len(tem)):
EN[i] = tem[i]
IND[tem[i]] = 1
tem = gbs_indel[0].strip().split('\t')
for i in range(4,len(tem)):
GN[i] = tem[i]
for inl in exome_indel[1:]:
tem = inl.strip().split('\t')
if tem[0] + '_' + tem[1] in S:
pos = tem[0] + '_' + tem[1]
E[pos] = {}
E[pos]['ref'] = tem[2]
E[pos]['alt'] = tem[3]
for i in range(4,len(tem)):
E[pos][EN[i]] = tem[i]
for inl in exome_snp[1:]:
tem = inl.strip().split('\t')
if tem[0] + '_' + tem[1] in S:
pos = tem[0] + '_' + tem[1]
E[pos] = {}
E[pos]['ref'] = tem[2]
E[pos]['alt'] = tem[3]
for i in range(4,len(tem)):
E[pos][EN[i]] = tem[i]
for inl in gbs_indel[1:]:
tem = inl.strip().split('\t')
if tem[0] + '_' + tem[1] in S:
pos = tem[0] + '_' + tem[1]
G[pos] = {}
G[pos]['ref'] = tem[2]
G[pos]['alt'] = tem[3]
for i in range(4,len(tem)):
G[pos][GN[i]] = tem[i]
for inl in gbs_snp[1:]:
tem = inl.strip().split('\t')
if tem[0] + '_' + tem[1] in S:
pos = tem[0] + '_' + tem[1]
G[pos] = {}
G[pos]['ref'] = tem[2]
G[pos]['alt'] = tem[3]
for i in range(4,len(tem)):
G[pos][GN[i]] = tem[i]
out = open('Biallelic_variation_%s_Exome_VS_GBS.txt'%sys.argv[7],'w')
Ind = sorted(IND.keys())
title = 'Chr\tPos\tRef\tAlt'
for ind in Ind:
title = title + '\t' + ind
out.write(title + '\n')
for pos in S:
res = pos.split('_')[0] + '\t' + pos.split('_')[1]
if E[pos]['ref'] == G[pos]['ref']:
res = res + '\t' + E[pos]['ref']
else:
res = res + '\t' + E[pos]['ref'] + '|' + G[pos]['ref']
if E[pos]['alt'] == G[pos]['alt']:
res = res + '\t' + E[pos]['alt']
else:
res = res + '\t' + E[pos]['alt'] + '|' + G[pos]['alt']
for ind in Ind:
if E[pos][ind] == G[pos][ind] or (E[pos][ind].split('/')[0] == G[pos][ind].split('/')[1] and E[pos][ind].split('/')[1] == G[pos][ind].split('/')[0]):
res = res + '\t' + E[pos][ind]
else:
res = res + '\t' + E[pos][ind] + '|' + G[pos][ind]
out.write(res + '\n')
out.close()
ori_exome_indel = open(sys.argv[1],'r').readlines()
ori_exome_snp = open(sys.argv[2],'r').readlines()
ori_gbs_indel = open(sys.argv[3],'r').readlines()
ori_gbs_snp = open(sys.argv[4],'r').readlines()
ori_out = open('Shared_Biallelic_variation_%s_original_Exome_VS_GBS.txt'%sys.argv[7],'w')
out = open('Distribution_of_discrepancy_Biallelic_variation_%s_between_exome_and_GBS.txt'%sys.argv[7],'w')
ori_out.write(title + '\n')
O_exome = {}
O_gbs = {}
EN = {} # EN[i] = ind
GN = {} # GN[i] = ind
tem = ori_exome_indel[0].strip().split('\t')
for i in range(4,len(tem)):
EN[i] = tem[i]
IND[tem[i]] = 1
tem = ori_gbs_indel[0].strip().split('\t')
for i in range(4,len(tem)):
GN[i] = tem[i]
for inl in ori_exome_indel[1:]:
tem = inl.strip().split('\t')
if tem[0] + '_' + tem[1] in S:
pos = tem[0] + '_' + tem[1]
O_exome[pos] = {}
O_exome[pos]['ref'] = tem[2]
O_exome[pos]['alt'] = tem[3]
for i in range(4,len(tem)):
O_exome[pos][EN[i]] = tem[i]
for inl in ori_exome_snp[1:]:
tem = inl.strip().split('\t')
if tem[0] + '_' + tem[1] in S:
pos = tem[0] + '_' + tem[1]
O_exome[pos] = {}
O_exome[pos]['ref'] = tem[2]
O_exome[pos]['alt'] = tem[3]
for i in range(4,len(tem)):
O_exome[pos][EN[i]] = tem[i]
for inl in ori_gbs_indel[1:]:
tem = inl.strip().split('\t')
if tem[0] + '_' + tem[1] in S:
pos = tem[0] + '_' + tem[1]
O_gbs[pos] = {}
O_gbs[pos]['ref'] = tem[2]
O_gbs[pos]['alt'] = tem[3]
for i in range(4,len(tem)):
O_gbs[pos][GN[i]] = tem[i]
for inl in ori_gbs_snp[1:]:
tem = inl.strip().split('\t')
if tem[0] + '_' + tem[1] in S:
pos = tem[0] + '_' + tem[1]
O_gbs[pos] = {}
O_gbs[pos]['ref'] = tem[2]
O_gbs[pos]['alt'] = tem[3]
for i in range(4,len(tem)):
O_gbs[pos][GN[i]] = tem[i]
if sys.argv[7] == 'octaploid':
N1 = 0 ### Exome has variation, GBS is ./.
N2 = 0 ### have same variation
N3 = 0 ### Exome has hetero(AATT), GBS has homo
N3_02 = 0 ### Exome has hetero(ATTT or AAAT), GBS has homo
N3_03 = 0 ### Exome has hetero(ATTT or AAAT), GBS has hetero(AATT)
N4 = 0 ### Exome is ./., GBS has variation
N5 = 0 ### Exome has homo, GBS has hetero(AATT)
N5_02 = 0 ### Exome has homo, GBS has hetero(ATTT or AAAT)
N5_03 = 0 ### Exome has hetero(AATT), GBS has hetero(ATTT or AAAT)
N5_04 = 0 ### Exome has hetero(ATTT), GBS has hetero(TTTA)
N6 = 0 ### both are ./.
N7 = 0 ### both homo but different variation
out.write('Chr\tpos\tID\tExome_SNP\tGBS_SNP\tType\n')
for pos in S:
res = pos.split('_')[0] + '\t' + pos.split('_')[1]
if O_exome[pos]['ref'] == O_gbs[pos]['ref']:
res = res + '\t' + O_exome[pos]['ref']
else:
res = res + '\t' + O_exome[pos]['ref'] + '|' + O_gbs[pos]['ref']
print(pos)
if O_exome[pos]['alt'] == O_gbs[pos]['alt']:
res = res + '\t' + O_exome[pos]['alt']
else:
res = res + '\t' + O_exome[pos]['alt'] + '|' + O_gbs[pos]['alt']
print(pos)
for ind in Ind:
if O_exome[pos][ind] == O_gbs[pos][ind] or sorted(O_exome[pos][ind].split('/')) == sorted(O_gbs[pos][ind].split('/')):
res = res + '\t' + O_exome[pos][ind]
else:
res = res + '\t' + O_exome[pos][ind] + '|' + O_gbs[pos][ind]
### have same SNPs, AATT == TTAA, ATTT == TTTA
if (O_exome[pos][ind] == O_gbs[pos][ind] or sorted(O_exome[pos][ind].split('/')) == sorted(O_gbs[pos][ind].split('/'))) and O_exome[pos][ind]!= './././.':
N2 += 1
### both are ./.
elif O_exome[pos][ind] == O_gbs[pos][ind] and O_exome[pos][ind]== './././.':
N6 += 1
### Exome has SNPs, GBS is ./.
elif O_exome[pos][ind] != './././.' and O_gbs[pos][ind] == './././.':
N1 += 1
### Exome is ./., GBS has SNPs
elif O_exome[pos][ind] == './././.' and O_gbs[pos][ind] != './././.':
N4 += 1
### Exome has homo, GBS has hetero(AATT)
elif len(np.unique(O_exome[pos][ind].split('/'))) == 1 and len(np.unique(O_gbs[pos][ind].split('/'))) == 2 and O_exome[pos][ind]!= './.' and O_gbs[pos][ind].split('/')[1] != O_gbs[pos][ind].split('/')[2]:
N5 += 1
out.write('%s\t%s\t%s\t%s\t%s\tExome_homo_GBS_hetero_AATT\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
### Exome has homo, GBS has hetero(ATTT or AAAT)
elif len(np.unique(O_exome[pos][ind].split('/'))) == 1 and len(np.unique(O_gbs[pos][ind].split('/'))) == 2 and O_exome[pos][ind]!= './.' and O_gbs[pos][ind].split('/')[1] == O_gbs[pos][ind].split('/')[2]:
N5_02 += 1
out.write('%s\t%s\t%s\t%s\t%s\tExome_homo_GBS_hetero_ATTT\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
### Exome has AATT, GBS has hetero(ATTT or AAAT)
elif len(np.unique(O_exome[pos][ind].split('/'))) == 2 and len(np.unique(O_gbs[pos][ind].split('/'))) == 2 and O_gbs[pos][ind].split('/')[1] == O_gbs[pos][ind].split('/')[2] and O_exome[pos][ind].split('/')[1] != O_exome[pos][ind].split('/')[2]:
N5_03 += 1
out.write('%s\t%s\t%s\t%s\t%s\tExome_hetero_AATT_GBS_hetero_ATTT\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
### Exome has hetero(ATTT), GBS has hetero(TTTA)
elif len(np.unique(O_exome[pos][ind].split('/'))) == 2 and len(np.unique(O_gbs[pos][ind].split('/'))) == 2 and O_gbs[pos][ind].split('/')[1] == O_gbs[pos][ind].split('/')[2] and O_exome[pos][ind].split('/')[1] == O_exome[pos][ind].split('/')[2] and sorted(O_exome[pos][ind].split('/')) != sorted(O_gbs[pos][ind].split('/')):
N5_04 += 1
out.write('%s\t%s\t%s\t%s\t%s\tExome_hetero_ATTT_GBS_hetero_AAAT\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
### Exome has hetero(AATT), GBS has homo
elif len(np.unique(O_exome[pos][ind].split('/'))) == 2 and len(np.unique(O_gbs[pos][ind].split('/'))) == 1 and O_exome[pos][ind].split('/')[1] != O_exome[pos][ind].split('/')[2] and O_gbs[pos][ind] != './.':
N3 += 1
out.write('%s\t%s\t%s\t%s\t%s\tExome_hetero_AATT_GBS_homo\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
### Exome has hetero(ATTT or AAAT), GBS has homo
elif len(np.unique(O_exome[pos][ind].split('/'))) == 2 and len(np.unique(O_gbs[pos][ind].split('/'))) == 1 and O_exome[pos][ind].split('/')[1] == O_exome[pos][ind].split('/')[2] and O_gbs[pos][ind] != './.':
N3_02 += 1
out.write('%s\t%s\t%s\t%s\t%s\tExome_hetero_ATTT_GBS_homo\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
### Exome has hetero(ATTT or AAAT), GBS has hetero(AATT)
elif len(np.unique(O_exome[pos][ind].split('/'))) == 2 and len(np.unique(O_gbs[pos][ind].split('/'))) == 2 and O_exome[pos][ind].split('/')[1] == O_exome[pos][ind].split('/')[2] and O_gbs[pos][ind].split('/')[1] != O_gbs[pos][ind].split('/')[2] :
N3_03 += 1
out.write('%s\t%s\t%s\t%s\t%s\tExome_hetero_ATTT_GBS_hetero_AATT\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
### both homo, but diff
elif len(np.unique(O_exome[pos][ind].split('/'))) == 1 and len(np.unique(O_gbs[pos][ind].split('/'))) == 1 and O_exome[pos][ind]!=O_gbs[pos][ind] and O_exome[pos][ind] != './././.' and O_gbs[pos][ind]!= './././.':
N7 += 1
print([O_exome[pos][ind],O_gbs[pos][ind]])
out.write('%s\t%s\t%s\t%s\t%s\tBoth_homo_differ\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
ori_out.write(res + '\n')
ori_out.close()
out.close()
print([N1,N2,N3,N3_02,N3_03,N4,N5,N5_02,N5_03,N5_04,N6,N7])
if sys.argv[7] == 'tetraploid':
N1 = 0 ### Exome has SNPs, GBS is ./.
N2 = 0 ### have same SNPs
N3 = 0 ### Exome has hetero, GBS has homo
N4 = 0 ### Exome is ./., GBS has SNPs
N5 = 0 ### Exome has homo, GBS has hetero
N6 = 0 ### both are ./.
N7 = 0 ### both homo but different SNPs
out.write('Chr\tpos\tID\tExome_SNP\tGBS_SNP\tType\n')
for pos in S:
res = pos.split('_')[0] + '\t' + pos.split('_')[1]
if O_exome[pos]['ref'] == O_gbs[pos]['ref']:
res = res + '\t' + O_exome[pos]['ref']
else:
res = res + '\t' + O_exome[pos]['ref'] + '|' + O_gbs[pos]['ref']
if O_exome[pos]['alt'] == O_gbs[pos]['alt']:
res = res + '\t' + O_exome[pos]['alt']
else:
res = res + '\t' + O_exome[pos]['alt'] + '|' + O_gbs[pos]['alt']
for ind in Ind:
if O_exome[pos][ind] == O_gbs[pos][ind] or (O_exome[pos][ind].split('/')[0] == O_gbs[pos][ind].split('/')[1] and O_exome[pos][ind].split('/')[1] == O_gbs[pos][ind].split('/')[0]):
res = res + '\t' + O_exome[pos][ind]
else:
res = res + '\t' + O_exome[pos][ind] + '|' + O_gbs[pos][ind]
### have same SNPs
if (O_exome[pos][ind] == O_gbs[pos][ind] or (O_exome[pos][ind].split('/')[0] == O_gbs[pos][ind].split('/')[1] and O_exome[pos][ind].split('/')[1] == O_gbs[pos][ind].split('/')[0])) and O_exome[pos][ind]!= './.':
N2 += 1
### both are ./.
elif O_exome[pos][ind] == O_gbs[pos][ind] and O_exome[pos][ind]== './.':
N6 += 1
### Exome has SNPs, GBS is ./.
elif O_exome[pos][ind] != './.' and O_gbs[pos][ind] == './.':
N1 += 1
### Exome is ./., GBS has SNPs
elif O_exome[pos][ind] == './.' and O_gbs[pos][ind] != './.':
N4 += 1
### Exome has homo, GBS has hetero
elif O_exome[pos][ind].split('/')[0] == O_exome[pos][ind].split('/')[1] and O_exome[pos][ind]!= './.' and O_gbs[pos][ind].split('/')[0] != O_gbs[pos][ind].split('/')[1]:
N5 += 1
out.write('%s\t%s\t%s\t%s\t%s\tExome_homo_GBS_hetero\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
### Exome has hetero, GBS has homo
elif O_exome[pos][ind].split('/')[0] != O_exome[pos][ind].split('/')[1] and O_gbs[pos][ind].split('/')[0] == O_gbs[pos][ind].split('/')[1] and O_gbs[pos][ind] != './.':
N3 += 1
out.write('%s\t%s\t%s\t%s\t%s\tExome_hetero_GBS_homo\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
else:
N7 += 1
print([O_exome[pos][ind],O_gbs[pos][ind]])
out.write('%s\t%s\t%s\t%s\t%s\tBoth_homo_differ\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
ori_out.write(res + '\n')
ori_out.close()
out.close()
print([N1,N2,N3,N4,N5,N6,N7])
inp = open('Distribution_of_discrepancy_Biallelic_variation_%s_between_exome_and_GBS.txt'%sys.argv[7],'r').readlines()
out = open('Distribution_of_discrepancy_Biallelic_variation_%s_between_exome_and_GBS_alle_count.txt'%sys.argv[7],'w')
P = {}
for inl in inp[1:]:
tem = inl.split('\t')
chr = tem[0]
pos = tem[1]
ind = tem[2]
if chr not in P:
P[chr] = {}
if pos not in P[chr]:
P[chr][pos] = {}
if ind not in P[chr][pos]:
P[chr][pos][ind] = [0,0,0,0]
Exome = open(sys.argv[5],'r')
inl = Exome.readline()
inl = Exome.readline()
while inl:
tem = inl.split('\t')
chr = tem[0]
pos = tem[1]
ind = tem[2]
if chr in P:
if pos in P[chr]:
if ind in P[chr][pos]:
P[chr][pos][ind][0] = int(tem[6])
P[chr][pos][ind][1] = int(tem[7])
inl = Exome.readline()
GBS = open(sys.argv[6],'r')
inl = GBS.readline()
inl = GBS.readline()
while inl:
tem = inl.split('\t')
chr = tem[0]
pos = tem[1]
ind = tem[2]
if chr in P:
if pos in P[chr]:
if ind in P[chr][pos]:
P[chr][pos][ind][2] = int(tem[6])
P[chr][pos][ind][3] = int(tem[7])
inl = GBS.readline()
out.write('Chr\tPos\tInd\tExome_SNP\tGBS_SNP\tType\tExome_alle_count\tExome_read_count\tGBS_alle_count\tGBS_read_count\n')
for inl in inp[1:]:
tem = inl.split('\t')
chr = tem[0]
pos = tem[1]
ind = tem[2]
if chr not in P:
P[chr] = {}
if pos not in P[chr]:
P[chr][pos] = {}
if ind not in P[chr][pos]:
P[chr][pos][ind] = [0,0,0,0]
out.write('%s\t%s\t%s\t%s\t%s\n'%(inl.strip(),P[chr][pos][ind][0],P[chr][pos][ind][1],P[chr][pos][ind][2],P[chr][pos][ind][3]))
out.close()
| 39.176 | 327 | 0.571166 |
cd7225d8ec41e4d30a72fb83efb498273f5b3bbc
| 132 |
py
|
Python
|
platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/viper/calculators/calc_frame.py
|
PascalGuenther/gecko_sdk
|
2e82050dc8823c9fe0e8908c1b2666fb83056230
|
[
"Zlib"
] | 82 |
2016-06-29T17:24:43.000Z
|
2021-04-16T06:49:17.000Z
|
platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/viper/calculators/calc_frame.py
|
PascalGuenther/gecko_sdk
|
2e82050dc8823c9fe0e8908c1b2666fb83056230
|
[
"Zlib"
] | 6 |
2022-01-12T18:22:08.000Z
|
2022-03-25T10:19:27.000Z
|
platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/viper/calculators/calc_frame.py
|
PascalGuenther/gecko_sdk
|
2e82050dc8823c9fe0e8908c1b2666fb83056230
|
[
"Zlib"
] | 56 |
2016-08-02T10:50:50.000Z
|
2021-07-19T08:57:34.000Z
|
from pyradioconfig.parts.bobcat.calculators.calc_frame import Calc_Frame_Bobcat
| 33 | 79 | 0.863636 |
cd7529c73cff8550931b72e595537b4c1b291bee
| 1,940 |
py
|
Python
|
scripts/stats_wrapper.py
|
gpertea/regtools
|
a59d5dbd3268b0d83412e6fe81cf7e924c7bcb7c
|
[
"MIT"
] | 70 |
2015-08-05T21:32:51.000Z
|
2021-11-26T13:26:33.000Z
|
scripts/stats_wrapper.py
|
gpertea/regtools
|
a59d5dbd3268b0d83412e6fe81cf7e924c7bcb7c
|
[
"MIT"
] | 145 |
2015-08-05T22:27:58.000Z
|
2022-03-14T21:50:17.000Z
|
scripts/stats_wrapper.py
|
gpertea/regtools
|
a59d5dbd3268b0d83412e6fe81cf7e924c7bcb7c
|
[
"MIT"
] | 29 |
2015-08-01T02:19:40.000Z
|
2021-12-16T20:02:40.000Z
|
import glob
import subprocess
import os
import argparse
import shutil
input_parser = argparse.ArgumentParser(
description="Run RegTools stats script",
)
input_parser.add_argument(
'tag',
help="Variant tag parameter used to run RegTools.",
)
args = input_parser.parse_args()
tag = args.tag
cwd = os.getcwd()
lines_per_file = 25000
smallfile = None
with open(f'all_splicing_variants_{tag}.bed', 'r') as bigfile:
header = bigfile.readline()
for lineno, line in enumerate(bigfile):
if lineno % lines_per_file == 0:
if smallfile:
smallfile.close()
small_filename = 'small_file_{}.txt'.format(lineno + lines_per_file)
smallfile = open(small_filename, "w")
smallfile.write(header)
smallfile.write(line)
if smallfile:
smallfile.close()
#get chunks
files = glob.glob('small_file_*')
files.sort()
number_of_in_files = len(files)
for file in files:
subprocess.run(f'Rscript --vanilla compare_junctions_hist_v2.R {tag} {file}', shell=True, check=True)
output_files = glob.glob("*_out.tsv")
output_files.sort()  # glob lacks reliable ordering, so impose your own if output order matters
number_of_out_files = len(output_files)
if number_of_in_files == number_of_out_files:
with open(f'compare_junctions/hist/junction_pvalues_{tag}.tsv', 'wb') as outfile:
for i, fname in enumerate(output_files):
with open(fname, 'rb') as infile:
if i != 0:
infile.readline() # Throw away header on all but first file
# Block copy rest of file from input to output without parsing
shutil.copyfileobj(infile, outfile)
print(fname + " has been imported.")
else:
print("Number of output files doesn't match the number of input files that should have been processed")
files = glob.glob('small_file_*')
for file in files:
os.remove(file)
| 33.448276 | 107 | 0.676804 |
cd7892510c7f345ccc184879db2d6bb6e417c44a
| 451 |
py
|
Python
|
lib/model/utils/plt_loss.py
|
PhoneSix/Domain-Contrast
|
5c674b581bce9beacf5bc0dd13113f33c4050495
|
[
"MIT"
] | 4 |
2021-07-31T01:04:15.000Z
|
2022-03-09T07:23:10.000Z
|
lib/model/utils/plt_loss.py
|
PhoneSix/Domain-Contrast
|
5c674b581bce9beacf5bc0dd13113f33c4050495
|
[
"MIT"
] | null | null | null |
lib/model/utils/plt_loss.py
|
PhoneSix/Domain-Contrast
|
5c674b581bce9beacf5bc0dd13113f33c4050495
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import os
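# The plotting helper is in a truncated part of this record. A hedged sketch of
# a typical loss-curve plot (the function name and arguments are assumptions):
def plot_loss(losses, save_dir=None, title='Training loss'):
    plt.figure()
    plt.plot(np.arange(len(losses)), losses)
    plt.xlabel('iteration')
    plt.ylabel('loss')
    plt.title(title)
    if save_dir is not None:
        plt.savefig(os.path.join(save_dir, 'loss.png'))
    plt.show()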
| 25.055556 | 47 | 0.62306 |
cd78d6e1151155e18754cebc1cc2d5b9e9efa63f
| 3,267 |
py
|
Python
|
ocellaris/utils/alarm.py
|
TormodLandet/Ocellaris
|
6b4b2515fb881b1ed8d8fd8d8c23a8e1990ada58
|
[
"Apache-2.0"
] | 1 |
2017-11-07T12:19:44.000Z
|
2017-11-07T12:19:44.000Z
|
ocellaris/utils/alarm.py
|
TormodLandet/Ocellaris
|
6b4b2515fb881b1ed8d8fd8d8c23a8e1990ada58
|
[
"Apache-2.0"
] | null | null | null |
ocellaris/utils/alarm.py
|
TormodLandet/Ocellaris
|
6b4b2515fb881b1ed8d8fd8d8c23a8e1990ada58
|
[
"Apache-2.0"
] | 2 |
2018-05-02T17:17:01.000Z
|
2019-03-11T13:09:40.000Z
|
# Copyright (C) 2018-2019 Tormod Landet
# SPDX-License-Identifier: Apache-2.0
"""
A timeout context manager based on SIGALRM, Permits multiple
SIGALRM events to be queued.
Uses a `heapq` to store the objects to be called when an alarm signal is
raised, so that the next alarm is always at the top of the heap.
Note: SIGALRM does not work on Windows!
Code from ActiveState Python recipes
http://code.activestate.com/recipes/577600-queue-for-managing-multiple-sigalrm-alarms-concurr/
modified by stackoverflow user "James":
https://stackoverflow.com/a/34999808
"""
import heapq
import signal
from time import time
alarmlist = []
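# The three private helpers used below fall in a truncated part of this record.
# The bodies here follow the ActiveState recipe this module credits; treat them
# as a hedged reconstruction rather than a verbatim copy.
def __new_alarm(sec, func, args, keys):
    """Create a new alarm heap entry: (absolute due time, callable, args, kwargs)."""
    return (time() + sec, func, args, keys)

def __next_alarm():
    """Seconds until the earliest queued alarm, or None if the queue is empty."""
    return int(round(alarmlist[0][0] - time())) if alarmlist else None

def __set_alarm():
    """Arm SIGALRM for the earliest queued alarm (at least 1 second away)."""
    return signal.alarm(max(__next_alarm(), 1))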
def __clear_alarm():
"""
Clear an existing alarm.
If the alarm signal was set to a callable other than our own, queue the
previous alarm settings.
"""
oldsec = signal.alarm(0)
oldfunc = signal.signal(signal.SIGALRM, __alarm_handler)
if oldsec > 0 and oldfunc != __alarm_handler:
heapq.heappush(alarmlist, (__new_alarm(oldsec, oldfunc, [], {})))
def __alarm_handler(*_args):
"""
Handle an alarm by calling any due heap entries and resetting the alarm.
Note that multiple heap entries might get called, especially if calling an
entry takes a lot of time.
"""
try:
nextt = __next_alarm()
while nextt is not None and nextt <= 0:
(_tm, func, args, keys) = heapq.heappop(alarmlist)
func(*args, **keys)
nextt = __next_alarm()
finally:
if alarmlist:
__set_alarm()
def alarm(sec, func, *args, **keys):
"""
Set an alarm.
When the alarm is raised in `sec` seconds, the handler will call `func`,
passing `args` and `keys`. Return the heap entry (which is just a big
tuple), so that it can be cancelled by calling `cancel()`.
"""
__clear_alarm()
try:
newalarm = __new_alarm(sec, func, args, keys)
heapq.heappush(alarmlist, newalarm)
return newalarm
finally:
__set_alarm()
def cancel(alarm):
"""
Cancel an alarm by passing the heap entry returned by `alarm()`.
It is an error to try to cancel an alarm which has already occurred.
"""
__clear_alarm()
try:
alarmlist.remove(alarm)
heapq.heapify(alarmlist)
finally:
if alarmlist:
__set_alarm()
| 25.724409 | 94 | 0.653811 |
cd79597c4dc624f2537254fe68c7bb39e5b6003c
| 2,549 |
py
|
Python
|
apps/insar.py
|
giswqs/streamlit-insar
|
e2c0897f01aeff96cd119cce8cf6dd3d8fb0e455
|
[
"MIT"
] | 5 |
2021-12-14T23:28:36.000Z
|
2022-02-27T14:35:29.000Z
|
apps/insar.py
|
giswqs/streamlit-insar
|
e2c0897f01aeff96cd119cce8cf6dd3d8fb0e455
|
[
"MIT"
] | null | null | null |
apps/insar.py
|
giswqs/streamlit-insar
|
e2c0897f01aeff96cd119cce8cf6dd3d8fb0e455
|
[
"MIT"
] | null | null | null |
import folium
import altair as alt
import leafmap.foliumap as leafmap
import pandas as pd
import streamlit as st
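# The Streamlit page body is in a truncated part of this record. A hedged sketch
# of a minimal leafmap-based app() in the style of other streamlit-geospatial
# pages; the InSAR data layers themselves would come from the original, so none
# are invented here.
def app():
    st.title('InSAR')
    m = leafmap.Map(center=[40, -100], zoom=4)
    m.add_basemap('HYBRID')
    m.to_streamlit(height=700)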
| 25.747475 | 85 | 0.433503 |
cd7a330fb695d24e5d3e2270fbbe2e1e0d11d2dc
| 2,105 |
py
|
Python
|
solve_net.py
|
a1exwang/theano-cnn-intro
|
5f6ecdcb2908afb34a7d94e69b1d1ab13beb3c62
|
[
"MIT"
] | null | null | null |
solve_net.py
|
a1exwang/theano-cnn-intro
|
5f6ecdcb2908afb34a7d94e69b1d1ab13beb3c62
|
[
"MIT"
] | null | null | null |
solve_net.py
|
a1exwang/theano-cnn-intro
|
5f6ecdcb2908afb34a7d94e69b1d1ab13beb3c62
|
[
"MIT"
] | null | null | null |
from utils import LOG_INFO
import numpy as np
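# The training helpers (minibatch iterator, train/test loops) are in a truncated
# part of this record. A hedged sketch of the usual shuffled minibatch iterator:
def data_iterator(x, y, batch_size, shuffle=True):
    indx = np.arange(len(x))
    if shuffle:
        np.random.shuffle(indx)
    for start_idx in range(0, len(x), batch_size):
        end_idx = min(start_idx + batch_size, len(x))
        idx = indx[start_idx:end_idx]
        yield x[idx], y[idx]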
| 39.716981 | 116 | 0.443705 |
cd7b0a77a1f93e1e0546019ec5051874f1e448ee
| 1,199 |
py
|
Python
|
playground/test1.py
|
mathee92/unirentalz
|
803c58628ebda002e2c127db11fbaddf181ef394
|
[
"MIT"
] | null | null | null |
playground/test1.py
|
mathee92/unirentalz
|
803c58628ebda002e2c127db11fbaddf181ef394
|
[
"MIT"
] | null | null | null |
playground/test1.py
|
mathee92/unirentalz
|
803c58628ebda002e2c127db11fbaddf181ef394
|
[
"MIT"
] | null | null | null |
# -----------
# User Instructions
#
# Modify the valid_month() function to verify
# whether the data a user enters is a valid
# month. If the passed in parameter 'month'
# is not a valid month, return None.
# If 'month' is a valid month, then return
# the name of the month with the first letter
# capitalized.
#
import string
import cgi
months = ['January',
'February',
'March',
'April',
'May',
'June',
'July',
'August',
'September',
'October',
'November',
'December']
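# The exercise solution itself sits in a truncated part of this record. A
# minimal sketch that satisfies the instructions above:
def valid_month(month):
    if month:
        cap_month = month.capitalize()
        if cap_month in months:
            return cap_month
    return None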
| 18.734375 | 46 | 0.539616 |
cd7da929a4d4176f292520c09ac6f877772c0b49
| 2,274 |
py
|
Python
|
hookio/logs.py
|
Marak/hook.io-sdk-python
|
722b04eb0832ef712d5dcd491899996088e1aa8b
|
[
"Unlicense"
] | 1 |
2021-06-15T11:52:44.000Z
|
2021-06-15T11:52:44.000Z
|
hookio/logs.py
|
Marak/hook.io-sdk-python
|
722b04eb0832ef712d5dcd491899996088e1aa8b
|
[
"Unlicense"
] | null | null | null |
hookio/logs.py
|
Marak/hook.io-sdk-python
|
722b04eb0832ef712d5dcd491899996088e1aa8b
|
[
"Unlicense"
] | null | null | null |
import sys
import weakref
import json
import logging
from .utils import opt_json, Response2JSONLinesIterator
from six import StringIO
log = logging.getLogger(__name__)
| 35.53125 | 87 | 0.575638 |
cd7e21c2d43aa6b5ca80b05a26cc762c012f19a7
| 228 |
py
|
Python
|
data/__init__.py
|
opconty/keras_std
|
26cbe25c525128a067a97157bca0b060f40e5ec8
|
[
"Apache-2.0"
] | 18 |
2019-07-16T10:54:29.000Z
|
2021-03-16T00:34:23.000Z
|
data/__init__.py
|
opconty/keras_std
|
26cbe25c525128a067a97157bca0b060f40e5ec8
|
[
"Apache-2.0"
] | 2 |
2019-08-26T11:40:12.000Z
|
2019-12-07T13:18:48.000Z
|
data/__init__.py
|
opconty/keras_std
|
26cbe25c525128a067a97157bca0b060f40e5ec8
|
[
"Apache-2.0"
] | 8 |
2019-07-17T08:26:10.000Z
|
2021-03-16T00:34:17.000Z
|
#-*- coding:utf-8 -*-
#'''
# Created on 19-7-16 2:14
#
# @Author: Greg Gao(laygin)
#'''
from .synth_text import SynthTextConfig, SynthTextDataset
from .icdar13 import IcdarConfig, IcdarDataset
from .img_aug import resize_image
| 25.333333 | 57 | 0.736842 |
cd7f21d270d7885499684e88d3eb5ad2fac11de9
| 6,376 |
py
|
Python
|
alberto/annotation/train.py
|
lettomobile/DeepPoseKit
|
a922d2d99cd55d0a3909c1f3f8b2bf8c377ff503
|
[
"Apache-2.0"
] | 1 |
2021-11-01T02:08:00.000Z
|
2021-11-01T02:08:00.000Z
|
alberto/annotation/train.py
|
albertoursino/DeepPoseKit
|
a922d2d99cd55d0a3909c1f3f8b2bf8c377ff503
|
[
"Apache-2.0"
] | null | null | null |
alberto/annotation/train.py
|
albertoursino/DeepPoseKit
|
a922d2d99cd55d0a3909c1f3f8b2bf8c377ff503
|
[
"Apache-2.0"
] | null | null | null |
from alberto.annotation import annotation_set
import numpy as np
from deepposekit.io import TrainingGenerator, DataGenerator
from deepposekit.augment import FlipAxis
import imgaug.augmenters as iaa
import imgaug as ia
from deepposekit.models import StackedHourglass
from deepposekit.models import load_model
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
from deepposekit.callbacks import Logger, ModelCheckpoint
import time
from os.path import expanduser
HOME = annotation_set.HOME
IMAGE_SIZE = annotation_set.IMAGE_SIZE
TYPE = annotation_set.TYPE
data_generator = DataGenerator(
datapath=HOME + '/deepposekit-data/datasets/{}/annotation_set_{}_{}.h5'.format(TYPE, IMAGE_SIZE[0], IMAGE_SIZE[1]))
image, keypoints = data_generator[0]
plt.figure(figsize=(5, 5))
image = image[0] if image.shape[-1] == 3 else image[0, ..., 0]
cmap = None if image.shape[-1] == 3 else 'gray'
plt.imshow(image, cmap=cmap, interpolation='none')
for idx, jdx in enumerate(data_generator.graph):
if jdx > -1:
x1 = keypoints[0, idx, 0]
x2 = keypoints[0, jdx, 0]
if (0 <= x1 <= IMAGE_SIZE[0]) and (0 <= x2 <= IMAGE_SIZE[0]):
plt.plot(
[keypoints[0, idx, 0], keypoints[0, jdx, 0]],
[keypoints[0, idx, 1], keypoints[0, jdx, 1]],
'r-'
)
plt.scatter(keypoints[0, :, 0], keypoints[0, :, 1], c=np.arange(data_generator.keypoints_shape[0]), s=50, cmap=plt.cm.hsv, zorder=3)
plt.show()
# Augmentation
augmenter = []
augmenter.append(FlipAxis(data_generator, axis=0)) # flip image up-down
augmenter.append(FlipAxis(data_generator, axis=1)) # flip image left-right
sometimes = []
sometimes.append(iaa.Affine(scale={"x": (0.95, 1.05), "y": (0.95, 1.05)},
translate_percent={'x': (-0.05, 0.05), 'y': (-0.05, 0.05)},
shear=(-8, 8),
order=ia.ALL,
cval=ia.ALL,
mode=ia.ALL)
)
sometimes.append(iaa.Affine(scale=(0.8, 1.2),
mode=ia.ALL,
order=ia.ALL,
cval=ia.ALL)
)
augmenter.append(iaa.Sometimes(0.75, sometimes))
augmenter.append(iaa.Affine(rotate=(-180, 180),
mode=ia.ALL,
order=ia.ALL,
cval=ia.ALL)
)
augmenter = iaa.Sequential(augmenter)
# image, keypoints = data_generator[0]
# image, keypoints = augmenter(images=image, keypoints=keypoints)
# plt.figure(figsize=(5, 5))
# image = image[0] if image.shape[-1] is 3 else image[0, ..., 0]
# cmap = None if image.shape[-1] is 3 else 'gray'
# plt.imshow(image, cmap=cmap, interpolation='none')
# for idx, jdx in enumerate(data_generator.graph):
# if jdx > -1:
# x1 = keypoints[0, idx, 0]
# x2 = keypoints[0, jdx, 0]
# if (0 <= x1 <= IMAGE_SIZE[0]) and (0 <= x2 <= IMAGE_SIZE[0]):
# plt.plot(
# [keypoints[0, idx, 0], keypoints[0, jdx, 0]],
# [keypoints[0, idx, 1], keypoints[0, jdx, 1]],
# 'r-'
# )
plt.scatter(keypoints[0, :, 0], keypoints[0, :, 1], c=np.arange(data_generator.keypoints_shape[0]), s=50,
cmap=plt.cm.hsv, zorder=3)
# plt.show()
train_generator = TrainingGenerator(generator=data_generator,
downsample_factor=3,
augmenter=augmenter,
sigma=5,
validation_split=0,
use_graph=False,
random_seed=1,
graph_scale=1)
train_generator.get_config()
# n_keypoints = data_generator.keypoints_shape[0]
# batch = train_generator(batch_size=1, validation=False)[0]
# inputs = batch[0]
# outputs = batch[1]
# fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(10, 10))
# ax1.set_title('image')
# ax1.imshow(inputs[0, ..., 0], vmin=0, vmax=255)
#
# ax2.set_title('posture graph')
# ax2.imshow(outputs[0, ..., n_keypoints:-1].max(-1))
#
# ax3.set_title('keypoints confidence')
# ax3.imshow(outputs[0, ..., :n_keypoints].max(-1))
#
# ax4.set_title('posture graph and keypoints confidence')
# ax4.imshow(outputs[0, ..., -1], vmin=0)
# plt.show()
train_generator.on_epoch_end()
# Define a model
model = StackedHourglass(train_generator)
model.get_config()
# data_size = (10,) + data_generator.image_shape
# x = np.random.randint(0, 255, data_size, dtype="uint8")
# y = model.predict(x[:100], batch_size=100) # make sure the model is in GPU memory
# t0 = time.time()
# y = model.predict(x, batch_size=100, verbose=1)
# t1 = time.time()
# print(x.shape[0] / (t1 - t0))
# logger = Logger(validation_batch_size=10,
# # filepath saves the logger data to a .h5 file
# filepath=HOME + "/deepposekit-data/datasets/{}/log_densenet.h5".format(TYPE)
# )
# Remember, if you set validation_split=0 for your TrainingGenerator,
# which will just use the training set for model fitting,
# make sure to set monitor="loss" instead of monitor="val_loss".
reduce_lr = ReduceLROnPlateau(monitor="loss", factor=0.2, verbose=1, patience=20)
model_checkpoint = ModelCheckpoint(
HOME + "/deepposekit-data/datasets/{}/model_densenet.h5".format(TYPE),
monitor="loss",
# monitor="loss" # use if validation_split=0
verbose=1,
save_best_only=True,
)
early_stop = EarlyStopping(
monitor="loss",
# monitor="loss" # use if validation_split=0
min_delta=0.001,
patience=100,
verbose=1
)
callbacks = [early_stop, reduce_lr, model_checkpoint]
model.fit(
batch_size=5,
validation_batch_size=10,
callbacks=callbacks,
# epochs=1000, # Increase the number of epochs to train the model longer
epochs=50,
n_workers=8,
steps_per_epoch=None,
)
# model = load_model(
# HOME + "/deepposekit-data/datasets/{}/model_densenet.h5".format(TYPE),
# augmenter=augmenter,
# generator=data_generator,
# )
#
# model.fit(
# batch_size=2,
# validation_batch_size=10,
# callbacks=callbacks,
# epochs=50,
# n_workers=8,
# steps_per_epoch=None,
# )
| 32.697436 | 132 | 0.606336 |
cd8237accaa927ddf6513747162736a47cc442f6
| 763 |
py
|
Python
|
northpole/settings/local_staging.py
|
mhotwagner/northpole
|
7d904d919aeb6a36549750ee0700578246896691
|
[
"MIT"
] | null | null | null |
northpole/settings/local_staging.py
|
mhotwagner/northpole
|
7d904d919aeb6a36549750ee0700578246896691
|
[
"MIT"
] | null | null | null |
northpole/settings/local_staging.py
|
mhotwagner/northpole
|
7d904d919aeb6a36549750ee0700578246896691
|
[
"MIT"
] | null | null | null |
from .base import *
from dotenv import load_dotenv
load_dotenv(dotenv_path='northpole/.staging.env', verbose=True)
ALLOWED_HOSTS = ['*']
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv('POSTGRES_DB', 'northpole-staging'),
'USER': os.getenv('POSTGRES_USER'),
'PASSWORD': os.getenv('POSTGRES_PASSWORD'),
'HOST': os.getenv('POSTGRES_HOST'),
'PORT': os.getenv('POSTGRES_PORT', '5432'),
}
}
STATIC_ROOT = os.path.join(BASE_DIR, '..', 'static')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, '..', 'static_source'),
)
MEDIA_ROOT = os.path.join(BASE_DIR, '..', 'media')
MEDIA_URL = '/media/'
| 25.433333 | 64 | 0.651376 |
cd836f4eaf2d0f0894b304e6d9d109cacae91338
| 12,587 |
py
|
Python
|
bc4py/bip32/bip32.py
|
namuyan/bc4py
|
6484d356096261d0d57e9e1f5ffeae1f9a9865f3
|
[
"MIT"
] | 12 |
2018-09-19T14:02:09.000Z
|
2020-01-27T16:20:14.000Z
|
bc4py/bip32/bip32.py
|
kumacoinproject/bc4py
|
6484d356096261d0d57e9e1f5ffeae1f9a9865f3
|
[
"MIT"
] | 1 |
2020-03-19T16:57:30.000Z
|
2020-03-19T16:57:30.000Z
|
bc4py/bip32/bip32.py
|
namuyan/bc4py
|
6484d356096261d0d57e9e1f5ffeae1f9a9865f3
|
[
"MIT"
] | 6 |
2018-11-13T17:20:14.000Z
|
2020-02-15T11:46:52.000Z
|
#!/usr/bin/env python
#
# Copyright 2014 Corgan Labs
# See LICENSE.txt for distribution terms
#
from bc4py.bip32.base58 import check_decode, check_encode
from bc4py_extension import PyAddress
from ecdsa.curves import SECP256k1
from ecdsa.keys import SigningKey, VerifyingKey, square_root_mod_prime as mod_sqrt
from ecdsa.ecdsa import generator_secp256k1, int_to_string
from ecdsa.ellipticcurve import Point, INFINITY
from os import urandom
import hmac
import hashlib
import codecs
import struct
CURVE_GEN = generator_secp256k1 # Point class
CURVE_ORDER = CURVE_GEN.order() # int
FIELD_ORDER = SECP256k1.curve.p() # int
MIN_ENTROPY_LEN = 128 # bits
BIP32_HARDEN = 0x80000000 # choose from hardened set of child keys
EX_MAIN_PRIVATE = [codecs.decode('0488ade4', 'hex')] # Version strings for mainnet extended private keys
EX_MAIN_PUBLIC = [codecs.decode('0488b21e', 'hex'),
codecs.decode('049d7cb2', 'hex')] # Version strings for mainnet extended public keys
EX_TEST_PRIVATE = [codecs.decode('04358394', 'hex')] # Version strings for testnet extended private keys
EX_TEST_PUBLIC = [codecs.decode('043587CF', 'hex')] # Version strings for testnet extended public keys
WALLET_VERSION = b'\x80'
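# NOTE: the class statement, constructor and low-level helpers of Bip32 fall in
# a truncated part of this record. The skeleton below is a hedged reconstruction
# so the methods that follow have their class context again; the field names are
# inferred from their uses below (self.secret, self.chain, self.parent_fpr, ...),
# but the original bc4py implementation may differ in detail.
class Bip32:

    def __init__(self, secret, public, chain, depth=0, index=0, fpr=b'\x00\x00\x00\x00', path='m'):
        self.secret = secret        # SigningKey, or None for a public-only key
        self.public = public        # VerifyingKey
        self.chain = chain          # 32-byte chain code
        self.depth = depth
        self.index = index
        self.parent_fpr = fpr       # fingerprint of the parent key
        self.path = path

    def _hmac(self, data):
        """HMAC-SHA512 of data keyed with the chain code, split into (Il, Ir)."""
        I = hmac.new(self.chain, data, hashlib.sha512).digest()
        return I[:32], I[32:]

    def get_private_key(self):
        return self.secret.to_string()

    def get_public_key(self):
        """Compressed SEC1 encoding of the public point (assumed 33-byte form)."""
        point = self.public.pubkey.point
        prefix = b'\x03' if point.y() & 1 else b'\x02'
        return prefix + int_to_string(point.x()).rjust(32, b'\x00')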
    def CKDpriv(self, i):
        """
        Create a child key of index 'i'.
        If the most significant bit of 'i' is set, then select from the
        hardened key set, otherwise, select a regular child key.
        Returns a BIP32Key constructed with the child key parameters,
        or None if i index would result in an invalid key.
        """
        # Index as bytes, BE
        i_str = struct.pack(">L", i)
        # Data to HMAC
        if i & BIP32_HARDEN:
            data = b'\0' + self.get_private_key() + i_str
            path = self.path + '/' + str(i % BIP32_HARDEN) + '\''
        else:
            data = self.get_public_key() + i_str
            path = self.path + '/' + str(i)
        # Get HMAC of data
        (Il, Ir) = self._hmac(data)
        # Construct new key material from Il and current private key
        Il_int = int.from_bytes(Il, 'big')
        if Il_int > CURVE_ORDER:
            return None
        sec_int = int.from_bytes(self.secret.to_string(), 'big')
        k_int = (Il_int + sec_int) % CURVE_ORDER
        if k_int == 0:
            return None
        # Construct and return a new BIP32Key
        secret = SigningKey.from_string(int_to_string(k_int), SECP256k1)
        public = secret.verifying_key
        return Bip32(secret=secret, public=public, chain=Ir, depth=self.depth + 1, index=i, fpr=self.fingerprint(), path=path)

    def CKDpub(self, i):
        """
        Create a publicly derived child key of index 'i'.
        If the most significant bit of 'i' is set, this is
        an error.
        Returns a BIP32Key constructed with the child key parameters,
        or None if index would result in invalid key.
        """
        if i & BIP32_HARDEN:
            raise Exception("Cannot create a hardened child key using public child derivation")
        # Data to HMAC. Same as CKDpriv() for public child key.
        data = self.get_public_key() + struct.pack(">L", i)
        # Get HMAC of data
        (Il, Ir) = self._hmac(data)
        # Construct curve point Il*G+K
        Il_int = int.from_bytes(Il, 'big')
        if Il_int >= CURVE_ORDER:
            return None
        point = Il_int*CURVE_GEN + self.public.pubkey.point
        if point == INFINITY:
            return None
        public = VerifyingKey.from_public_point(point, SECP256k1)
        # Construct and return a new BIP32Key
        path = self.path + '/' + str(i)
        return Bip32(
            secret=None, public=public, chain=Ir, depth=self.depth + 1, index=i, fpr=self.fingerprint(), path=path)

    def child_key(self, i):
        """
        Create and return a child key of this one at index 'i'.
        The index 'i' should be summed with BIP32_HARDEN to indicate
        to use the private derivation algorithm.
        """
        if self.secret is None:
            return self.CKDpub(i)
        else:
            return self.CKDpriv(i)

    def get_address(self, hrp, ver) -> PyAddress:
        """Return bech32 compressed address"""
        return PyAddress.from_param(hrp, ver, self.identifier())

    def identifier(self):
        """Return key identifier as string"""
        pk = self.get_public_key()
        return hashlib.new('ripemd160', hashlib.sha256(pk).digest()).digest()

    def fingerprint(self):
        """Return key fingerprint as string"""
        return self.identifier()[:4]

    def extended_key(self, is_private=True, encoded=True, is_testnet=False):
        """Return extended private or public key as string, optionally base58 encoded"""
        if self.secret is None and is_private is True:
            raise Exception("Cannot export an extended private key from a public-only deterministic key")
        if is_testnet:
            version = EX_TEST_PRIVATE[0] if is_private else EX_TEST_PUBLIC[0]
        else:
            version = EX_MAIN_PRIVATE[0] if is_private else EX_MAIN_PUBLIC[0]
        depth = self.depth.to_bytes(1, 'big')
        fpr = self.parent_fpr
        child = struct.pack('>L', self.index)
        chain = self.chain
        if self.secret is None or is_private is False:
            # startswith b'\x02' or b'\x03'
            data = self.get_public_key()
        else:
            # startswith b'\x00'
            data = b'\x00' + self.get_private_key()
        if encoded:
            return check_encode(version + depth + fpr + child + chain + data)
        else:
            return depth + fpr + child + chain + data

    def wallet_import_format(self, prefix=WALLET_VERSION):
        """Returns private key encoded for wallet import"""
        if self.secret is None:
            raise Exception("Publicly derived deterministic keys have no private half")
        raw = prefix + self.get_private_key() + b'\x01'  # Always compressed
        return check_encode(raw)

    def dump(self):
        """Dump key fields mimicking the BIP0032 test vector format"""
        print(" * Identifier")
        print(" * (hex): ", self.identifier().hex())
        print(" * (fpr): ", self.fingerprint().hex())
        print(" * (main addr):", self.get_address('bc', 0))
        print(" * (path): ", self.path)
        if self.secret:
            print(" * Secret key")
            print(" * (hex): ", self.get_private_key().hex())
            print(" * (wif): ", self.wallet_import_format())
        print(" * Public key")
        print(" * (hex): ", self.get_public_key().hex())
        print(" * Chain code")
        print(" * (hex): ", self.chain.hex())
        print(" * Serialized")
        print(" * (pub hex): ", self.extended_key(is_private=False, encoded=False).hex())
        print(" * (pub b58): ", self.extended_key(is_private=False, encoded=True))
        if self.secret:
            print(" * (prv hex): ", self.extended_key(is_private=True, encoded=False).hex())
            print(" * (prv b58): ", self.extended_key(is_private=True, encoded=True))
def parse_bip32_path(path):
"""parse BIP32 format"""
r = list()
for s in path.split('/'):
if s == 'm':
continue
elif s.endswith("'") or s.endswith('h'):
r.append(int(s[:-1]) + BIP32_HARDEN)
else:
r.append(int(s))
return r
def struct_bip32_path(path):
"""struct BIP32 string path"""
s = 'm'
for p in path:
if p & BIP32_HARDEN:
s += "/{}'".format(p % BIP32_HARDEN)
else:
s += "/{}".format(p)
return s
__all__ = [
"BIP32_HARDEN",
"Bip32",
"parse_bip32_path",
"struct_bip32_path",
]
| 37.573134 | 126 | 0.593072 |
cd83dd3751ba2089366bb8592c6a8484b3986736
| 1,167 |
py
|
Python
|
lib/utils/useragent.py
|
cckuailong/pocsploit
|
fe4a3154e59d2bebd55ccfdf62f4f7efb21b5a2a
|
[
"MIT"
] | 106 |
2022-03-18T06:51:09.000Z
|
2022-03-31T19:11:41.000Z
|
lib/utils/useragent.py
|
cckuailong/pocsploit
|
fe4a3154e59d2bebd55ccfdf62f4f7efb21b5a2a
|
[
"MIT"
] | 5 |
2022-03-27T07:37:32.000Z
|
2022-03-31T13:56:11.000Z
|
lib/utils/useragent.py
|
cckuailong/pocsploit
|
fe4a3154e59d2bebd55ccfdf62f4f7efb21b5a2a
|
[
"MIT"
] | 30 |
2022-03-21T01:27:08.000Z
|
2022-03-31T12:28:01.000Z
|
import random
from loguru import logger
from lib.vars.vars import conf, th, paths
from lib.vars.ua import UA_LIST
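# The helper that consumes UA_LIST is in a truncated part of this record. A
# hedged sketch of the obvious accessor (the original may also honor a custom
# user agent set in conf):
def get_random_ua():
    return random.choice(UA_LIST)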
| 28.463415 | 184 | 0.685518 |
cd865fa7395cf48130baac47f65fb9a0acdb8fa6
| 1,378 |
py
|
Python
|
etapa 2/gaussJacobi.py
|
jlucartc/MetodosNumericos20182
|
d5610b95945ed6ec9b9bae6cd96672f4d616c1b9
|
[
"MIT"
] | null | null | null |
etapa 2/gaussJacobi.py
|
jlucartc/MetodosNumericos20182
|
d5610b95945ed6ec9b9bae6cd96672f4d616c1b9
|
[
"MIT"
] | null | null | null |
etapa 2/gaussJacobi.py
|
jlucartc/MetodosNumericos20182
|
d5610b95945ed6ec9b9bae6cd96672f4d616c1b9
|
[
"MIT"
] | null | null | null |
import numpy as np
from sympy import *
from math import *
from timeit import default_timer as timer
start = None
end = None
A = np.matrix(eval(input("Digite uma matriz : ")))
A = A.astype(float)
X = np.matrix(eval(input("Digite X : ")))
e = float(input("Digite a preciso: "))
B = np.copy(A[:,A.shape[1]-1])
A = np.delete(np.copy(A),A.shape[1]-1,1)
C = np.asmatrix(np.zeros([A.shape[0],A.shape[1]]))
C = C.astype(float)
G = np.copy(B)
for i in range(C.shape[0]):
for j in range(C.shape[1]):
if i != j:
C[i,j] = (np.copy(A[i,j])/np.copy(A[i,i]))*(-1)
G[i,0] = (np.copy(G[i,0]))/(np.copy(A[i,i]))
C[i,i] = 0
Xn = None
z = True
print("Matriz C:\n",C)
print("Matriz G:\n",G)
start = timer()
while(z):
Xn = (np.copy(C) @ np.copy(X)) + np.copy(G)
d = maxXi(np.copy(Xn),np.copy(X))
if(d < e):
z = False
else:
X = np.copy(Xn)
end = timer()
print("Resposta de Gauss-Jacobi: ")
print(Xn)
print("Tempo de execucao total: %e segundos" % (end - start))
| 18.621622 | 78 | 0.523948 |
cd8770a9a9b49ceb88698ef2075f53487bd2aca7
| 8,139 |
py
|
Python
|
custom_libs/Project2/plotter.py
|
drkostas/COSC522
|
5731576301daf99ca7c3d382fe3ea8b1398008ff
|
[
"MIT"
] | 1 |
2021-12-22T14:29:42.000Z
|
2021-12-22T14:29:42.000Z
|
custom_libs/Project2/plotter.py
|
drkostas/COSC522
|
5731576301daf99ca7c3d382fe3ea8b1398008ff
|
[
"MIT"
] | 3 |
2021-10-13T02:14:30.000Z
|
2021-11-24T05:28:32.000Z
|
custom_libs/Project2/plotter.py
|
drkostas/COSC522
|
5731576301daf99ca7c3d382fe3ea8b1398008ff
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import numpy as np
| 44.966851 | 110 | 0.578327 |
cd887102450875f1d2f5fd98ea87c44fd4dd0888
| 303 |
py
|
Python
|
Python/8/SquareSum/square_sum.py
|
hwakabh/codewars
|
7afce5a7424d35abc55c350301ac134f2d3edd3d
|
[
"MIT"
] | null | null | null |
Python/8/SquareSum/square_sum.py
|
hwakabh/codewars
|
7afce5a7424d35abc55c350301ac134f2d3edd3d
|
[
"MIT"
] | 6 |
2020-02-21T17:01:59.000Z
|
2021-05-04T07:04:41.000Z
|
Python/8/SquareSum/square_sum.py
|
hwakabh/codewars
|
7afce5a7424d35abc55c350301ac134f2d3edd3d
|
[
"MIT"
] | null | null | null |
import sys
def square_sum(numbers):
    # Restores the missing kata solution: square each number and sum the results.
    return sum(n ** 2 for n in numbers)
if __name__ == "__main__":
if len(sys.argv) == 1:
        nums = [int(e) for e in input('>>> Enter the numbers with comma-separated: ').split(',')]
print(square_sum(numbers=nums))
else:
sys.exit(1)
| 23.307692 | 97 | 0.590759 |
cd88a71325bc436cee432caaafbbae9750fb46b0
| 5,710 |
py
|
Python
|
spatial_ops/lazy_loader.py
|
LucaMarconato/spatial_ops
|
86e5b8557db9efa2ca263098ed47c16de05fab00
|
[
"MIT"
] | null | null | null |
spatial_ops/lazy_loader.py
|
LucaMarconato/spatial_ops
|
86e5b8557db9efa2ca263098ed47c16de05fab00
|
[
"MIT"
] | null | null | null |
spatial_ops/lazy_loader.py
|
LucaMarconato/spatial_ops
|
86e5b8557db9efa2ca263098ed47c16de05fab00
|
[
"MIT"
] | null | null | null |
import os
import pickle
from abc import ABC, abstractmethod
import h5py
import numpy as np
from .folders import get_pickle_lazy_loader_data_path, hdf5_lazy_loader_data_path
from .unpickler import CustomUnpickler
class LazyLoader(ABC):
    # Reconstructed enclosing class (assumption): the original definition is
    # missing from this excerpt, but the ABC import and the use of `self`
    # imply `load_data` belongs to an abstract lazy-loader base class.
    associated_instance = None
    @abstractmethod
    def precompute(self): ...
    @abstractmethod
    def _save_data(self, data): ...
    @abstractmethod
    def _load_precomputed_data(self): ...
    @abstractmethod
    def has_data_already_been_precomputed(self): ...
    def load_data(self, store_precomputation_on_disk=True):
        if self.associated_instance is None:
            raise ValueError(f'self.associated_instance = {self.associated_instance}')
        if not self.has_data_already_been_precomputed():
            # print('precomputing')
            data = self.precompute()
            if data is None:
                raise ValueError(f'data = {data}')
            if store_precomputation_on_disk:
                self._save_data(data)
            return data
        else:
            # print('loading')
            return self._load_precomputed_data()
if not os.path.isfile(hdf5_lazy_loader_data_path):
f = h5py.File(hdf5_lazy_loader_data_path, 'w')
f.close()
if __name__ == '__main__':
from spatial_ops.data import JacksonFischerDataset as jfd
from spatial_ops.data import Patient
patient = jfd.patients[15]
derived_quantity = NumberOfPlatesLoader0(patient)
print(derived_quantity.load_data())
derived_quantity = NumberOfPlatesLoader1(patient)
# derived_quantity.delete_precomputation()
print(derived_quantity.load_data())
| 33.588235 | 117 | 0.680911 |
cd89017afbf663624d11e9b8f48f90440b465747
| 27,270 |
py
|
Python
|
connector/binance/websockets.py
|
firebird631/siis
|
8d64e8fb67619aaa5c0a62fda9de51dedcd47796
|
[
"PostgreSQL"
] | null | null | null |
connector/binance/websockets.py
|
firebird631/siis
|
8d64e8fb67619aaa5c0a62fda9de51dedcd47796
|
[
"PostgreSQL"
] | null | null | null |
connector/binance/websockets.py
|
firebird631/siis
|
8d64e8fb67619aaa5c0a62fda9de51dedcd47796
|
[
"PostgreSQL"
] | null | null | null |
# @date 2020-01-31
# @author Frederic Scherma, All rights reserved without prejudices.
# @license Copyright (c) 2020 Dream Overflow
# Binance Websocket connector.
import json
import threading
import traceback
from autobahn.twisted.websocket import WebSocketClientFactory, WebSocketClientProtocol, connectWS
from twisted.internet import ssl, reactor # , reactor
from twisted.internet.protocol import ReconnectingClientFactory
from connector.binance.client import Client
from monitor.service import MonitorService
import logging
logger = logging.getLogger('siis.connector.binance.ws')
error_logger = logging.getLogger('siis.error.connector.binance.ws')
traceback_logger = logging.getLogger('siis.traceback.connector.binance.ws')
| 37.510316 | 136 | 0.541584 |
cd8a35bcbfb312cda1686fb97584510659ede9ae
| 669 |
py
|
Python
|
Basic Data Structures/array/ListSlicing.py
|
rush2catch/algorithms-leetcode
|
38a5e6aa33d48fa14fe09c50c28a2eaabd736e55
|
[
"MIT"
] | null | null | null |
Basic Data Structures/array/ListSlicing.py
|
rush2catch/algorithms-leetcode
|
38a5e6aa33d48fa14fe09c50c28a2eaabd736e55
|
[
"MIT"
] | null | null | null |
Basic Data Structures/array/ListSlicing.py
|
rush2catch/algorithms-leetcode
|
38a5e6aa33d48fa14fe09c50c28a2eaabd736e55
|
[
"MIT"
] | null | null | null |
def list_slicing(lst, start, end):
    # The original definition is not included in this excerpt; this assumed
    # reconstruction simply returns the slice lst[start:end].
    return lst[start:end]
list_0 = [1, 2, 3, 6]
print(list_slicing(list_0, 1, 4))
print(list_slicing(list_0, 2, 4))
list_1 = [1, 2, 4, 5, 6, 9, 4, 6, 5, 8, 1, 4]
print(list_slicing(list_1, 3, 4))
print(list_slicing(list_1, 4, 3))
print(list_slicing(list_1, 2, 6))
print(list_slicing(list_1, 6, 2))
print(list_slicing(list_1, 5, 3))
print(list_slicing(list_1, 2, 5))
| 27.875 | 63 | 0.605381 |
cd8abd04dc151085122e27f4484b76ecb7ff52ac
| 225 |
py
|
Python
|
OnePy/sys_module/base_riskmanager.py
|
Chandlercjy/OnePyfx
|
9bd43b721d3f7352495b6ccab76bd533a3d2e8f2
|
[
"MIT"
] | 321 |
2017-07-09T09:25:45.000Z
|
2022-03-29T16:51:35.000Z
|
OnePy/sys_module/base_riskmanager.py
|
sunzhouhong/OnePy
|
4e225945de297ba1211035a7b95b5094cdddc2a7
|
[
"MIT"
] | 7 |
2017-08-23T12:10:29.000Z
|
2020-03-26T12:56:09.000Z
|
OnePy/sys_module/base_riskmanager.py
|
sunzhouhong/OnePy
|
4e225945de297ba1211035a7b95b5094cdddc2a7
|
[
"MIT"
] | 134 |
2017-07-26T22:29:18.000Z
|
2022-03-23T09:22:10.000Z
|
from OnePy.sys_module.metabase_env import OnePyEnvBase
| 17.307692 | 70 | 0.715556 |
cd8aca443ef9f431942f1f2a5e259a12ad32107f
| 3,057 |
py
|
Python
|
tests/geographic/duplicates/test_find_grid_duplicates.py
|
PEM-Humboldt/regi0
|
0d64587d5d87f57cddfc7a67bb8baf74cd70adf2
|
[
"MIT"
] | null | null | null |
tests/geographic/duplicates/test_find_grid_duplicates.py
|
PEM-Humboldt/regi0
|
0d64587d5d87f57cddfc7a67bb8baf74cd70adf2
|
[
"MIT"
] | 15 |
2022-02-03T11:38:37.000Z
|
2022-03-09T23:23:04.000Z
|
tests/geographic/duplicates/test_find_grid_duplicates.py
|
PEM-Humboldt/regi0
|
0d64587d5d87f57cddfc7a67bb8baf74cd70adf2
|
[
"MIT"
] | null | null | null |
"""
Test cases for the regi0.geographic.duplicates.find_grid_duplicates function.
"""
import numpy as np
import pandas as pd
from regi0.geographic.duplicates import find_grid_duplicates
| 21.082759 | 83 | 0.446189 |
cd8b45d655ef0b191b537030a3d9f0b1784aa23f
| 772 |
py
|
Python
|
kolibri/core/public/utils.py
|
FollonSaxBass/kolibri
|
4cf820b14386aecc228fecff64c847bad407cbb1
|
[
"MIT"
] | 2 |
2021-05-13T10:20:46.000Z
|
2021-11-15T12:31:03.000Z
|
kolibri/core/public/utils.py
|
camellia26/kolibri
|
7f1cb794c93f37e039be22f56a5ac1989ed22bde
|
[
"MIT"
] | 8 |
2021-05-21T15:31:24.000Z
|
2022-02-24T15:02:14.000Z
|
kolibri/core/public/utils.py
|
camellia26/kolibri
|
7f1cb794c93f37e039be22f56a5ac1989ed22bde
|
[
"MIT"
] | 1 |
2019-10-05T11:14:40.000Z
|
2019-10-05T11:14:40.000Z
|
import platform
from django.core.exceptions import ObjectDoesNotExist
from morango.models import InstanceIDModel
import kolibri
def get_device_info():
"""Returns metadata information about the device"""
instance_model = InstanceIDModel.get_or_create_current_instance()[0]
try:
device_name = kolibri.core.device.models.DeviceSettings.objects.get().name
        # When Kolibri starts for the first time, the device settings haven't been created yet
except ObjectDoesNotExist:
device_name = instance_model.hostname
info = {
"application": "kolibri",
"kolibri_version": kolibri.__version__,
"instance_id": instance_model.id,
"device_name": device_name,
"operating_system": platform.system(),
}
return info
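# Illustrative shape of the returned dict (hypothetical values):
# {"application": "kolibri", "kolibri_version": "0.15.0",
#  "instance_id": "6ae1...", "device_name": "my-device",
#  "operating_system": "Linux"}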
| 28.592593 | 82 | 0.715026 |
cd8c005ad2ae492334e75e29d8ea3fae95bca95b
| 1,372 |
py
|
Python
|
mcpipy/cellcraft/config.py
|
cellcraft/cellcraft
|
1cb2b152bb6433250cec43e2586f1b5d093ec6e5
|
[
"MIT"
] | 2 |
2016-01-21T12:05:36.000Z
|
2016-04-18T09:50:03.000Z
|
mcpipy/cellcraft/config.py
|
cellcraft/cellcraft
|
1cb2b152bb6433250cec43e2586f1b5d093ec6e5
|
[
"MIT"
] | 1 |
2016-05-13T13:08:28.000Z
|
2016-05-13T13:08:28.000Z
|
mcpipy/cellcraft/config.py
|
cellcraft/cellcraft
|
1cb2b152bb6433250cec43e2586f1b5d093ec6e5
|
[
"MIT"
] | 3 |
2015-12-14T19:28:42.000Z
|
2020-11-29T12:53:12.000Z
|
import os
import json
import logging
# cellcraft node
CELLCRAFT_NODE_URL="http://192.168.178.29:4534"
# path to cache where pickle files will be stored
PATH_RESOURCES='cellcraft/resources'
PATH_CACHE='cellcraft/resources/cache/'
PATH_TEST_CACHE='test/fixtures/cache/'
# path to fixtures
PATH_TO_FIXTURES="test/fixtures"
# path to cellpack structures after processing them
PATH_CELLPACK = 'cellcraft/resources/cellpack/'
# cellpack parameters
envelop_id = 22
# database name to store biological information and coordinates of structures
DB='cellcraft'
TEST_DB='test'
# fix maximum amount of structures saved on cache
MAXIMUM_NUM_STRUCTURES_CACHE = 8
# load block appearance json
current_env = os.environ.get('app_env')
root_logger = logging.getLogger()
current_env = 'test'  # NOTE: hard-coded override; the app_env lookup above has no effect
if current_env == 'cellcraft':
DB_HOST = '127.0.0.1'
DB_PORT = 27017
root_logger.setLevel(logging.INFO)
elif current_env == 'test':
DB_HOST = '127.0.0.1'
DB_PORT = 27017
root_logger.setLevel(logging.DEBUG)
else:
    logging.warning('No app_env configured; using the default dev environment for config')
root_logger.setLevel(logging.DEBUG)
| 24.070175 | 98 | 0.764577 |
cd8c4a556bdf6a751d59f1d67ef4d0688f0e6844
| 9,123 |
py
|
Python
|
ftpsync/pyftpsync.py
|
wengzy/pyftpsync
|
db6decb02bf3535fe87d90b45a6cc974dd356b04
|
[
"MIT"
] | 86 |
2015-03-02T17:40:03.000Z
|
2022-03-14T03:41:40.000Z
|
ftpsync/pyftpsync.py
|
wengzy/pyftpsync
|
db6decb02bf3535fe87d90b45a6cc974dd356b04
|
[
"MIT"
] | 63 |
2015-04-12T19:01:52.000Z
|
2022-01-19T00:57:51.000Z
|
ftpsync/pyftpsync.py
|
wengzy/pyftpsync
|
db6decb02bf3535fe87d90b45a6cc974dd356b04
|
[
"MIT"
] | 25 |
2015-04-12T18:07:25.000Z
|
2021-04-25T15:20:24.000Z
|
# -*- coding: utf-8 -*-
"""
Simple folder synchronization using FTP.
(c) 2012-2021 Martin Wendt; see https://github.com/mar10/pyftpsync
Licensed under the MIT license: https://www.opensource.org/licenses/mit-license.php
Usage examples:
> pyftpsync.py --help
> pyftpsync.py upload . ftps://example.com/myfolder
"""
import argparse
import platform
import sys
from pprint import pprint
from ftpsync import __version__
from ftpsync.cli_common import (
common_parser,
creds_parser,
matcher_parser,
verbose_parser,
)
from ftpsync.run_command import add_run_parser, handle_run_command
from ftpsync.scan_command import add_scan_parser
from ftpsync.synchronizers import (
BiDirSynchronizer,
DownloadSynchronizer,
UploadSynchronizer,
)
from ftpsync.targets import FsTarget, make_target
from ftpsync.tree_command import add_tree_parser
from ftpsync.util import (
DEBUG_FLAGS,
PYTHON_VERSION,
check_cli_verbose,
namespace_to_dict,
set_pyftpsync_logger,
)
# ===============================================================================
# run
# ===============================================================================
def run():
"""CLI main entry point."""
# Use print() instead of logging when running in CLI mode:
set_pyftpsync_logger(None)
parser = argparse.ArgumentParser(
description="Synchronize folders over FTP.",
epilog="See also https://github.com/mar10/pyftpsync",
parents=[verbose_parser],
)
# Note: we want to allow --version to be combined with --verbose. However
# on Py2, argparse makes sub-commands mandatory, unless `action="version"` is used.
if check_cli_verbose(3) > 3:
version_info = "pyftpsync/{} Python/{} {}".format(
__version__, PYTHON_VERSION, platform.platform()
)
else:
version_info = "{}".format(__version__)
parser.add_argument("-V", "--version", action="version", version=version_info)
subparsers = parser.add_subparsers(help="sub-command help")
# --- Create the parser for the "upload" command ---------------------------
sp = subparsers.add_parser(
"upload",
parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
help="copy new and modified files to remote folder",
)
sp.add_argument(
"local",
metavar="LOCAL",
default=".",
help="path to local folder (default: %(default)s)",
)
sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
sp.add_argument(
"--force",
action="store_true",
help="overwrite remote files, even if the target is newer "
"(but no conflict was detected)",
)
sp.add_argument(
"--resolve",
default="ask",
choices=["local", "skip", "ask"],
help="conflict resolving strategy (default: '%(default)s')",
)
sp.add_argument(
"--delete",
action="store_true",
help="remove remote files if they don't exist locally",
)
sp.add_argument(
"--delete-unmatched",
action="store_true",
help="remove remote files if they don't exist locally "
"or don't match the current filter (implies '--delete' option)",
)
sp.set_defaults(command="upload")
# --- Create the parser for the "download" command -------------------------
sp = subparsers.add_parser(
"download",
parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
help="copy new and modified files from remote folder to local target",
)
sp.add_argument(
"local",
metavar="LOCAL",
default=".",
help="path to local folder (default: %(default)s)",
)
sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
sp.add_argument(
"--force",
action="store_true",
help="overwrite local files, even if the target is newer "
"(but no conflict was detected)",
)
sp.add_argument(
"--resolve",
default="ask",
choices=["remote", "skip", "ask"],
help="conflict resolving strategy (default: '%(default)s')",
)
sp.add_argument(
"--delete",
action="store_true",
help="remove local files if they don't exist on remote target",
)
sp.add_argument(
"--delete-unmatched",
action="store_true",
help="remove local files if they don't exist on remote target "
"or don't match the current filter (implies '--delete' option)",
)
sp.set_defaults(command="download")
# --- Create the parser for the "sync" command -----------------------------
sp = subparsers.add_parser(
"sync",
parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
help="synchronize new and modified files between remote folder and local target",
)
sp.add_argument(
"local",
metavar="LOCAL",
default=".",
help="path to local folder (default: %(default)s)",
)
sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
sp.add_argument(
"--resolve",
default="ask",
choices=["old", "new", "local", "remote", "skip", "ask"],
help="conflict resolving strategy (default: '%(default)s')",
)
sp.set_defaults(command="sync")
# --- Create the parser for the "run" command -----------------------------
add_run_parser(subparsers)
# --- Create the parser for the "scan" command -----------------------------
add_scan_parser(subparsers)
# --- Create the parser for the "tree" command -----------------------------
add_tree_parser(subparsers)
# --- Parse command line ---------------------------------------------------
args = parser.parse_args()
args.verbose -= args.quiet
del args.quiet
# print("verbose", args.verbose)
ftp_debug = 0
if args.verbose >= 6:
ftp_debug = 1
if args.debug:
if args.verbose < 4:
parser.error("'--debug' requires verbose level >= 4")
DEBUG_FLAGS.update(args.debug)
# Modify the `args` from the `pyftpsync.yaml` config:
if getattr(args, "command", None) == "run":
handle_run_command(parser, args)
if callable(getattr(args, "command", None)):
# scan_handler
try:
return args.command(parser, args)
except KeyboardInterrupt:
print("\nAborted by user.", file=sys.stderr)
sys.exit(3)
elif not hasattr(args, "command"):
parser.error(
"missing command (choose from 'upload', 'download', 'run', 'sync', 'scan')"
)
# Post-process and check arguments
if hasattr(args, "delete_unmatched") and args.delete_unmatched:
args.delete = True
args.local_target = make_target(args.local, {"ftp_debug": ftp_debug})
if args.remote == ".":
parser.error("'.' is expected to be the local target (not remote)")
args.remote_target = make_target(args.remote, {"ftp_debug": ftp_debug})
if not isinstance(args.local_target, FsTarget) and isinstance(
args.remote_target, FsTarget
):
parser.error("a file system target is expected to be local")
# Let the command handler do its thing
opts = namespace_to_dict(args)
if args.command == "upload":
s = UploadSynchronizer(args.local_target, args.remote_target, opts)
elif args.command == "download":
s = DownloadSynchronizer(args.local_target, args.remote_target, opts)
elif args.command == "sync":
s = BiDirSynchronizer(args.local_target, args.remote_target, opts)
else:
parser.error("unknown command '{}'".format(args.command))
s.is_script = True
try:
s.run()
except KeyboardInterrupt:
print("\nAborted by user.", file=sys.stderr)
sys.exit(3)
finally:
# Prevent sporadic exceptions in ftplib, when closing in __del__
s.local.close()
s.remote.close()
stats = s.get_stats()
if args.verbose >= 5:
pprint(stats)
elif args.verbose >= 1:
if args.dry_run:
print("(DRY-RUN) ", end="")
print(
"Wrote {}/{} files in {} directories, skipped: {}.".format(
stats["files_written"],
stats["local_files"],
stats["local_dirs"],
stats["conflict_files_skipped"],
),
end="",
)
if stats["interactive_ask"]:
print()
else:
print(" Elap: {}.".format(stats["elap_str"]))
return
# Script entry point
if __name__ == "__main__":
# Just in case...
from multiprocessing import freeze_support
freeze_support()
run()
| 31.350515 | 90 | 0.574044 |