Dataset schema (29 columns, one record per source file; ⌀ marks a nullable column):

| column | dtype | stats |
|---|---|---|
| hexsha | stringlengths | 40–40 |
| size | int64 | 5–2.06M |
| ext | stringclasses | 11 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3–251 |
| max_stars_repo_name | stringlengths | 4–130 |
| max_stars_repo_head_hexsha | stringlengths | 40–78 |
| max_stars_repo_licenses | sequencelengths | 1–10 |
| max_stars_count | int64 ⌀ | 1–191k |
| max_stars_repo_stars_event_min_datetime | stringlengths ⌀ | 24–24 |
| max_stars_repo_stars_event_max_datetime | stringlengths ⌀ | 24–24 |
| max_issues_repo_path | stringlengths | 3–251 |
| max_issues_repo_name | stringlengths | 4–130 |
| max_issues_repo_head_hexsha | stringlengths | 40–78 |
| max_issues_repo_licenses | sequencelengths | 1–10 |
| max_issues_count | int64 ⌀ | 1–116k |
| max_issues_repo_issues_event_min_datetime | stringlengths ⌀ | 24–24 |
| max_issues_repo_issues_event_max_datetime | stringlengths ⌀ | 24–24 |
| max_forks_repo_path | stringlengths | 3–251 |
| max_forks_repo_name | stringlengths | 4–130 |
| max_forks_repo_head_hexsha | stringlengths | 40–78 |
| max_forks_repo_licenses | sequencelengths | 1–10 |
| max_forks_count | int64 ⌀ | 1–105k |
| max_forks_repo_forks_event_min_datetime | stringlengths ⌀ | 24–24 |
| max_forks_repo_forks_event_max_datetime | stringlengths ⌀ | 24–24 |
| content | stringlengths | 1–1.05M |
| avg_line_length | float64 | 1–1.02M |
| max_line_length | int64 | 3–1.04M |
| alphanum_fraction | float64 | 0–1 |

Each record below lists these fields in order, pipe-separated; the content field holds the raw Python source and therefore spans multiple lines.
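To work with records that follow this schema programmatically rather than scanning the preview, the sketch below streams and filters such a dataset with the `datasets` library. This is a minimal sketch under stated assumptions: the hub identifier is a placeholder, since this preview does not name the dataset it belongs to.

```python
from itertools import islice

from datasets import load_dataset

# Placeholder dataset id: substitute the dataset this preview belongs to.
ds = load_dataset("org/python-source-files", split="train", streaming=True)

# Keep small files that have at least one star, using the schema columns above.
small_starred = (
    row for row in ds
    if row["size"] < 10_000 and (row["max_stars_count"] or 0) > 0
)

for row in islice(small_starred, 3):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```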
7cb23f9d984ca01ba8f682afe13184f98d4f5e92 | 389 | py | Python | qtask/utils/testing.py | LinkTsang/qtask-legacy-python | 9b264b8e33313e4d3615472d59a2a39948eeeaa1 | [
"MIT"
] | null | null | null | qtask/utils/testing.py | LinkTsang/qtask-legacy-python | 9b264b8e33313e4d3615472d59a2a39948eeeaa1 | [
"MIT"
] | null | null | null | qtask/utils/testing.py | LinkTsang/qtask-legacy-python | 9b264b8e33313e4d3615472d59a2a39948eeeaa1 | [
"MIT"
] | null | null | null | import asyncio
import traceback
import unittest
| 22.882353 | 63 | 0.62982 |
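The three trailing numeric fields on every record (here 22.882353 | 63 | 0.62982) are the derived columns avg_line_length, max_line_length, and alphanum_fraction. The sketch below shows how such statistics can plausibly be recomputed from the content field; the exact conventions (whether newlines count toward length, rounding) are assumptions rather than documented behavior, and rows whose content is shown truncated in this preview will not reproduce their stored values from the excerpt alone.

```python
def content_stats(content: str):
    """Approximate the derived per-file statistics from raw file content."""
    lines = content.splitlines()
    # Average bytes per line, counting newline characters in the total.
    avg_line_length = len(content) / max(len(lines), 1)
    max_line_length = max((len(line) for line in lines), default=0)
    # Fraction of all characters that are alphanumeric.
    alphanum_fraction = sum(ch.isalnum() for ch in content) / max(len(content), 1)
    return avg_line_length, max_line_length, alphanum_fraction
```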
7cb2d3d2cb22c43c3c911d744e22c33bc37cdf49 | 1,661 | py | Python | landing/views.py | theflatladder/kyrsovaya | d6d661854cd955e544a199e201f325decc360cc1 | [
"MIT"
] | null | null | null | landing/views.py | theflatladder/kyrsovaya | d6d661854cd955e544a199e201f325decc360cc1 | [
"MIT"
] | null | null | null | landing/views.py | theflatladder/kyrsovaya | d6d661854cd955e544a199e201f325decc360cc1 | [
"MIT"
] | null | null | null | from django.shortcuts import render, render_to_response, redirect
from django.contrib import auth
from django.contrib.auth.forms import UserCreationForm
from django.template.context_processors import csrf
from django.http import HttpResponseRedirect
| 31.339623 | 140 | 0.668874 |
7cb376c84e1d9faf8d7802d4ab0284c278818e8c | 441 | py | Python | FPRun11.py | yecfly/DEPRESSIONEST | 21b72906aac9f310e264f7a5eea348480a647197 | [
"Unlicense"
] | null | null | null | FPRun11.py | yecfly/DEPRESSIONEST | 21b72906aac9f310e264f7a5eea348480a647197 | [
"Unlicense"
] | null | null | null | FPRun11.py | yecfly/DEPRESSIONEST | 21b72906aac9f310e264f7a5eea348480a647197 | [
"Unlicense"
] | null | null | null | from Facepatchindependenttrain import runPatch
import sys
if len(sys.argv)==6:
runPatch(GPU_Device_ID=1, FacePatchID=int(sys.argv[1]),
trainpklID=int(sys.argv[2]), testpklID=int(sys.argv[3]),
NetworkType=int(sys.argv[4]),
runs=int(sys.argv[5]))
else:
print("argument errors, try\npython runfile.py <FacePatchID> <trainpklID> <testpklID> <NetworkType> <runs>")
| 40.090909 | 113 | 0.621315 |
7cb439e7ed9a5e950d6cf894c40e5a62043d06e9 | 5,183 | py | Python | vendor/packages/translate-toolkit/translate/convert/test_po2tmx.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | 2 | 2019-08-19T17:08:47.000Z | 2019-10-05T11:37:02.000Z | vendor/packages/translate-toolkit/translate/convert/test_po2tmx.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | null | null | null | vendor/packages/translate-toolkit/translate/convert/test_po2tmx.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from translate.convert import po2tmx
from translate.convert import test_convert
from translate.misc import wStringIO
from translate.storage import tmx
from translate.storage import lisa
| 33.43871 | 128 | 0.641134 |
7cb54fa0b7a5c349c3088529c91a97ac9de21c8e | 2,684 | py | Python | plugin.video.yatp/libs/client/commands.py | mesabib/kodi.yatp | d874df43047b5b58f84cb3760fc891d9a133a69f | [
"CNRI-Python"
] | 54 | 2015-08-01T20:31:36.000Z | 2022-02-06T11:06:01.000Z | plugin.video.yatp/libs/client/commands.py | mesabib/kodi.yatp | d874df43047b5b58f84cb3760fc891d9a133a69f | [
"CNRI-Python"
] | 57 | 2015-08-31T09:54:49.000Z | 2018-08-30T20:39:12.000Z | plugin.video.yatp/libs/client/commands.py | mesabib/kodi.yatp | d874df43047b5b58f84cb3760fc891d9a133a69f | [
"CNRI-Python"
] | 16 | 2016-01-17T11:44:41.000Z | 2021-12-12T00:41:29.000Z | # coding: utf-8
# Module: commands
# Created on: 28.07.2015
# Author: Roman Miroshnychenko aka Roman V.M. ([email protected])
# Licence: GPL v.3: http://www.gnu.org/copyleft/gpl.html
"""
Context menu commands
"""
import sys
import xbmc
import xbmcgui
import json_requests as jsonrq
from simpleplugin import Addon
addon = Addon('plugin.video.yatp')
_ = addon.initialize_gettext()
def show_torrent_info(info_hash):
"""
Display current torrent info
    :param info_hash: info-hash of the torrent to display
    :return: None
"""
torr_info = jsonrq.get_torrent_info(info_hash)
info_dialog = xbmcgui.DialogProgress()
info_dialog.create(torr_info['name'])
while not info_dialog.iscanceled():
info_dialog.update(torr_info['progress'],
_('state: {0}; seeds: {1}; peers: {2}').format(
torr_info['state'],
torr_info['num_seeds'],
torr_info['num_peers']
),
_('size: {0}MB; DL speed: {1}KB/s; UL speed: {2}KB/s').format(
torr_info['size'],
torr_info['dl_speed'],
torr_info['ul_speed']
),
_('total DL: {0}MB; total UL: {1}MB').format(
torr_info['total_download'],
torr_info['total_upload'])
)
xbmc.sleep(1000)
torr_info = jsonrq.get_torrent_info(info_hash)
if __name__ == '__main__':
if sys.argv[1] == 'pause':
jsonrq.pause_torrent(sys.argv[2])
elif sys.argv[1] == 'resume':
jsonrq.resume_torrent(sys.argv[2])
elif sys.argv[1] == 'delete' and xbmcgui.Dialog().yesno(
_('Confirm delete'),
_('Do you really want to delete the torrent?')):
jsonrq.remove_torrent(sys.argv[2], False)
    elif sys.argv[1] == 'delete_with_files' and xbmcgui.Dialog().yesno(
_('Confirm delete'),
_('Do you really want to delete the torrent with files?'),
_('Warning: The files will be deleted permanently!')):
jsonrq.remove_torrent(sys.argv[2], True)
elif sys.argv[1] == 'pause_all':
jsonrq.pause_all()
elif sys.argv[1] == 'resume_all':
jsonrq.resume_all()
elif sys.argv[1] == 'show_info':
show_torrent_info(sys.argv[2])
elif sys.argv[1] == 'restore_finished':
jsonrq.restore_finished(sys.argv[2])
else:
addon.log_debug('Command cancelled or invalid command: {0}'.format(sys.argv[1]))
xbmc.executebuiltin('Container.Refresh')
| 35.786667 | 89 | 0.554396 |
7cb5817de3a17f08a3afdfbe15a3bbd0fbe2d1d8 | 346 | py | Python | setup.py | GeorgeDittmar/MarkovTextGenerator | df6a56e23051e1f263ba22889dc3b5d0dc03e370 | [
"Apache-2.0"
] | 1 | 2021-11-26T15:49:31.000Z | 2021-11-26T15:49:31.000Z | setup.py | GeorgeDittmar/Mimic | df6a56e23051e1f263ba22889dc3b5d0dc03e370 | [
"Apache-2.0"
] | 1 | 2019-06-24T17:30:41.000Z | 2019-06-26T04:53:00.000Z | setup.py | GeorgeDittmar/MarkovTextGenerator | df6a56e23051e1f263ba22889dc3b5d0dc03e370 | [
"Apache-2.0"
] | 2 | 2020-05-04T07:57:17.000Z | 2021-02-23T05:10:11.000Z | #!/usr/bin/env python
from distutils.core import setup
setup(name='Mimik',
version='1.0',
description='Python framework for markov models',
author='George Dittmar',
author_email='[email protected]',
url='https://www.python.org/sigs/distutils-sig/',
packages=['distutils', 'distutils.command'],
)
| 26.615385 | 55 | 0.65896 |
7cb5d1b6022bb826ecb887e64d632c52c31ffdb9 | 5,563 | py | Python | pipeline/scripts/package.py | deplatformr/open-images | 3726c9802bda1d7ecbbbd9920d5566daaecc9faa | [
"MIT"
] | 2 | 2020-10-12T02:37:54.000Z | 2020-10-14T15:16:49.000Z | pipeline/scripts/package.py | deplatformr/open-images | 3726c9802bda1d7ecbbbd9920d5566daaecc9faa | [
"MIT"
] | null | null | null | pipeline/scripts/package.py | deplatformr/open-images | 3726c9802bda1d7ecbbbd9920d5566daaecc9faa | [
"MIT"
] | null | null | null | import os
import shutil
import sqlite3
import tarfile
from datetime import datetime
import bagit
| 47.144068 | 735 | 0.585655 |
7cb6009fc34f03127073ead641d466f1b2a5c978 | 2,313 | py | Python | app/search/hot_eval/hl_reportable.py | don4apaev/anfisa | 2e4bdd83c584c0000f037413ccc1f9067c07fa70 | [
"Apache-2.0"
] | null | null | null | app/search/hot_eval/hl_reportable.py | don4apaev/anfisa | 2e4bdd83c584c0000f037413ccc1f9067c07fa70 | [
"Apache-2.0"
] | null | null | null | app/search/hot_eval/hl_reportable.py | don4apaev/anfisa | 2e4bdd83c584c0000f037413ccc1f9067c07fa70 | [
"Apache-2.0"
] | null | null | null | def evalRec(env, rec):
"""hl_reportable"""
return (len(set(rec.Genes) &
{
'ABHD12',
'ACTG1',
'ADGRV1',
'AIFM1',
'ATP6V1B1',
'BCS1L',
'BSND',
'CABP2',
'CACNA1D',
'CDC14A',
'CDH23',
'CEACAM16',
'CEP78',
'CHD7',
'CIB2',
'CISD2',
'CLDN14',
'CLIC5',
'CLPP',
'CLRN1',
'COCH',
'COL11A2',
'DIAPH1',
'DIAPH3',
'DMXL2',
'DNMT1',
'DSPP',
'EDN3',
'EDNRB',
'EPS8',
'EPS8L2',
'ESPN',
'ESRRB',
'EYA1',
'EYA4',
'GIPC3',
'GJB2',
'GJB6',
'GPSM2',
'GRHL2',
'GRXCR1',
'GSDME',
'HGF',
'HSD17B4',
'ILDR1',
'KCNE1',
'KCNQ1',
'KCNQ4',
'LARS2',
'LHFPL5',
'LOXHD1',
'LRTOMT',
'MARVELD2',
'MIR96',
'MITF',
'MSRB3',
'MT-RNR1',
'MT-TS1',
'MYH14',
'MYH9',
'MYO15A',
'MYO3A',
'MYO6',
'MYO7A',
'OSBPL2',
'OTOA',
'OTOF',
'OTOG',
'OTOGL',
'P2RX2',
'PAX3',
'PDZD7',
'PJVK',
'POU3F4',
'POU4F3',
'PRPS1',
'PTPRQ',
'RDX',
'RIPOR2',
'S1PR2',
'SERPINB6',
'SIX1',
'SLC17A8',
'SLC26A4',
'SLC52A2',
'SLITRK6',
'SMPX',
'SOX10',
'STRC',
'SYNE4',
'TBC1D24',
'TECTA',
'TIMM8A',
'TMC1',
'TMIE',
'TMPRSS3',
'TPRN',
'TRIOBP',
'TUBB4B',
'USH1C',
'USH1G',
'USH2A',
'WFS1',
'WHRN',
}
) > 0) | 21.027273 | 32 | 0.253783 |
7cb6f4beed1a08b09244a31819b47421774b7914 | 6,486 | py | Python | eval/util/metrics.py | fau-is/grm | 78b1559ea0dda1b817283adecd58da50ca232223 | [
"MIT"
] | 5 | 2020-09-15T18:57:01.000Z | 2021-12-13T14:14:08.000Z | eval/util/metrics.py | fau-is/grm | 78b1559ea0dda1b817283adecd58da50ca232223 | [
"MIT"
] | null | null | null | eval/util/metrics.py | fau-is/grm | 78b1559ea0dda1b817283adecd58da50ca232223 | [
"MIT"
] | 1 | 2020-09-10T17:45:22.000Z | 2020-09-10T17:45:22.000Z | import sklearn
import pandas
import seaborn as sns
import matplotlib.pyplot as pyplot
from functools import reduce
# import numpy as np
| 36.234637 | 118 | 0.646007 |
7cb7c886108da63565062eb8d192b4df3da78f64 | 3,566 | py | Python | dpgs_sandbox/tests/test_bug_migrations_in_base_models.py | gabrielpiassetta/django-pgschemas | 1e76db4cef31c7534bf4ba109961e835a1dd3c96 | [
"MIT"
] | null | null | null | dpgs_sandbox/tests/test_bug_migrations_in_base_models.py | gabrielpiassetta/django-pgschemas | 1e76db4cef31c7534bf4ba109961e835a1dd3c96 | [
"MIT"
] | null | null | null | dpgs_sandbox/tests/test_bug_migrations_in_base_models.py | gabrielpiassetta/django-pgschemas | 1e76db4cef31c7534bf4ba109961e835a1dd3c96 | [
"MIT"
] | null | null | null | import warnings
from unittest.mock import patch
from django.apps import apps
from django.core import management
from django.core.management.base import CommandError
from django.db import models
from django.db.utils import ProgrammingError
from django.test import TransactionTestCase, tag
from django_pgschemas.checks import check_schema_names
from django_pgschemas.models import TenantMixin
from django_pgschemas.utils import get_tenant_model
TenantModel = get_tenant_model()
| 37.536842 | 118 | 0.714526 |
7cb900078da95ed33cbe2fdf9bd9a465b5e9a56e | 6,330 | py | Python | tfx/components/transform/component.py | pingsutw/tfx | bf0d1d74e3f6ea429989fc7b80b82bea08077857 | [
"Apache-2.0"
] | null | null | null | tfx/components/transform/component.py | pingsutw/tfx | bf0d1d74e3f6ea429989fc7b80b82bea08077857 | [
"Apache-2.0"
] | null | null | null | tfx/components/transform/component.py | pingsutw/tfx | bf0d1d74e3f6ea429989fc7b80b82bea08077857 | [
"Apache-2.0"
] | null | null | null | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX Transform component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Text, Union
import absl
from tfx import types
from tfx.components.base import base_component
from tfx.components.base import executor_spec
from tfx.components.transform import executor
from tfx.orchestration import data_types
from tfx.types import artifact
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import TransformSpec
| 43.356164 | 105 | 0.733017 |
7cb9aea67a579bf1b09555b59098bc7f2315e25f | 959 | py | Python | objects/GitIndexEntry.py | anderslatif/alg | d5902a05a4cb249e554f65a7e8016d7d050b6da9 | [
"MIT"
] | null | null | null | objects/GitIndexEntry.py | anderslatif/alg | d5902a05a4cb249e554f65a7e8016d7d050b6da9 | [
"MIT"
] | null | null | null | objects/GitIndexEntry.py | anderslatif/alg | d5902a05a4cb249e554f65a7e8016d7d050b6da9 | [
"MIT"
] | null | null | null | # https://github.com/git/git/blob/master/Documentation/technical/index-format.txt
| 22.302326 | 85 | 0.657977 |
7cba9b9fb8b398f82ae1a8d924fec2ad7e1b9ddf | 2,437 | py | Python | matdgl/layers/partitionpaddinglayer.py | huzongxiang/CrystalNetwork | a434f76fa4347d42b3c905852ce265cd0bcefca3 | [
"BSD-2-Clause"
] | 6 | 2022-03-30T13:47:03.000Z | 2022-03-31T09:27:46.000Z | matdgl/layers/partitionpaddinglayer.py | huzongxiang/CrystalNetwork | a434f76fa4347d42b3c905852ce265cd0bcefca3 | [
"BSD-2-Clause"
] | null | null | null | matdgl/layers/partitionpaddinglayer.py | huzongxiang/CrystalNetwork | a434f76fa4347d42b3c905852ce265cd0bcefca3 | [
"BSD-2-Clause"
] | 2 | 2022-03-30T20:53:11.000Z | 2022-03-31T22:20:05.000Z |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 13 14:47:13 2021
@author: huzongxiang
"""
import tensorflow as tf
from tensorflow.keras import layers
| 27.077778 | 81 | 0.585556 |
7cbab3e957076a86de0198f2fb2ae52e8d52e634 | 173 | py | Python | lino_book/projects/min9/settings/memory.py | khchine5/book | b6272d33d49d12335d25cf0a2660f7996680b1d1 | [
"BSD-2-Clause"
] | 1 | 2018-01-12T14:09:58.000Z | 2018-01-12T14:09:58.000Z | lino_book/projects/min9/settings/memory.py | khchine5/book | b6272d33d49d12335d25cf0a2660f7996680b1d1 | [
"BSD-2-Clause"
] | 4 | 2018-02-06T19:53:10.000Z | 2019-08-01T21:47:44.000Z | lino_book/projects/min9/settings/memory.py | khchine5/book | b6272d33d49d12335d25cf0a2660f7996680b1d1 | [
"BSD-2-Clause"
] | null | null | null | from .demo import *
SITE.verbose_name = SITE.verbose_name + " (:memory:)"
# SITE = Site(globals(), title=Site.title+" (:memory:)")
DATABASES['default']['NAME'] = ':memory:'
| 34.6 | 56 | 0.653179 |
7cbb1dd15de4d3e88890e2caf26d07b7deb3f4b0 | 152 | py | Python | reservation/urls.py | aryamanak10/diner-restaurant-website | 6d2d9de89a73c5535ebf782c4d8bbfc6ca9489fc | [
"MIT"
] | 1 | 2020-05-07T17:18:36.000Z | 2020-05-07T17:18:36.000Z | reservation/urls.py | aryamanak10/Restaurant-Site-using-Django | 6d2d9de89a73c5535ebf782c4d8bbfc6ca9489fc | [
"MIT"
] | null | null | null | reservation/urls.py | aryamanak10/Restaurant-Site-using-Django | 6d2d9de89a73c5535ebf782c4d8bbfc6ca9489fc | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
app_name = 'reservation'
urlpatterns = [
path('', views.reserve_table, name = 'reserve_table'),
] | 19 | 58 | 0.710526 |
7cbb90e215684507ec88ead7205a67d14728eaf9 | 809 | py | Python | chainer/_version.py | yumetov/chainer | 522e017a18008ee00e39f4ae4b30f4f9db3824b2 | [
"MIT"
] | 3,705 | 2017-06-01T07:36:12.000Z | 2022-03-30T10:46:15.000Z | chainer/_version.py | yumetov/chainer | 522e017a18008ee00e39f4ae4b30f4f9db3824b2 | [
"MIT"
] | 5,998 | 2017-06-01T06:40:17.000Z | 2022-03-08T01:42:44.000Z | chainer/_version.py | yumetov/chainer | 522e017a18008ee00e39f4ae4b30f4f9db3824b2 | [
"MIT"
] | 1,150 | 2017-06-02T03:39:46.000Z | 2022-03-29T02:29:32.000Z | __version__ = '7.8.0'
_optional_dependencies = [
{
'name': 'CuPy',
'packages': [
'cupy-cuda120',
'cupy-cuda114',
'cupy-cuda113',
'cupy-cuda112',
'cupy-cuda111',
'cupy-cuda110',
'cupy-cuda102',
'cupy-cuda101',
'cupy-cuda100',
'cupy-cuda92',
'cupy-cuda91',
'cupy-cuda90',
'cupy-cuda80',
'cupy',
],
'specifier': '>=7.7.0,<8.0.0',
'help': 'https://docs.cupy.dev/en/latest/install.html',
},
{
'name': 'iDeep',
'packages': [
'ideep4py',
],
'specifier': '>=2.0.0.post3, <2.1',
'help': 'https://docs.chainer.org/en/latest/tips.html',
},
]
| 23.114286 | 63 | 0.410383 |
7cbc5cd567a3861d37ece4294dbac699b11bc6a2 | 10,435 | py | Python | image_aug.py | qwerasdf887/image_augmentation | 7d465eba4d6af5d9a4cd79bf1981c8ef206ffe42 | [
"MIT"
] | null | null | null | image_aug.py | qwerasdf887/image_augmentation | 7d465eba4d6af5d9a4cd79bf1981c8ef206ffe42 | [
"MIT"
] | null | null | null | image_aug.py | qwerasdf887/image_augmentation | 7d465eba4d6af5d9a4cd79bf1981c8ef206ffe42 | [
"MIT"
] | null | null | null | # coding=UTF-8
# This Python file uses the following encoding: utf-8
import cv2
import numpy as np
import xml.etree.cElementTree as ET
from random import sample
#default args:
default_args = {'noise_prob': 0.1,
'gasuss_mean': 0,
'gasuss_var': 0.001,
'rand_hug': 30,
'rand_saturation':30,
'rand_light': 30,
'rot_angle': 15,
'bordervalue': (127, 127, 127),
'zoom_out_value': 0.7,
'output_shape': (416, 416),
'take_value' : 5
}
#salt-and-pepper noise
#Gaussian noise
#hue shift (-N~N)
#saturation shift (-N~N)
#brightness shift (-N~N)
#horizontal flip
def horizontal_flip(image, box_loc=None, **kwargs):
'''
Args:
box_loc: bounding box location(x_min, y_min, x_max, y_max)
'''
if box_loc is None:
return cv2.flip(image, 1)
else:
w = image.shape[1]
for i in box_loc:
if i[2] == 0:
break
else:
x_min, x_max = i[0], i[2]
i[0] = w - x_max
i[2] = w - x_min
return cv2.flip(image, 1), box_loc
#vertical flip
def vertical_flip(image, box_loc=None, **kwargs):
'''
Args:
box_loc: bounding box location(num box,(x_min, y_min, x_max, y_max, label))
'''
if box_loc is None:
return cv2.flip(image, 0)
else:
h = image.shape[0]
for i in box_loc:
if i[3] == 0:
break
else:
y_min, y_max = i[1], i[3]
i[1] = h - y_max
i[3] = h - y_min
return cv2.flip(image, 0), box_loc
#rotate by a random angle in (-n~n) degrees
def rot_image(image, box_loc=None, **kwargs):
'''
Args:
box_loc: bounding box location(num box,(x_min, y_min, x_max, y_max, label))
rot:
bordervalue:
'''
h, w, _ = image.shape
center = ( w // 2, h // 2)
angle = np.random.randint(-kwargs['rot_angle'], kwargs['rot_angle'])
M = cv2.getRotationMatrix2D(center, angle, 1)
out_img = cv2.warpAffine(image, M, (w, h), borderValue = kwargs['bordervalue'])
if box_loc is None:
return out_img
else:
loc = box_loc[:,0:4].copy()
loc = np.append(loc, loc[:, 0:1], axis=-1)
loc = np.append(loc, loc[:, 3:4], axis=-1)
loc = np.append(loc, loc[:, 2:3], axis=-1)
loc = np.append(loc, loc[:, 1:2], axis=-1)
loc = loc.reshape(-1, 4, 2)
loc = loc - np.array(center)
rot_loc = loc.dot(np.transpose(M[:,0:2]))
rot_loc = rot_loc + np.array(center)
rot_box = np.hstack([np.min(rot_loc, axis=-2), np.max(rot_loc, axis=-2), box_loc[:, 4:5]])
rot_box = np.floor(rot_box)
rot_box[...,0:4] = np.clip(rot_box[...,0:4], [0,0,0,0], [w-1, h-1, w-1, h-1])
return out_img, rot_box
#resize image
#pad image to the output shape
#random zoom out (scale value~1)
#load csv data
#draw rectangle
#apply 0~N random image augmentations
if __name__ == "__main__":
img = cv2.imread('./00002.jpg')
bbox = load_csv('./00002.xml')
    #salt-and-pepper noise
#aug_img = sp_noise(img, **default_args)
#aug_img, bbox = sp_noise(img, bbox, **default_args)
#gasuss_noise
#aug_img = gasuss_noise(img, **default_args)
#aug_img, bbox = gasuss_noise(img, bbox, **default_args)
#Hue
#aug_img = mod_hue(img, **default_args)
#aug_img, bbox = mod_hue(img, bbox, **default_args)
#saturation
#aug_img = mod_saturation(img, **default_args)
#aug_img, bbox = mod_saturation(img, bbox, **default_args)
#light
#aug_img = mod_light(img, **default_args)
#aug_img, bbox = mod_light(img, bbox, **default_args)
    #horizontal flip
#aug_img = horizontal_flip(img, **default_args)
#aug_img, bbox = horizontal_flip(img, bbox, **default_args)
    #vertical flip
#aug_img = vertical_flip(img, **default_args)
#aug_img, bbox = vertical_flip(img, bbox, **default_args)
    #rotate
#aug_img = rot_image(img, **default_args)
#aug_img, bbox = rot_image(img, bbox, **default_args)
#resize
#aug_img = resize_img(img, **default_args)
#aug_img, bbox = resize_img(img, bbox, **default_args)
    #padding
#aug_img = padding_img(aug_img, **default_args)
#aug_img, bbox = padding_img(aug_img, bbox, **default_args)
    #random zoom out (scale N~1)
#aug_img = random_zoom_out(img, **default_args)
#aug_img, bbox = random_zoom_out(img, bbox, **default_args)
#augmentation
aug_img = rand_aug_image(img, **default_args)
#aug_img, bbox = rand_aug_image(img, bbox, **default_args)
print(bbox)
draw_rect(aug_img, bbox)
cv2.imshow('img', img)
cv2.imshow('aug img', aug_img)
cv2.waitKey(0)
cv2.destroyAllWindows() | 31.430723 | 117 | 0.58965 |
7cbd6ca4479663e9722341b796b7cdd0073b6b18 | 1,507 | py | Python | 03_picnic/picnic.py | intimanipuchi/tiny_python_projects | 5e419620ae07b0bcf8df073ba3f6c6c3d7d1a93c | [
"MIT"
] | null | null | null | 03_picnic/picnic.py | intimanipuchi/tiny_python_projects | 5e419620ae07b0bcf8df073ba3f6c6c3d7d1a93c | [
"MIT"
] | null | null | null | 03_picnic/picnic.py | intimanipuchi/tiny_python_projects | 5e419620ae07b0bcf8df073ba3f6c6c3d7d1a93c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Author : Roman Koziy <[email protected]>
Date : 2021-12-15
Purpose: Working with lists
"""
import argparse
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description="Working with lists",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("items",
type=str,
nargs="+",
metavar="str",
help="item(s) to bring")
parser.add_argument("-s",
"--sorted",
help="a boolean flag",
action="store_true")
return parser.parse_args()
# --------------------------------------------------
def main():
"""The main function: formatting and printing the output"""
args = get_args()
sort_flag = args.sorted
items = args.items
if sort_flag:
items = sorted(items)
if len(items) == 1:
print(f"You are bringing {items[0]}.")
elif len(items) < 3:
items.insert(-1, "and")
print(f"You are bringing {' '.join(items)}.")
else:
# print(items)
last = items[-1]
and_last = "and " + last
items[-1] = and_last
# print(items)
print(f"You are bringing {', '.join(items)}.")
# --------------------------------------------------
if __name__ == "__main__":
main()
| 25.116667 | 63 | 0.472462 |
7cbd753ba4fba3e640a8395f823b9af2ba40868f | 400 | py | Python | triangle.py | montyshyama/python-basics | d71156d70fdadc722a192b984e9bff66401ab894 | [
"MIT"
] | null | null | null | triangle.py | montyshyama/python-basics | d71156d70fdadc722a192b984e9bff66401ab894 | [
"MIT"
] | null | null | null | triangle.py | montyshyama/python-basics | d71156d70fdadc722a192b984e9bff66401ab894 | [
"MIT"
] | 1 | 2020-04-05T20:06:08.000Z | 2020-04-05T20:06:08.000Z | side_a=int(input("Enter the first side(a):"))
side_b=int(input("Enter the second side(b):"))
side_c=int(input("Enter the third side(c):"))
if side_a==side_b and side_a==side_c:
print("The triangle is an equilateral triangle.")
elif side_a==side_b or side_a==side_c or side_b==side_c:
print("The triangle is an isosceles triangle.")
else:
print("The triangle is scalene triangle.") | 44.444444 | 57 | 0.7075 |
7cbd766d520e1888b731cf3cea3bb5f44d830c1f | 520 | py | Python | david/modules/artist/view.py | ktmud/david | 4b8d6f804b73cdfa1a8ddf784077fa9a39f1e36f | [
"MIT"
] | 2 | 2016-04-07T08:21:32.000Z | 2020-11-26T11:49:20.000Z | david/modules/artist/view.py | ktmud/david | 4b8d6f804b73cdfa1a8ddf784077fa9a39f1e36f | [
"MIT"
] | null | null | null | david/modules/artist/view.py | ktmud/david | 4b8d6f804b73cdfa1a8ddf784077fa9a39f1e36f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from flask import Blueprint, request
from david.lib.template import st
from .model import Artist
bp = Blueprint('artist', __name__)
| 23.636364 | 57 | 0.688462 |
7cbdd46842ad893e844a14b8fc15ffc18db30ecc | 2,832 | py | Python | Volume Estimation/volume.py | JessieRamaux/Food-Volume-Estimation | 260b0e78a3b6a7b8bbe9daf98956502beea92552 | [
"MIT"
] | 10 | 2021-02-19T09:31:43.000Z | 2022-02-09T08:29:02.000Z | Volume Estimation/volume.py | JessieRamaux/Food-Volume-Estimation | 260b0e78a3b6a7b8bbe9daf98956502beea92552 | [
"MIT"
] | null | null | null | Volume Estimation/volume.py | JessieRamaux/Food-Volume-Estimation | 260b0e78a3b6a7b8bbe9daf98956502beea92552 | [
"MIT"
] | 3 | 2021-02-16T00:05:32.000Z | 2021-06-11T13:37:10.000Z | import numpy as np
import cv2
import os
import json
import glob
from PIL import Image, ImageDraw
plate_diameter = 25 #cm
plate_depth = 1.5 #cm
plate_thickness = 0.2 #cm
img = cv2.imread("out.png",0)
print(get_volume(img,"test.json")) | 28.039604 | 116 | 0.604167 |
7cbe3198f6071ec0d541441f81f18f624a937b6f | 5,044 | py | Python | t2k/bin/cmttags.py | tianluyuan/pyutils | 2cd3a90dbbd3d0eec3054fb9493ca0f6e0272e50 | [
"MIT"
] | 1 | 2019-02-22T10:57:13.000Z | 2019-02-22T10:57:13.000Z | t2k/bin/cmttags.py | tianluyuan/pyutils | 2cd3a90dbbd3d0eec3054fb9493ca0f6e0272e50 | [
"MIT"
] | null | null | null | t2k/bin/cmttags.py | tianluyuan/pyutils | 2cd3a90dbbd3d0eec3054fb9493ca0f6e0272e50 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
A script to create tags for CMT managed packages.
Call from within cmt/ directory
"""
import subprocess
import sys
import os
from optparse import OptionParser
__author__ = 'Tianlu Yuan'
__email__ = 'tianlu.yuan [at] colorado.edu'
# Ignore large external packages for now
IGNORES = ['CMT', 'EXTERN', 'GSL', 'MYSQL', 'GEANT', 'CLHEP']
# Extensions for finding src files, must satisfy unix wildcard rules
EXTENSIONS = {'cpp': ('*.[hc]', '*.[hc]xx', '*.[hc]pp', '*.cc', '*.hh'),
'python':('*.py'),
'java':('*.java')}
# Ignore these files and dirs, key specifies argument to find
# (e.g. '-iname')
PRUNE = {'iname':['*_Dict.[hc]*', '*linkdef.h']}
def check_dir():
""" Are we inside cmt/
"""
if os.path.basename(os.getcwd()) != 'cmt':
sys.exit('Not inside cmt directory!')
def check_requirements():
""" Ensure that requirements file exists in cmt dir
"""
if not os.path.isfile('requirements'):
sys.exit('No requirements file!')
def init_use_dict():
"""Returns the initial use_dict which contains the current (cwd)
package and its path. 'cmt show uses' does not include the
package itself.
"""
# Must call os.path.dirname because the cwd should be inside a cmt
# directory
return {'this':os.path.dirname(os.getcwd())}
def parse_uses():
""" Returns a dict of used packages and their root dir paths.
e.g. {ROOT:/path/to/cmt/installed/ROOT/vXrY}
"""
check_dir()
check_requirements()
proc = subprocess.Popen(['cmt', 'show', 'uses'],
stdout=subprocess.PIPE)
use_dict = init_use_dict()
for line in iter(proc.stdout.readline, ''):
tokens = line.split()
# ignore lines that start with '#'
if line[0] != '#' and tokens[1] not in IGNORES:
basepath = tokens[-1].strip('()')
# highland and psyche do not strictly follow CMT path
# organization. They have subpackages within a master, so
# we need to take that into account
relpath_list = [master for master in tokens[3:-1]]
relpath_list.extend([tokens[1], tokens[2]])
use_dict[tokens[1]] = os.path.join(basepath, *relpath_list)
return use_dict
def build_find_args(exts):
""" ext is a list of file extensions corresponding to the files we want
to search. This will return a list of arguments that can be passed to `find`
"""
find_args = []
for a_ext in exts:
# -o for "or"
find_args.extend(['-o', '-iname'])
find_args.append('{0}'.format(a_ext))
# replace first '-o' with '( for grouping matches
find_args[0] = '('
# append parens for grouping negation
find_args.extend([')', '('])
# Add prune files
for match_type in PRUNE:
for aprune in PRUNE[match_type]:
find_args.append('-not')
find_args.append('-'+match_type)
find_args.append('{0}'.format(aprune))
find_args.append(')')
return find_args
def build_find_cmd(opts, paths):
""" Builds teh cmd file using ctags. Returns cmd based on the following
template: 'find {0} -type f {1} | etags -'
"""
find_args = build_find_args(get_exts(opts))
return ['find']+paths+['-type', 'f']+find_args
def main():
""" Uses ctags to generate TAGS file in cmt directory based on cmt show uses
"""
parser = OptionParser()
parser.add_option('--cpp',
dest='cpp',
action='store_true',
default=False,
help='tag only c/cpp files (default)')
parser.add_option('--python',
dest='python',
action='store_true',
default=False,
help='tag only python files')
parser.add_option('--java',
dest='java',
action='store_true',
default=False,
help='tag only java files')
parser.add_option('-n',
dest='dry_run',
action='store_true',
default=False,
help='dry run')
(opts, args) = parser.parse_args()
# get the cmt show uses dictionary of programs and paths
use_dict = parse_uses()
# build the commands
find_cmd = build_find_cmd(opts, list(use_dict.itervalues()))
tags_cmd = build_tags_cmd()
print 'Creating TAGS file based on dependencies:'
print use_dict
if not opts.dry_run:
find_proc = subprocess.Popen(find_cmd, stdout=subprocess.PIPE)
tags_proc = subprocess.Popen(tags_cmd, stdin=find_proc.stdout)
tags_proc.communicate()
if __name__ == '__main__':
main()
| 28.822857 | 80 | 0.585052 |
7cbea5a7d278dcb466c16a1d3e035b7e14f3c77c | 63,630 | py | Python | salt/daemons/masterapi.py | rickh563/salt | 02822d6466c47d0daafd6e98b4e767a396b0ed48 | [
"Apache-2.0"
] | null | null | null | salt/daemons/masterapi.py | rickh563/salt | 02822d6466c47d0daafd6e98b4e767a396b0ed48 | [
"Apache-2.0"
] | null | null | null | salt/daemons/masterapi.py | rickh563/salt | 02822d6466c47d0daafd6e98b4e767a396b0ed48 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
This module contains all of the routines needed to set up a master server, this
involves preparing the three listeners and the workers needed by the master.
'''
from __future__ import absolute_import
# Import python libs
import fnmatch
import logging
import os
import re
import time
import stat
import tempfile
# Import salt libs
import salt.crypt
import salt.utils
import salt.client
import salt.payload
import salt.pillar
import salt.state
import salt.runner
import salt.auth
import salt.wheel
import salt.minion
import salt.search
import salt.key
import salt.fileserver
import salt.utils.atomicfile
import salt.utils.event
import salt.utils.verify
import salt.utils.minions
import salt.utils.gzip_util
import salt.utils.jid
from salt.pillar import git_pillar
from salt.utils.event import tagify
from salt.exceptions import SaltMasterError
# Import 3rd-party libs
import salt.ext.six as six
try:
import pwd
HAS_PWD = True
except ImportError:
# pwd is not available on windows
HAS_PWD = False
log = logging.getLogger(__name__)
# Things to do in lower layers:
# only accept valid minion ids
def init_git_pillar(opts):
'''
Clear out the ext pillar caches, used when the master starts
'''
pillargitfs = []
for opts_dict in [x for x in opts.get('ext_pillar', [])]:
if 'git' in opts_dict:
try:
import git
except ImportError:
return pillargitfs
parts = opts_dict['git'].strip().split()
try:
br = parts[0]
loc = parts[1]
except IndexError:
log.critical(
'Unable to extract external pillar data: {0}'
.format(opts_dict['git'])
)
else:
pillargitfs.append(
git_pillar.GitPillar(
br,
loc,
opts
)
)
return pillargitfs
def clean_fsbackend(opts):
'''
Clean out the old fileserver backends
'''
# Clear remote fileserver backend caches so they get recreated
for backend in ('git', 'hg', 'svn'):
if backend in opts['fileserver_backend']:
env_cache = os.path.join(
opts['cachedir'],
'{0}fs'.format(backend),
'envs.p'
)
if os.path.isfile(env_cache):
log.debug('Clearing {0}fs env cache'.format(backend))
try:
os.remove(env_cache)
except OSError as exc:
log.critical(
'Unable to clear env cache file {0}: {1}'
.format(env_cache, exc)
)
file_lists_dir = os.path.join(
opts['cachedir'],
'file_lists',
'{0}fs'.format(backend)
)
try:
file_lists_caches = os.listdir(file_lists_dir)
except OSError:
continue
for file_lists_cache in fnmatch.filter(file_lists_caches, '*.p'):
cache_file = os.path.join(file_lists_dir, file_lists_cache)
try:
os.remove(cache_file)
except OSError as exc:
log.critical(
'Unable to file_lists cache file {0}: {1}'
.format(cache_file, exc)
)
def clean_expired_tokens(opts):
'''
Clean expired tokens from the master
'''
serializer = salt.payload.Serial(opts)
for (dirpath, dirnames, filenames) in os.walk(opts['token_dir']):
for token in filenames:
token_path = os.path.join(dirpath, token)
with salt.utils.fopen(token_path) as token_file:
token_data = serializer.loads(token_file.read())
if 'expire' not in token_data or token_data.get('expire', 0) < time.time():
try:
os.remove(token_path)
except (IOError, OSError):
pass
def clean_old_jobs(opts):
'''
Clean out the old jobs from the job cache
'''
# TODO: better way to not require creating the masterminion every time?
mminion = salt.minion.MasterMinion(
opts,
states=False,
rend=False,
)
# If the master job cache has a clean_old_jobs, call it
fstr = '{0}.clean_old_jobs'.format(opts['master_job_cache'])
if fstr in mminion.returners:
mminion.returners[fstr]()
def access_keys(opts):
'''
A key needs to be placed in the filesystem with permissions 0400 so
clients are required to run as root.
'''
users = []
keys = {}
acl_users = set(opts['client_acl'].keys())
if opts.get('user'):
acl_users.add(opts['user'])
acl_users.add(salt.utils.get_user())
if HAS_PWD:
for user in pwd.getpwall():
users.append(user.pw_name)
for user in acl_users:
log.info(
'Preparing the {0} key for local communication'.format(
user
)
)
if HAS_PWD:
if user not in users:
try:
user = pwd.getpwnam(user).pw_name
except KeyError:
log.error('ACL user {0} is not available'.format(user))
continue
keyfile = os.path.join(
opts['cachedir'], '.{0}_key'.format(user)
)
if os.path.exists(keyfile):
log.debug('Removing stale keyfile: {0}'.format(keyfile))
os.unlink(keyfile)
key = salt.crypt.Crypticle.generate_key_string()
cumask = os.umask(191)
with salt.utils.fopen(keyfile, 'w+') as fp_:
fp_.write(key)
os.umask(cumask)
# 600 octal: Read and write access to the owner only.
# Write access is necessary since on subsequent runs, if the file
# exists, it needs to be written to again. Windows enforces this.
os.chmod(keyfile, 0o600)
if HAS_PWD:
try:
os.chown(keyfile, pwd.getpwnam(user).pw_uid, -1)
except OSError:
# The master is not being run as root and can therefore not
# chown the key file
pass
keys[user] = key
return keys
def fileserver_update(fileserver):
'''
Update the fileserver backends, requires that a built fileserver object
be passed in
'''
try:
if not fileserver.servers:
log.error(
'No fileservers loaded, the master will not be able to '
'serve files to minions'
)
raise SaltMasterError('No fileserver backends available')
fileserver.update()
except Exception as exc:
log.error(
'Exception {0} occurred in file server update'.format(exc),
exc_info_on_loglevel=logging.DEBUG
)
| 39.253547 | 173 | 0.49604 |
7cbf3fcf677b8e93a5ef2be1bcf1c650636a93f5 | 2,003 | py | Python | core/domain/role_services_test.py | Mohitbalwani26/oppia | a3d1de8b428b8216bb61ba70315583fe077f5b8a | [
"Apache-2.0"
] | null | null | null | core/domain/role_services_test.py | Mohitbalwani26/oppia | a3d1de8b428b8216bb61ba70315583fe077f5b8a | [
"Apache-2.0"
] | null | null | null | core/domain/role_services_test.py | Mohitbalwani26/oppia | a3d1de8b428b8216bb61ba70315583fe077f5b8a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test functions relating to roles and actions."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import role_services
from core.tests import test_utils
import feconf
import python_utils
| 39.27451 | 79 | 0.734898 |
7cbf5d867e83bab7776ed420c8f1d228f4f2244d | 82,473 | py | Python | deep_learning/keras/keras/backend/cntk_backend.py | xpennec/applications | 50aefdf14de308fc3c132784ebba9d329e47b087 | [
"MIT"
] | 21 | 2019-01-12T17:59:41.000Z | 2022-03-08T17:42:56.000Z | deep_learning/keras/keras/backend/cntk_backend.py | farrell236/applications | 0e1ab139ade2a0b3ba6f04f6fd93822b1dd5ae2f | [
"MIT"
] | 7 | 2019-01-24T11:44:58.000Z | 2020-04-21T21:13:37.000Z | deep_learning/keras/keras/backend/cntk_backend.py | farrell236/applications | 0e1ab139ade2a0b3ba6f04f6fd93822b1dd5ae2f | [
"MIT"
] | 8 | 2019-01-24T11:36:05.000Z | 2021-06-15T20:59:50.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cntk as C
import numpy as np
from .common import floatx, epsilon, image_dim_ordering, image_data_format
from collections import defaultdict
from contextlib import contextmanager
import warnings
C.set_global_option('align_axis', 1)
b_any = any
dev = C.device.use_default_device()
if dev.type() == 0:
warnings.warn(
'CNTK backend warning: GPU is not detected. '
'CNTK\'s CPU version is not fully optimized,'
'please run with GPU to get better performance.')
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
# LEARNING_PHASE_PLACEHOLDER is the placeholder for dynamic learning phase
_LEARNING_PHASE_PLACEHOLDER = C.constant(shape=(), dtype=np.float32, value=1.0, name='_keras_learning_phase')
# static learning phase flag, if it is not 0 or 1, we will go with dynamic learning phase tensor.
_LEARNING_PHASE = -1
_UID_PREFIXES = defaultdict(int)
# cntk doesn't support gradient as symbolic op, to hook up with keras model,
# we will create gradient as a constant placeholder, here use this global
# map to keep the mapping from grad placeholder to parameter
grad_parameter_dict = {}
NAME_SCOPE_STACK = []
def clear_session():
"""Reset learning phase flag for cntk backend.
"""
global _LEARNING_PHASE
global _LEARNING_PHASE_PLACEHOLDER
_LEARNING_PHASE = -1
_LEARNING_PHASE_PLACEHOLDER.value = np.asarray(1.0)
def variable(value, dtype=None, name=None, constraint=None):
"""Instantiates a variable and returns it.
# Arguments
value: Numpy array, initial value of the tensor.
dtype: Tensor type.
name: Optional name string for the tensor.
constraint: Optional projection function to be
applied to the variable after an optimizer update.
# Returns
A variable instance (with Keras metadata included).
"""
if dtype is None:
dtype = floatx()
if name is None:
name = ''
if isinstance(
value,
C.variables.Constant) or isinstance(
value,
C.variables.Parameter):
value = value.value
# we don't support init parameter with symbolic op, so eval it first as
# workaround
if isinstance(value, C.cntk_py.Function):
value = eval(value)
shape = value.shape if hasattr(value, 'shape') else ()
if hasattr(value, 'dtype') and value.dtype != dtype and len(shape) > 0:
value = value.astype(dtype)
# TODO: remove the conversion when cntk supports int32, int64
# https://docs.microsoft.com/en-us/python/api/cntk.variables.parameter
dtype = 'float32' if 'int' in str(dtype) else dtype
v = C.parameter(shape=shape,
init=value,
dtype=dtype,
name=_prepare_name(name, 'variable'))
v._keras_shape = v.shape
v._uses_learning_phase = False
v.constraint = constraint
return v
def is_placeholder(x):
"""Returns whether `x` is a placeholder.
# Arguments
x: A candidate placeholder.
# Returns
Boolean.
"""
return hasattr(x, '_cntk_placeholder') and x._cntk_placeholder
| 32.153216 | 109 | 0.584361 |
7cbfb5620b9999ebfec8396e7e566e9eef183412 | 6,946 | py | Python | Project Files/Prebuilt tools/twitter/Twitter/pylib/oauthlib/oauth1/rfc5849/endpoints/resource.py | nVoid/Yale-TouchDesigner-April2016 | 40eb36f515fa3935f3e9ddaa923664e88308262c | [
"MIT"
] | 39 | 2015-06-10T23:18:07.000Z | 2021-10-21T04:29:06.000Z | Project Files/Prebuilt tools/twitter/Twitter/pylib/oauthlib/oauth1/rfc5849/endpoints/resource.py | nVoid/Yale-TouchDesigner-April2016 | 40eb36f515fa3935f3e9ddaa923664e88308262c | [
"MIT"
] | 13 | 2020-10-28T16:02:09.000Z | 2020-11-16T13:30:05.000Z | Project Files/Prebuilt tools/twitter/Twitter/pylib/oauthlib/oauth1/rfc5849/endpoints/resource.py | nVoid/Yale-TouchDesigner-April2016 | 40eb36f515fa3935f3e9ddaa923664e88308262c | [
"MIT"
] | 26 | 2015-06-10T22:09:15.000Z | 2021-06-27T15:45:15.000Z | # -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849.endpoints.resource
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of the resource protection provider logic of
OAuth 1.0 RFC 5849.
"""
from __future__ import absolute_import, unicode_literals
from oauthlib.common import log
from .base import BaseEndpoint
from .. import errors
| 44.525641 | 84 | 0.6444 |
7cbffc6e03738d28e2329b530ca6fb3c25fe1127 | 1,493 | py | Python | python/ex_1.py | AymenSe/Geometric-operations-DIP | ef0b0bc86210a8da5e63136bf5a239179b869722 | [
"MIT"
] | null | null | null | python/ex_1.py | AymenSe/Geometric-operations-DIP | ef0b0bc86210a8da5e63136bf5a239179b869722 | [
"MIT"
] | null | null | null | python/ex_1.py | AymenSe/Geometric-operations-DIP | ef0b0bc86210a8da5e63136bf5a239179b869722 | [
"MIT"
] | null | null | null | ####################################################
#
# @ Authors : SEKHRI Aymen
# MOHAMMED HACENE Tarek
#
# @ Hint: you have to install all requirements
# from requirements.txt
#
####################################################
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
# load the image
onion_img = cv.imread("onion.png")
# Store height and width and channels of the image
row, col, chs = onion_img.shape
# Store the spectral resolution
dtype_img = onion_img.dtype # This will give you: uint8
def translation(img, trans):
"""
args:
- img: absolute path to the image
- trans: must be a tuple (row_trans, col_trans)
"""
# read the image
image = cv.imread(img)
# retrieve the height and the width
height, width = image.shape[:2]
# retrieve the params of translation
row_trans, col_trans = trans
# Create the translation matrix
T = np.float32([[1, 0, col_trans], [0, 1, row_trans]])
# Apply the T matrix: T*M
img_translation = cv.warpAffine(image, T, (width, height))
# show the images
cv.imshow("Original Image", image)
cv.imshow('Translation Image', img_translation)
# Don't destroy the images until the user do
cv.waitKey()
cv.destroyAllWindows()
# translation 20 pixels to the right
translation("onion.png", (0, 20))
# translation 50 lines and 100 cols to the right
translation("onion.png", (50, 100))
# remove the pepper from the image using translations
translation("onion.png", (40, 40))
| 24.883333 | 59 | 0.649029 |
7cc0266db2f787f19a55358bfe261dafe0201d9d | 3,999 | py | Python | utils/hit_rate_utils.py | h-zcc/ref-nms | 8f83f350c497d0ef875c778a8ce76725552abb3c | [
"MIT"
] | 19 | 2020-12-14T13:53:10.000Z | 2022-02-27T09:46:15.000Z | utils/hit_rate_utils.py | h-zcc/ref-nms | 8f83f350c497d0ef875c778a8ce76725552abb3c | [
"MIT"
] | 3 | 2021-01-16T11:41:07.000Z | 2021-08-06T08:21:42.000Z | utils/hit_rate_utils.py | h-zcc/ref-nms | 8f83f350c497d0ef875c778a8ce76725552abb3c | [
"MIT"
] | 3 | 2021-01-10T15:25:29.000Z | 2021-09-26T01:38:16.000Z | from utils.misc import calculate_iou, xywh_to_xyxy
__all__ = ['NewHitRateEvaluator', 'CtxHitRateEvaluator']
| 36.688073 | 105 | 0.565141 |
7cc16e64c487a1ae6266b04c47e0496bada66d00 | 905 | py | Python | LeetCode_ReorderDataLogFiles.py | amukher3/Problem_solutions | 8fa6014a91f295d08cafb989024caa91d99211d9 | [
"Apache-2.0"
] | 1 | 2021-12-28T08:58:51.000Z | 2021-12-28T08:58:51.000Z | LeetCode_ReorderDataLogFiles.py | amukher3/Coding | a330cb04b5dd5cc1c3cf69249417a71586441bc7 | [
"Apache-2.0"
] | null | null | null | LeetCode_ReorderDataLogFiles.py | amukher3/Coding | a330cb04b5dd5cc1c3cf69249417a71586441bc7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 22 19:07:30 2020
@author: Abhishek Mukherjee
"""
| 27.424242 | 76 | 0.438674 |
7cc17f1b77efcc568026cf1d93c6a6ded983ab6a | 475 | py | Python | saleor/core/transactions.py | fairhopeweb/saleor | 9ac6c22652d46ba65a5b894da5f1ba5bec48c019 | [
"CC-BY-4.0"
] | 15,337 | 2015-01-12T02:11:52.000Z | 2021-10-05T19:19:29.000Z | saleor/core/transactions.py | fairhopeweb/saleor | 9ac6c22652d46ba65a5b894da5f1ba5bec48c019 | [
"CC-BY-4.0"
] | 7,486 | 2015-02-11T10:52:13.000Z | 2021-10-06T09:37:15.000Z | saleor/core/transactions.py | aminziadna/saleor | 2e78fb5bcf8b83a6278af02551a104cfa555a1fb | [
"CC-BY-4.0"
] | 5,864 | 2015-01-16T14:52:54.000Z | 2021-10-05T23:01:15.000Z | from contextlib import contextmanager
from django.db import DatabaseError
from ..core.tracing import traced_atomic_transaction
| 22.619048 | 65 | 0.669474 |
7cc1ccd2747eb46713eaccaf9ca6dc49d25b3128 | 3,314 | py | Python | src/command_modules/azure-cli-policyinsights/azure/cli/command_modules/policyinsights/tests/latest/test_policyinsights_scenario.py | diberry/azure-cli | 302999245cbb13b890b0a74f03443c577bd4bfae | [
"MIT"
] | 1 | 2019-03-30T20:49:32.000Z | 2019-03-30T20:49:32.000Z | src/command_modules/azure-cli-policyinsights/azure/cli/command_modules/policyinsights/tests/latest/test_policyinsights_scenario.py | diberry/azure-cli | 302999245cbb13b890b0a74f03443c577bd4bfae | [
"MIT"
] | 4 | 2018-08-08T20:01:17.000Z | 2018-09-17T15:20:06.000Z | src/command_modules/azure-cli-policyinsights/azure/cli/command_modules/policyinsights/tests/latest/test_policyinsights_scenario.py | diberry/azure-cli | 302999245cbb13b890b0a74f03443c577bd4bfae | [
"MIT"
] | 1 | 2018-04-14T01:46:00.000Z | 2018-04-14T01:46:00.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import ScenarioTest, record_only
| 48.028986 | 194 | 0.545866 |
7cc20f1f6a53dbfc79dbca785199d6d05868daf1 | 25,440 | py | Python | tests/prep_post/test.py | Aslic/rmats_turbo_4.1.0 | c651509a5d32799315054fa37a2210fab2aae5e5 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | tests/prep_post/test.py | Aslic/rmats_turbo_4.1.0 | c651509a5d32799315054fa37a2210fab2aae5e5 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | tests/prep_post/test.py | Aslic/rmats_turbo_4.1.0 | c651509a5d32799315054fa37a2210fab2aae5e5 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | import glob
import os.path
import subprocess
import sys
import unittest
import tests.bam
import tests.base_test
import tests.gtf
import tests.output_parser as output_parser
import tests.test_config
import tests.util
if __name__ == '__main__':
unittest.main(verbosity=2)
| 39.75 | 83 | 0.553263 |
7cc57c915f6cace046e0bbe739957206038f009f | 1,527 | py | Python | nltk/align/util.py | kruskod/nltk | dba7b5431b1d57a75d50e048961c1a203b98c3da | [
"Apache-2.0"
] | 1 | 2015-11-25T00:47:58.000Z | 2015-11-25T00:47:58.000Z | nltk/align/util.py | kruskod/nltk | dba7b5431b1d57a75d50e048961c1a203b98c3da | [
"Apache-2.0"
] | null | null | null | nltk/align/util.py | kruskod/nltk | dba7b5431b1d57a75d50e048961c1a203b98c3da | [
"Apache-2.0"
] | null | null | null | # Natural Language Toolkit: Aligner Utilities
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Anna Garbar
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
from nltk.align.api import Alignment
def pharaohtext2tuples(pharaoh_text):
"""
Converts pharaoh text format into an Alignment object (a list of tuples).
>>> pharaoh_text = '0-0 2-1 9-2 21-3 10-4 7-5'
>>> pharaohtext2tuples(pharaoh_text)
Alignment([(0, 0), (2, 1), (7, 5), (9, 2), (10, 4), (21, 3)])
:type pharaoh_text: str
:param pharaoh_text: the word alignment outputs in the pharaoh output format
:rtype: Alignment
:return: An Alignment object that contains a list of integer tuples
"""
    # Converts each 'i-j' alignment point string into a tuple of integers.
list_of_tuples = [tuple(map(int,a.split('-'))) for a in pharaoh_text.split()]
return Alignment(list_of_tuples)
def alignment2pharaohtext(alignment):
"""
Converts an Alignment object (a list of tuples) into pharaoh text format.
>>> alignment = [(0, 0), (2, 1), (9, 2), (21, 3), (10, 4), (7, 5)]
>>> alignment2pharaohtext(alignment)
'0-0 2-1 9-2 21-3 10-4 7-5'
:type alignment: Alignment
:param alignment: An Alignment object that contains a list of integer tuples
:rtype: str
:return: the word alignment outputs in the pharaoh output format
"""
pharaoh_text = ' '.join(str(i) + "-" + str(j) for i,j in alignment)
return pharaoh_text
| 33.933333 | 81 | 0.642436 |
7cc70ea72109b3602fc21ef2eb53e2e3c1469770 | 1,461 | py | Python | grr/server/grr_response_server/databases/db_yara_test_lib.py | khanhgithead/grr | 8ad8a4d2c5a93c92729206b7771af19d92d4f915 | [
"Apache-2.0"
] | 4,238 | 2015-01-01T15:34:50.000Z | 2022-03-31T08:18:05.000Z | grr/server/grr_response_server/databases/db_yara_test_lib.py | khanhgithead/grr | 8ad8a4d2c5a93c92729206b7771af19d92d4f915 | [
"Apache-2.0"
] | 787 | 2015-01-02T21:34:24.000Z | 2022-03-02T13:26:38.000Z | grr/server/grr_response_server/databases/db_yara_test_lib.py | khanhgithead/grr | 8ad8a4d2c5a93c92729206b7771af19d92d4f915 | [
"Apache-2.0"
] | 856 | 2015-01-02T02:50:11.000Z | 2022-03-31T11:11:53.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""A module with test cases for the YARA database method."""
import os
from grr_response_server.databases import db
from grr_response_server.rdfvalues import objects as rdf_objects
| 33.976744 | 75 | 0.776181 |
7cc7a37d4874c578241d8fb555c025d8c962058b | 4,912 | py | Python | gpytorch/kernels/inducing_point_kernel.py | 4aHxKzD/gpytorch | 7193545f88820ea04588b983f1d7ed603a59a27c | [
"MIT"
] | 1 | 2021-03-05T07:20:58.000Z | 2021-03-05T07:20:58.000Z | gpytorch/kernels/inducing_point_kernel.py | 4aHxKzD/gpytorch | 7193545f88820ea04588b983f1d7ed603a59a27c | [
"MIT"
] | 1 | 2021-02-24T14:01:43.000Z | 2021-02-24T14:01:43.000Z | gpytorch/kernels/inducing_point_kernel.py | syncrostone/gpytorch | 4d33fbf64594aab2dd6e0cfcb3242510231b3e0e | [
"MIT"
] | 1 | 2021-03-15T12:32:24.000Z | 2021-03-15T12:32:24.000Z | #!/usr/bin/env python3
import copy
import math
import torch
from ..distributions import MultivariateNormal
from ..lazy import DiagLazyTensor, LowRankRootAddedDiagLazyTensor, LowRankRootLazyTensor, MatmulLazyTensor, delazify
from ..mlls import InducingPointKernelAddedLossTerm
from ..models import exact_prediction_strategies
from ..utils.cholesky import psd_safe_cholesky
from .kernel import Kernel
| 36.656716 | 116 | 0.659202 |
7cc9967d7946d0cff670ce2e551feabb3ef304ce | 891 | py | Python | app/__init__.py | Jotasenpai/DigitalMediaStoreRESTfull | bb776d398e1756b1ff2fd4f392b80479ae29847d | [
"MIT"
] | null | null | null | app/__init__.py | Jotasenpai/DigitalMediaStoreRESTfull | bb776d398e1756b1ff2fd4f392b80479ae29847d | [
"MIT"
] | null | null | null | app/__init__.py | Jotasenpai/DigitalMediaStoreRESTfull | bb776d398e1756b1ff2fd4f392b80479ae29847d | [
"MIT"
] | null | null | null | import logging
import os
from flask import Flask
from flask_cors import CORS
from app.extensions import api
from app.extensions.database import db
from app.extensions.schema import ma
from app.views import albums, artists, hello, tracks
| 21.214286 | 54 | 0.679012 |
7cc9e6223af3f0ca91fd050679827da65d115102 | 18,053 | py | Python | app.py | SASHA-PAIS/A-Flask-web-app-for-inventory-management | e6ed1b0d1d06ba04f9930f7653ce0504ecf81dd3 | [
"MIT"
] | null | null | null | app.py | SASHA-PAIS/A-Flask-web-app-for-inventory-management | e6ed1b0d1d06ba04f9930f7653ce0504ecf81dd3 | [
"MIT"
] | null | null | null | app.py | SASHA-PAIS/A-Flask-web-app-for-inventory-management | e6ed1b0d1d06ba04f9930f7653ce0504ecf81dd3 | [
"MIT"
] | null | null | null | from flask import Flask, url_for, request, redirect
from flask import render_template as render
from flask_mysqldb import MySQL
import yaml
import json
import MySQLdb
import decimal
# Setting up the flask instance
app = Flask(__name__)
# Configure the database
db = yaml.load(open('db.yaml'))
app.config['MYSQL_HOST'] = db['mysql_host']
app.config['MYSQL_USER'] = db['mysql_user']
app.config['MYSQL_PASSWORD'] = db['mysql_password']
app.config['MYSQL_DB'] = db['mysql_db']
mysql = MySQL(app)
link = {x:x for x in ["location", "product", "movement"]}
link["index"] = '/'
if __name__ == '__main__':
app.run(debug=True) | 37.146091 | 218 | 0.585831 |
7ccbe673fd6019f10368e191ac41278443f5c053 | 9,554 | py | Python | python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_mean_op.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 8 | 2016-08-15T07:02:27.000Z | 2016-08-24T09:34:00.000Z | python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_mean_op.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 1 | 2021-11-01T06:28:16.000Z | 2021-11-01T06:28:16.000Z | python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_mean_op.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 5 | 2021-12-10T11:20:06.000Z | 2022-02-18T05:18:12.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import PassVersionChecker
from paddle.fluid.core import AnalysisConfig
if __name__ == "__main__":
unittest.main()
| 40.483051 | 82 | 0.626125 |
7ccc10fc8c636712784281edcf93b9e16ef2ae97 | 2,202 | py | Python | configs/vinbig/detectors_resnext.py | SeHwanJoo/mmdetection_vinbig | 9a27d2b5cd8b3ec9ed1a94e4704a7c883f15dce3 | [
"Apache-2.0"
] | 2 | 2021-04-01T08:17:08.000Z | 2021-07-12T11:53:53.000Z | configs/vinbig/detectors_resnext.py | SeHwanJoo/mmdetection_vinbig | 9a27d2b5cd8b3ec9ed1a94e4704a7c883f15dce3 | [
"Apache-2.0"
] | null | null | null | configs/vinbig/detectors_resnext.py | SeHwanJoo/mmdetection_vinbig | 9a27d2b5cd8b3ec9ed1a94e4704a7c883f15dce3 | [
"Apache-2.0"
] | null | null | null | _base_ = [
'../_base_/models/cascade_rcnn_r50_fpn.py',
'./dataset_base.py',
'./scheduler_base.py',
'../_base_/default_runtime.py'
]
model = dict(
pretrained='open-mmlab://resnext101_32x4d',
backbone=dict(
type='DetectoRS_ResNeXt',
pretrained='open-mmlab://resnext101_32x4d',
depth=101,
groups=32,
base_width=4,
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True),
output_img=True,
plugins=[
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
stages=(False, False, True, True),
in_channels=512,
position='after_conv2')
]
),
neck=dict(
type='RFP',
rfp_steps=2,
aspp_out_channels=64,
aspp_dilations=(1, 3, 6, 1),
rfp_backbone=dict(
rfp_inplanes=256,
type='DetectoRS_ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True),
pretrained='open-mmlab://resnext101_32x4d',
style='pytorch')),
roi_head=dict(
bbox_head=[
dict(
type='Shared2FCBBoxHead',
num_classes=14
),
dict(
type='Shared2FCBBoxHead',
num_classes=14
),
dict(
type='Shared2FCBBoxHead',
num_classes=14
)
]
),
test_cfg=dict(
rpn=dict(
nms_thr=0.7
),
rcnn=dict(
score_thr=0.0,
nms=dict(type='nms', iou_threshold=0.4)
)
)
)
| 27.525 | 57 | 0.475931 |
7cccbb5b10c9e4406bbef811b8c0c86a34ddfd24 | 26,701 | py | Python | skbio/draw/tests/test_distributions.py | johnchase/scikit-bio | 340e6153b6c93053d923d344e63481860e03731e | [
"BSD-3-Clause"
] | null | null | null | skbio/draw/tests/test_distributions.py | johnchase/scikit-bio | 340e6153b6c93053d923d344e63481860e03731e | [
"BSD-3-Clause"
] | null | null | null | skbio/draw/tests/test_distributions.py | johnchase/scikit-bio | 340e6153b6c93053d923d344e63481860e03731e | [
"BSD-3-Clause"
] | null | null | null | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import matplotlib.pyplot as plt
from skbio.draw import boxplots, grouped_distributions
from skbio.draw._distributions import (
_calc_data_point_locations, _calc_data_point_ticks, _color_box_plot,
_create_legend, _get_distribution_markers, _is_single_matplotlib_color,
_plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options,
_set_figure_size, _validate_input, _validate_x_values)
if __name__ == '__main__':
main()
| 46.275563 | 79 | 0.562638 |
7ccec8a64f1094a2aaa4d1c42f4858ca203734a3 | 122 | py | Python | packages/gtmapi/lmsrvcore/api/interfaces/__init__.py | jjwatts/gigantum-client | 88ce0475fb6880322bdd06d987c494e29064f278 | [
"MIT"
] | 60 | 2018-09-26T15:46:00.000Z | 2021-10-10T02:37:14.000Z | packages/gtmapi/lmsrvcore/api/interfaces/__init__.py | jjwatts/gigantum-client | 88ce0475fb6880322bdd06d987c494e29064f278 | [
"MIT"
] | 1,706 | 2018-09-26T16:11:22.000Z | 2021-08-20T13:37:59.000Z | packages/gtmapi/lmsrvcore/api/interfaces/__init__.py | jjwatts/gigantum-client | 88ce0475fb6880322bdd06d987c494e29064f278 | [
"MIT"
] | 11 | 2019-03-14T13:23:51.000Z | 2022-01-25T01:29:16.000Z | from lmsrvcore.api.interfaces.user import User
from lmsrvcore.api.interfaces.git import GitCommit, GitRef, GitRepository
| 30.5 | 73 | 0.844262 |
7cced65964aa995783474d3ea16a3fdb37a88182 | 3,570 | py | Python | tensorflow_probability/python/bijectors/invert_test.py | matthieucoquet/probability | 2426f4fc4743ceedc1a638a03d19ce6654ebff76 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/bijectors/invert_test.py | matthieucoquet/probability | 2426f4fc4743ceedc1a638a03d19ce6654ebff76 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/bijectors/invert_test.py | matthieucoquet/probability | 2426f4fc4743ceedc1a638a03d19ce6654ebff76 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.bijectors import bijector_test_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
if __name__ == "__main__":
tf.test.main()
| 38.387097 | 95 | 0.693557 |
7ccf2b0c1cc9f5a9318ca8b0e302ba7e965fbb1e | 4,394 | py | Python | dayu_widgets/alert.py | ZSD-tim/dayu_widgets | 31c2530bdc4161d9311574d9850c2e9471e53072 | [
"MIT"
] | null | null | null | dayu_widgets/alert.py | ZSD-tim/dayu_widgets | 31c2530bdc4161d9311574d9850c2e9471e53072 | [
"MIT"
] | null | null | null | dayu_widgets/alert.py | ZSD-tim/dayu_widgets | 31c2530bdc4161d9311574d9850c2e9471e53072 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2019.2
# Email : [email protected]
###################################################################
"""
MAlert class.
"""
import six
import functools
from dayu_widgets.avatar import MAvatar
from dayu_widgets.label import MLabel
from dayu_widgets import dayu_theme
from dayu_widgets.tool_button import MToolButton
from dayu_widgets.mixin import property_mixin
from dayu_widgets.qt import QWidget, QHBoxLayout, MPixmap, Qt, MIcon, Property
| 32.072993 | 98 | 0.62244 |
7cd0a40cffa8dae1e2b1f6c63ccd1a5c2242724b | 86 | py | Python | week03/code05.py | byeongal/KMUCP | 5bafe02c40aae67fc53d9e6cdcb727929368587e | [
"MIT"
] | null | null | null | week03/code05.py | byeongal/KMUCP | 5bafe02c40aae67fc53d9e6cdcb727929368587e | [
"MIT"
] | null | null | null | week03/code05.py | byeongal/KMUCP | 5bafe02c40aae67fc53d9e6cdcb727929368587e | [
"MIT"
] | 1 | 2019-11-27T20:28:19.000Z | 2019-11-27T20:28:19.000Z | input_str = input(" . >> ")
print(" ", len(input_str), ".")
| 28.666667 | 46 | 0.639535 |
7cd14d3a6d6b9088b4271089222cd9080f058243 | 5,664 | py | Python | jobs/SCH/JB_SALES_HIERARCHY_FLAG_N_SR.py | bibinvasudev/EBI_Project | df2560139e463d68a37e67e0bb683c06fa9ef91b | [
"CNRI-Python"
] | null | null | null | jobs/SCH/JB_SALES_HIERARCHY_FLAG_N_SR.py | bibinvasudev/EBI_Project | df2560139e463d68a37e67e0bb683c06fa9ef91b | [
"CNRI-Python"
] | null | null | null | jobs/SCH/JB_SALES_HIERARCHY_FLAG_N_SR.py | bibinvasudev/EBI_Project | df2560139e463d68a37e67e0bb683c06fa9ef91b | [
"CNRI-Python"
] | null | null | null | # SCH1101.sh --> JB_SALES_HIERARCHY_FLAG_N_SR.py
#**************************************************************************************************************
#
# Created by : bibin
# Version : 1.0
#
# Description :
# 1. This script will load the data into 'SALES_HIERARCHY' table based on stream lookups.
#
#
# Initial Creation:
#
# Date (YYYY-MM-DD) Change Description
# ----------------- ------------------
# 2018-11-02 Initial creation
#
#**************************************************************************************************************
# Importing required Lib
from dependencies.spark import start_spark
from dependencies.EbiReadWrite import EbiReadWrite
import logging
import sys
from time import gmtime, strftime
import cx_Oracle
import py4j
import pyspark
# Spark logging
logger = logging.getLogger(__name__)
# Date Formats
start_date = "'"+strftime("%Y-%m-%d %H:%M:%S", gmtime())+"'"
log_date =strftime("%Y%m%d", gmtime())
# Job Naming Details
script_name = "SCH1101.SH"
app_name = "JB_SALES_HIERARCHY_FLAG_N_SR"
log_filename = app_name + '_' + log_date + '.log'
# Query for loading invoice table
# Main method
# Entry point for script
if __name__ == "__main__":
# Calling main() method
main()
| 38.794521 | 218 | 0.625883 |
7cd1e3bcc66dd50fd9167cfd73166db8b21f6910 | 670 | py | Python | myth/util.py | amanbhandari2002/mythproto | b03764485dad5178127307a3b3e4ddc508158143 | [
"BSD-3-Clause"
] | 1 | 2020-10-01T09:17:00.000Z | 2020-10-01T09:17:00.000Z | myth/util.py | amanbhandari2002/mythproto | b03764485dad5178127307a3b3e4ddc508158143 | [
"BSD-3-Clause"
] | null | null | null | myth/util.py | amanbhandari2002/mythproto | b03764485dad5178127307a3b3e4ddc508158143 | [
"BSD-3-Clause"
] | 2 | 2020-09-30T19:53:40.000Z | 2020-10-01T09:13:08.000Z |
# t is a nine item tuple returned by the time module. This method converts it to
# MythTV's standard representation used on filenames
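# Hedged sketch of that conversion (the original body is elided in this dump;
# the function name and exact format are assumptions -- MythTV recording
# filenames embed a YYYYMMDDHHMMSS timestamp):
def format_time(t):
    # t[:6] is (year, month, day, hour, minute, second), as in time.localtime()
    return '%04d%02d%02d%02d%02d%02d' % tuple(t[:6])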
| 18.108108 | 80 | 0.631343 |
7cd283a215a5ab2f5c601f954e24742216c659e4 | 14,208 | py | Python | scripts/tator_tracker.py | openem-team/openem | 45222c9c77084eacab278da25a8734ae7d43f677 | [
"MIT"
] | 10 | 2019-01-23T23:58:01.000Z | 2021-08-30T19:42:35.000Z | scripts/tator_tracker.py | openem-team/openem | 45222c9c77084eacab278da25a8734ae7d43f677 | [
"MIT"
] | 3 | 2020-03-20T15:21:41.000Z | 2020-09-18T18:49:38.000Z | scripts/tator_tracker.py | openem-team/openem | 45222c9c77084eacab278da25a8734ae7d43f677 | [
"MIT"
] | 2 | 2020-05-08T17:39:12.000Z | 2020-10-09T01:27:17.000Z | #!/usr/bin/env python3
import argparse
import openem
import os
import cv2
import numpy as np
from openem.tracking import *
import json
import sys
import datetime
import tator
from pprint import pprint
from collections import defaultdict
import yaml
import math
import subprocess
import sys
if __name__=="__main__":
parser = argparse.ArgumentParser(description=__doc__)
tator.get_parser(parser)
parser.add_argument("--detection-type-id", type=int, required=True)
parser.add_argument("--tracklet-type-id", type=int, required=True)
parser.add_argument("--version-id", type=int)
parser.add_argument("--input-version-id", type=int)
parser.add_argument("--strategy-config", type=str)
parser.add_argument("--dry-run", action='store_true')
parser.add_argument('media_files', type=str, nargs='*')
args = parser.parse_args()
# Weight methods
methods = ['hybrid', 'iou', 'iou-motion', 'iou-global-motion']
# Weight methods that require the video
visual_methods = ['hybrid', 'iou-global-motion']
api = tator.get_api(args.host, args.token)
detection_type = api.get_localization_type(args.detection_type_id)
project = detection_type.project
version_id = args.version_id
default_strategy = {"method": "hybrid",
"frame-diffs": [1,2,4,8,16,32,64,128,256],
"args": {},
"extension": {'method' : None},
"max-length": {},
"min-length": 0}
if args.strategy_config:
strategy = {**default_strategy}
with open(args.strategy_config, "r") as strategy_file:
strategy.update(yaml.safe_load(strategy_file))
else:
strategy = default_strategy
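# Example strategy file for --strategy-config (a sketch; the keys mirror
# default_strategy above, and the 'threshold' value is an assumed
# IoUWeights argument, not a documented one):
#
#   method: iou
#   frame-diffs: [1, 2, 4, 8]
#   args:
#     threshold: 0.5
#   extension:
#     method: linear-motion
#   max-length: {8: 64}
#   min-length: 0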
if strategy['method'] == 'hybrid':
model_file = strategy['args']['model_file']
batch_size = strategy['args'].get('batch_size', 4)
comparator=FeaturesComparator(model_file)
#extractor=FeaturesExtractor(args.model_file)
class_method = strategy.get('class-method',None)
classify_function = None
classify_args = {}
if class_method:
pip_package=class_method.get('pip',None)
if pip_package:
p = subprocess.run([sys.executable,
"-m",
"pip",
"install",
pip_package])
print("Finished process.", flush=True)
function_name = class_method.get('function',None)
classify_args = class_method.get('args',None)
names = function_name.split('.')
module = __import__(names[0])
for name in names[1:-1]:
module = getattr(module,name)
classify_function = getattr(module,names[-1])
print("Strategy: ", flush=True)
pprint(strategy)
print(args.media_files, flush=True)
optional_fetch_args = {}
if args.input_version_id:
optional_fetch_args['version'] = [args.input_version_id]
for media_file in args.media_files:
comps=os.path.splitext(os.path.basename(media_file))[0]
media_id=comps.split('_')[0]
media = api.get_media(media_id)
if media.attributes.get("Tracklet Generator Processed") != "No":
print(f"Skipping media ID {media.id}, name {media.name} due to "
f"'Tracklet Generator Processed' attribute being set to "
f"something other than 'No'!")
continue
media_shape = (media.height, media.width)
fps = media.fps
localizations_by_frame = {}
localizations = api.get_localization_list(project,
type=args.detection_type_id,
media_id=[media_id],
**optional_fetch_args)
localizations = [l.to_dict() for l in localizations]
if len(localizations) == 0:
print(f"No localizations present in media {media_file}", flush=True)
continue
print(f"Processing {len(localizations)} detections", flush=True)
# Group by localizations by frame
for lid, local in enumerate(localizations):
frame = local['frame']
if frame in localizations_by_frame:
localizations_by_frame[frame].append(local)
else:
localizations_by_frame[frame] = [local]
detections=[]
track_ids=[]
track_id=1
# If media does not exist, download it.
if strategy['method'] == 'iou-global-motion':
if not os.path.exists(media_file):
temp_path = f'/tmp/{os.path.basename(media_file)}'
for progress in tator.util.download_media(api, media, temp_path):
print(f"Downloading {media_file}, {progress}%...")
print("Download finished!")
# Unfrag the file
subprocess.run(["ffmpeg", '-i', temp_path, '-c:v', 'copy', media_file])
os.remove(temp_path)
if strategy['method'] == 'hybrid': # Not all visual methods need detection images
vid=cv2.VideoCapture(media_file)
ok=True
frame = 0
while ok:
ok,frame_bgr = vid.read()
if frame in localizations_by_frame:
for l in localizations_by_frame[frame]:
l['bgr'] = crop_localization(frame_bgr, l)
if l['attributes']['Confidence'] < 0.50:
continue
detections.append(l)
track_ids.append(track_id)
track_id += 1
frame+=1
else:
# The method is analytical on the detections coordinates
# and does not require processing the video
for frame,frame_detections in localizations_by_frame.items():
for det in frame_detections:
detections.append(det)
track_ids.append(track_id)
track_id += 1
print("Loaded all detections", flush=True)
track_ids = renumber_track_ids(track_ids)
if strategy['method'] == 'hybrid':
weights_strategy = HybridWeights(comparator,
None,
None,
media_shape,
fps,
0.0,
batch_size)
elif strategy['method'] == 'iou':
weights_strategy = IoUWeights(media_shape, **strategy['args'])
elif strategy['method'] == 'iou-motion':
weights_strategy = IoUMotionWeights(media_shape, **strategy['args'])
elif strategy['method'] == 'iou-global-motion':
weights_strategy = IoUGlobalMotionWeights(media_shape, media_file, **strategy['args'])
# Generate localization bgr based on grouped localizations
for x in strategy['frame-diffs']:
print(f"Started {x}", flush=True)
detections, track_ids, pairs, weights, is_cut, constraints = join_tracklets(
detections,
track_ids,
x,
weights_strategy)
if x in strategy['max-length']:
trim_to = strategy['max-length'][x]
print(f"Trimming track to max length of {trim_to}")
detections, track_ids = trim_tracklets(detections, track_ids, trim_to)
_,det_counts_per_track=np.unique(track_ids,return_counts=True)
print(f"frame-diff {x}: {len(detections)} to {len(det_counts_per_track)}", flush=True)
if x > 1 and strategy['extension']['method'] == 'linear-motion':
ext_frames=x
print(f"Extending by linear motion, {ext_frames}")
tracklets = join_up_iteration(detections,track_ids)
tracklets = extend_tracklets(tracklets, ext_frames)
detections, track_ids = split_tracklets(tracklets)
# Now we make new track objects based on the result
# from the graph solver
# [ detection, detection, detection, ...]
# [ track#, track#, track#,...]
# [ 133, 33, 13, 133,]
# [ 0,0,1,1]
# TODO: Handle is_cut?
tracklets = join_up_final(detections, track_ids)
new_objs=[make_object(tracklet) for tracklet in tracklets.values()]
new_objs=[x for x in new_objs if x is not None]
print(f"New objects = {len(new_objs)}")
with open(f"/work/{media_id}.json", "w") as f:
json.dump(new_objs,f)
if not args.dry_run:
for response in tator.util.chunked_create(api.create_state_list,project,
state_spec=new_objs):
pass
try:
api.update_media(int(media_id), {"attributes":{"Tracklet Generator Processed": str(datetime.datetime.now())}})
except:
print("WARNING: Unable to set 'Tracklet Generator Processed' attribute")
| 39.248619 | 126 | 0.551943 |
7cd30f449f940b3e03ca41f6babd9a375fe19ebf | 1,167 | py | Python | hypergan/losses/multi_loss.py | Darkar25/HyperGAN | 76ef7e0c20569ceece88dc76396d92c77050692b | [
"MIT"
] | 1 | 2019-05-29T14:24:04.000Z | 2019-05-29T14:24:04.000Z | hypergan/losses/multi_loss.py | KonradLinkowski/HyperGAN | 3153daee838dbb8e8d8926b1e81419682a24f2fe | [
"MIT"
] | 218 | 2021-05-25T01:46:15.000Z | 2022-02-11T01:08:52.000Z | hypergan/losses/multi_loss.py | KonradLinkowski/HyperGAN | 3153daee838dbb8e8d8926b1e81419682a24f2fe | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
import hyperchamber as hc
from hypergan.losses.base_loss import BaseLoss
from hypergan.multi_component import MultiComponent
TINY=1e-8
| 26.522727 | 99 | 0.61868 |
7cd33c8d7f17b4aa6fc5f6d3f2701686f2ce01a4 | 13,643 | py | Python | src/fidesops/api/v1/endpoints/policy_endpoints.py | mohan-pogala/fidesops | 5c686362d4fb3b85253dd7e2898be1131a5071ab | [
"Apache-2.0"
] | null | null | null | src/fidesops/api/v1/endpoints/policy_endpoints.py | mohan-pogala/fidesops | 5c686362d4fb3b85253dd7e2898be1131a5071ab | [
"Apache-2.0"
] | null | null | null | src/fidesops/api/v1/endpoints/policy_endpoints.py | mohan-pogala/fidesops | 5c686362d4fb3b85253dd7e2898be1131a5071ab | [
"Apache-2.0"
] | null | null | null | import logging
from typing import Any, Dict, List
from fastapi import APIRouter, Body, Depends, Security
from fastapi_pagination import (
Page,
Params,
)
from fastapi_pagination.bases import AbstractPage
from fastapi_pagination.ext.sqlalchemy import paginate
from fidesops.schemas.shared_schemas import FidesOpsKey
from pydantic import conlist
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session
from starlette.exceptions import HTTPException
from starlette.status import HTTP_404_NOT_FOUND
from fidesops.api import deps
from fidesops.api.v1 import scope_registry as scopes
from fidesops.api.v1 import urn_registry as urls
from fidesops.common_exceptions import (
DataCategoryNotSupported,
PolicyValidationError,
RuleValidationError,
RuleTargetValidationError,
KeyOrNameAlreadyExists,
)
from fidesops.models.client import ClientDetail
from fidesops.models.policy import (
ActionType,
Policy,
Rule,
RuleTarget,
)
from fidesops.models.storage import StorageConfig
from fidesops.schemas import policy as schemas
from fidesops.schemas.api import BulkUpdateFailed
from fidesops.util.oauth_util import verify_oauth_client
router = APIRouter(tags=["Policy"], prefix=urls.V1_URL_PREFIX)
logger = logging.getLogger(__name__)
def get_policy_or_error(db: Session, policy_key: FidesOpsKey) -> Policy:
"""Helper method to load Policy or throw a 404"""
logger.info(f"Finding policy with key '{policy_key}'")
policy = Policy.get_by(db=db, field="key", value=policy_key)
if not policy:
raise HTTPException(
status_code=HTTP_404_NOT_FOUND,
detail=f"No Policy found for key {policy_key}.",
)
return policy
| 31.95082 | 113 | 0.609543 |
7cd398bd2b3034834a61ea93e2ee16c8e1011acb | 7,425 | py | Python | engage-analytics/sentiment_analysis/src/report/interface_report.py | oliveriopt/mood-analytics | c98eb8c483a05af938a2f6f49d8ea803f5711572 | [
"Apache-2.0"
] | null | null | null | engage-analytics/sentiment_analysis/src/report/interface_report.py | oliveriopt/mood-analytics | c98eb8c483a05af938a2f6f49d8ea803f5711572 | [
"Apache-2.0"
] | 2 | 2020-03-27T19:14:44.000Z | 2020-03-27T19:14:44.000Z | engage-analytics/sentiment_analysis/src/report/interface_report.py | oliveriopt/mood-analytics | c98eb8c483a05af938a2f6f49d8ea803f5711572 | [
"Apache-2.0"
] | null | null | null | import emoji
import sentiment_analysis.src.report.cons_report as cons
import sentiment_analysis.src.constants as global_cons
from utils.data_connection.api_data_manager import APISourcesFetcher
from utils.utilities import read_json_file, CUSTOM_YEAR_WEEK_AGG, extract_dimension, extract_question
from sentiment_analysis.src.word_cloud import words_clouds
from sentiment_analysis.src.clients_language_sentiments_entity import ClientsLanguageSentiment
from nested_lookup import nested_lookup
| 40.135135 | 116 | 0.644175 |
7cd4c163b81a2a9f7a9f4fb51454b97b7933bffd | 1,565 | py | Python | dwh_analytic/dags/data_warehouse_prod/schema/dim_process.py | dnguyenngoc/analytic | d609a93e96e7c546ad3ee3ebd4e13309ddf575f8 | [
"MIT"
] | null | null | null | dwh_analytic/dags/data_warehouse_prod/schema/dim_process.py | dnguyenngoc/analytic | d609a93e96e7c546ad3ee3ebd4e13309ddf575f8 | [
"MIT"
] | null | null | null | dwh_analytic/dags/data_warehouse_prod/schema/dim_process.py | dnguyenngoc/analytic | d609a93e96e7c546ad3ee3ebd4e13309ddf575f8 | [
"MIT"
] | null | null | null | resource = 'human and machine'
| 27.946429 | 62 | 0.548882 |
7cd59f2bf170f30d86846848bf3c6c4bf7b96d9c | 2,491 | py | Python | lino/modlib/gfks/mixins.py | NewRGB/lino | 43799e42107169ff173d3b8bc0324d5773471499 | [
"BSD-2-Clause"
] | 1 | 2019-11-13T19:38:50.000Z | 2019-11-13T19:38:50.000Z | lino/modlib/gfks/mixins.py | NewRGB/lino | 43799e42107169ff173d3b8bc0324d5773471499 | [
"BSD-2-Clause"
] | null | null | null | lino/modlib/gfks/mixins.py | NewRGB/lino | 43799e42107169ff173d3b8bc0324d5773471499 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: UTF-8 -*-
# Copyright 2010-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from builtins import object
from django.contrib.contenttypes.models import *
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils.text import format_lazy
from lino.api import dd
from lino.core.gfks import gfk2lookup
from .fields import GenericForeignKey, GenericForeignKeyIdField
| 30.753086 | 71 | 0.635488 |
7cd5ad9a803e1cac21f7d6ba2961e58bea3c98da | 21,926 | py | Python | optical_form_reader/main.py | 1enes/optical_form_reader | fab99f2403c25f84fcb5bdac50148ab248432516 | [
"MIT"
] | null | null | null | optical_form_reader/main.py | 1enes/optical_form_reader | fab99f2403c25f84fcb5bdac50148ab248432516 | [
"MIT"
] | null | null | null | optical_form_reader/main.py | 1enes/optical_form_reader | fab99f2403c25f84fcb5bdac50148ab248432516 | [
"MIT"
] | null | null | null | import cv2
import matplotlib.pyplot as plt
import numpy as np
from imutils import contours
from imutils.perspective import four_point_transform, order_points
import imutils
cevap_anahtar={0:2,1:1,2:2,3:3,4:1,5:4,6:4,7:3,8:1,9:1,10:0,11:0,12:2,13:1,14:2,15:3,16:4,17:4,18:4,19:3,20:2,21:1,22:0,23:0,24:0,25:4,26:2,27:3,28:4,29:4,30:4,31:3,32:2,33:1,34:0,35:0,36:1,37:2,38:3,39:4}
alfabe={0:'A',1:'B',2:'C',3:'Ç',4:'D',5:'E',6:'F',7:'G',8:'Ğ',9:'H',10:'I',11:'İ',12:'J',13:'K',14:'L',15:'M',16:'N',17:'O',18:'Ö',19:'P',20:'Q',21:'R',22:'S',23:'Ş',24:'T',25:'U',26:'Ü',27:'V',28:'W',29:'Y',30:'Z',31:'X'}
def cevap_gri(col1,col2,col3,col4):
'''
Convert the columns to grayscale so they take up less space in memory.
'''
col1_gri=cv2.cvtColor(col1,cv2.COLOR_BGR2GRAY)
col2_gri=cv2.cvtColor(col2,cv2.COLOR_BGR2GRAY)
col3_gri=cv2.cvtColor(col3,cv2.COLOR_BGR2GRAY)
col4_gri=cv2.cvtColor(col4,cv2.COLOR_BGR2GRAY)
return col1_gri,col2_gri,col3_gri,col4_gri
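# Usage sketch (hypothetical BGR column crops col1..col4, e.g. read with cv2):
# col1_g, col2_g, col3_g, col4_g = cevap_gri(col1, col2, col3, col4)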
####################################################################
if __name__ == '__main__':
bos_kagit="optic_empty.jpg"
dolu_kagit="optic_marked.jpg"
main_starter(bos_kagit,dolu_kagit)
| 31.100709 | 223 | 0.622457 |
7cd6ff8a4443655a42df05eccf62b0e804763fb0 | 2,898 | py | Python | service.py | Tigge/script.filmtipset-grade | a5b438dc478d6ef40f611585e9cd196c2ff49cf6 | [
"BSD-2-Clause"
] | 1 | 2015-02-19T08:45:57.000Z | 2015-02-19T08:45:57.000Z | service.py | Tigge/script.filmtipset-grade | a5b438dc478d6ef40f611585e9cd196c2ff49cf6 | [
"BSD-2-Clause"
] | 1 | 2015-02-01T19:28:17.000Z | 2015-03-18T22:27:14.000Z | service.py | Tigge/script.filmtipset-grade | a5b438dc478d6ef40f611585e9cd196c2ff49cf6 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2013, Gustav Tiger
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import xbmc
import xbmcaddon
import xbmcgui
import filmtipset
FILMTIPSET_ACCESS_KEY = "7ndg3Q3qwW8dPzbJMrB5Rw"
player = XBMCPlayer()
while(not xbmc.abortRequested):
if player.isPlayingVideo():
player.update()
xbmc.sleep(1000)
| 34.094118 | 82 | 0.670807 |
7cd8fbdd89ede56684cdd39b8bd8583e3ed86ea6 | 16,528 | py | Python | test/testMatrix.py | turkeydonkey/nzmath3 | a48ae9efcf0d9ad1485c2e9863c948a7f1b20311 | [
"BSD-3-Clause"
] | 1 | 2021-05-26T19:22:17.000Z | 2021-05-26T19:22:17.000Z | test/testMatrix.py | turkeydonkey/nzmath3 | a48ae9efcf0d9ad1485c2e9863c948a7f1b20311 | [
"BSD-3-Clause"
] | null | null | null | test/testMatrix.py | turkeydonkey/nzmath3 | a48ae9efcf0d9ad1485c2e9863c948a7f1b20311 | [
"BSD-3-Clause"
] | null | null | null |
import unittest
from nzmath.matrix import *
import nzmath.vector as vector
import nzmath.rational as rational
import nzmath.poly.uniutil as uniutil
Ra = rational.Rational
Poly = uniutil.polynomial
Int = rational.theIntegerRing
# sub test
try:
from test.testMatrixFiniteField import *
except ImportError:
try:
from nzmath.test.testMatrixFiniteField import *
except ImportError:
from .testMatrixFiniteField import *
## for RingMatrix
a1 = createMatrix(1, 2, [3, 2])
a2 = Matrix(1, 2, [5, -6])
a3 = createMatrix(3, 2, [7, 8]+[3, -2]+[0, 10])
a4 = Matrix(3, 2, [21, -12]+[1, -1]+[0, 0])
a5 = createMatrix(1, 2, [Poly({0:3, 1:5}, Int), Poly({1:2}, Int)])
## for RingSquareMatrix
b1 = createMatrix(2, 2, [1, 2]+[3, 4])
b2 = Matrix(2, 2, [0, -1]+[1, -2])
b3 = createMatrix(3, 3, [0, 1, 2]+[5, 4, 6]+[7, 9, 8])
b4 = Matrix(3, 3, [1, 2, 3]+[0, 5, -2]+[7, 1, 9])
b5 = createMatrix(3, 3, [1, 3, 2, 4, 6, 5, 6, 8, 9])
b6 = createMatrix(3, 3, [1, 2, 4, 0, 3, 5, 0, 0, 0])
b7 = createMatrix(3, 3, [1, 0, 0, 9, 1, 0, 5, 6, 1])
b8 = Matrix(3, 3, [3, 15, 12]+[2,7,5]+[1,-4,-2])
## for FieldMatrix
c1 = createMatrix(1, 2, [Ra(3), Ra(2)])
c2 = createMatrix(4, 5, \
[Ra(0), 0, 1, 2, -1]+[0, 0, 5, 12, -2]+[0, 0, 1, 3, -1]+[0, 0, 1, 2, 0])
c3 = createMatrix(3, 2, [Ra(1), 2]+[2, 5]+[6, 7])
## for FieldSquareMatrix
d1 = createMatrix(2, 2, [Ra(1), Ra(2)]+[Ra(3), Ra(4)])
d2 = createMatrix(3, 3, [Ra(1), 2, 3]+[4, 5, 6]+[5, 7, 9])
d3 = Matrix(3, 3, \
[Ra(1), Ra(2), Ra(3)]+[Ra(0), Ra(5), Ra(-2)]+[7, 1, 9])
d4 = createMatrix(6, 6, \
[Ra(4), 2, 5, 0, 2, 1]+[5, 1, 2, 5, 1, 1]+[90, 7, 54, 8, 4, 6]+\
[7, 5, 0, 8, 2, 5]+[8, 2, 6, 5, -4, 2]+[4, 1, 5, 6, 3, 1])
d5 = createMatrix(4, 4, \
[Ra(2), -1, 0, 0]+[-1, 2, -1, 0]+[0, -1, 2, -1]+[0, 0, -1, 2])
d6 = createMatrix(4, 4, \
[Ra(1), 2, 3, 4]+[2, 3, 4, 5]+[3, 4, 5, 6]+[4, 5, 6, 7])
d7 = Matrix(3, 3, \
[Ra(1, 2), Ra(2, 3), Ra(1, 5)]+[Ra(3, 2), Ra(1, 3), Ra(2, 5)]+[Ra(-1, 2), Ra(4, 3), Ra(3, 5)])
## other objects
v1 = vector.Vector([1, 4])
v2 = vector.Vector([8])
v3 = vector.Vector([0, 0, 1])
def suite(suffix="Test"):
suite = unittest.TestSuite()
all_names = globals()
for name in all_names:
if name.endswith(suffix):
suite.addTest(unittest.makeSuite(all_names[name], "test"))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
| 35.165957 | 94 | 0.575811 |
7cd9e1b23df0b17b32715fc652a510c5c85b28ff | 372 | py | Python | python/test-nose-3.py | li-ma/homework | d75b1752a02bd028af0806683abe079c7b0a9b29 | [
"Apache-2.0"
] | null | null | null | python/test-nose-3.py | li-ma/homework | d75b1752a02bd028af0806683abe079c7b0a9b29 | [
"Apache-2.0"
] | null | null | null | python/test-nose-3.py | li-ma/homework | d75b1752a02bd028af0806683abe079c7b0a9b29 | [
"Apache-2.0"
] | null | null | null | # Module Level
# Function Level
# Target Func
test_func_1.setUp = func_1_setup
test_func_1.tearDown = func_1_teardown
| 15.5 | 38 | 0.72043 |
7cda6328ac58b61f05923cca8623aa6b42f94561 | 3,591 | py | Python | lib/reindex/reporting.py | scality/utapi | 29475f1b9aa25cf3c883262bfb6f4573f846a5b7 | [
"Apache-2.0"
] | 13 | 2016-10-07T20:25:11.000Z | 2022-02-23T06:33:59.000Z | lib/reindex/reporting.py | scality/utapi | 29475f1b9aa25cf3c883262bfb6f4573f846a5b7 | [
"Apache-2.0"
] | 427 | 2016-08-17T18:03:32.000Z | 2022-03-31T10:46:12.000Z | lib/reindex/reporting.py | scality/utapi | 29475f1b9aa25cf3c883262bfb6f4573f846a5b7 | [
"Apache-2.0"
] | 5 | 2017-04-25T21:13:03.000Z | 2018-01-23T00:21:06.000Z | import requests
import redis
import json
import ast
import sys
import time
import urllib
import re
from threading import Thread
from concurrent.futures import ThreadPoolExecutor
import argparse
if __name__ == '__main__':
options = get_options()
redis_conf = dict(
ip=options.sentinel_ip,
port=options.sentinel_port,
sentinel_cluster_name=options.sentinel_cluster_name,
password=options.redis_password
)
P = S3ListBuckets(options.bucketd_addr)
listbuckets = P.run()
userids = set([x for x, y in listbuckets])
executor = ThreadPoolExecutor(max_workers=1)
for userid, bucket in listbuckets:
U = askRedis(**redis_conf)
data = U.read('buckets', bucket)
content = "Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s " % (
userid, bucket, data["files"], data["total_size"])
executor.submit(safe_print, content)
data = U.read('buckets', 'mpuShadowBucket'+bucket)
content = "Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s " % (
userid, 'mpuShadowBucket'+bucket, data["files"], data["total_size"])
executor.submit(safe_print, content)
executor.submit(safe_print, "")
for userid in sorted(userids):
U = askRedis(**redis_conf)
data = U.read('accounts', userid)
content = "Account:%s|NumberOFfiles:%s|StorageCapacity:%s " % (
userid, data["files"], data["total_size"])
executor.submit(safe_print, content) | 35.554455 | 114 | 0.634085 |
7cdbf4b64b3ba075f66b013f73d0f70e791b05ee | 52,272 | py | Python | src/skim/modeling/skim_attention/modeling_skim.py | recitalAI/skim-attention | a37a277072d1f70ea615cfd19e5b84a6effd2464 | [
"Apache-2.0"
] | 4 | 2021-09-08T17:20:59.000Z | 2021-12-08T07:49:24.000Z | src/skim/modeling/skim_attention/modeling_skim.py | recitalAI/skim-attention | a37a277072d1f70ea615cfd19e5b84a6effd2464 | [
"Apache-2.0"
] | null | null | null | src/skim/modeling/skim_attention/modeling_skim.py | recitalAI/skim-attention | a37a277072d1f70ea615cfd19e5b84a6effd2464 | [
"Apache-2.0"
] | 1 | 2021-12-09T00:02:23.000Z | 2021-12-09T00:02:23.000Z | from collections import namedtuple
import logging
from dataclasses import dataclass
from typing import Optional, Tuple
import math
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, LayerNorm
from torch.autograd.function import Function
from transformers.file_utils import (
ModelOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPoolingAndCrossAttentions,
MaskedLMOutput,
TokenClassifierOutput,
)
from transformers.models.bert.modeling_bert import (
BertConfig,
BertEmbeddings,
BertIntermediate,
BertOutput,
BertPooler,
BertEncoder,
BertOnlyMLMHead,
)
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMEmbeddings
from .configuration_skim import (
SkimformerConfig,
BertWithSkimEmbedConfig,
SkimmingMaskConfig,
)
logger = logging.getLogger(__name__)
SkimformerEncoderOutput = namedtuple(
"SkimformerEncoderOutput",
["hidden_states", "all_hidden_states"],
)
| 39.540091 | 168 | 0.661674 |
7cdcd3e9c2d7e86acfb845c8d72f0dc9c0f23f7d | 2,290 | py | Python | api/routers/dashboard.py | xming521/coco_API | 51d7ac3141e58f1d6a5438af135fba3ea101bd53 | [
"MIT"
] | null | null | null | api/routers/dashboard.py | xming521/coco_API | 51d7ac3141e58f1d6a5438af135fba3ea101bd53 | [
"MIT"
] | null | null | null | api/routers/dashboard.py | xming521/coco_API | 51d7ac3141e58f1d6a5438af135fba3ea101bd53 | [
"MIT"
] | null | null | null | import time
import psutil
import pymysql
from fastapi import APIRouter
from api.utils import response_code
router = APIRouter()
| 30.533333 | 79 | 0.620524 |
7cdd5ddf7b7d2568fd208a60927251ae8e3ac857 | 10,399 | py | Python | retargeting/models/Kinematics.py | yujiatay/deep-motion-editing | 0a6fc5fd20059c5074f68a452cd49cf6ede36ea8 | [
"BSD-2-Clause"
] | 1 | 2021-07-06T14:34:12.000Z | 2021-07-06T14:34:12.000Z | retargeting/models/Kinematics.py | bmd080/deep-motion-editing | 19604abdc0ead66f8c82d9211b8c5862c6a68089 | [
"BSD-2-Clause"
] | null | null | null | retargeting/models/Kinematics.py | bmd080/deep-motion-editing | 19604abdc0ead66f8c82d9211b8c5862c6a68089 | [
"BSD-2-Clause"
] | null | null | null | import torch
import torch.nn as nn
import numpy as np
import math
| 36.233449 | 131 | 0.521877 |
7cdd5ff32b1c2238dcf9d02a8e0c07b84239dfc5 | 13,145 | py | Python | tests/operators/test_hive_operator.py | Ryan-Miao/airflow | a2aca8714fac014ed7da97229d7877f1bc6e5a59 | [
"Apache-2.0"
] | null | null | null | tests/operators/test_hive_operator.py | Ryan-Miao/airflow | a2aca8714fac014ed7da97229d7877f1bc6e5a59 | [
"Apache-2.0"
] | null | null | null | tests/operators/test_hive_operator.py | Ryan-Miao/airflow | a2aca8714fac014ed7da97229d7877f1bc6e5a59 | [
"Apache-2.0"
] | 1 | 2020-09-29T05:26:34.000Z | 2020-09-29T05:26:34.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import os
import unittest
from unittest import mock
import nose
from airflow import DAG, configuration, operators
from airflow.models import TaskInstance
from airflow.operators.hive_operator import HiveOperator
from airflow.utils import timezone
DEFAULT_DATE = datetime.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
if 'AIRFLOW_RUNALL_TESTS' in os.environ:
import airflow.hooks.hive_hooks
import airflow.operators.presto_to_mysql
| 39.005935 | 88 | 0.617117 |
7cdde129ca347d44c4fc15ae483831b97628b1d6 | 4,553 | py | Python | main.py | OrionDark7/Alakajam12 | 4f9f8f87a05feb718baddb12aa8cbbed3e36a071 | [
"MIT"
] | null | null | null | main.py | OrionDark7/Alakajam12 | 4f9f8f87a05feb718baddb12aa8cbbed3e36a071 | [
"MIT"
] | null | null | null | main.py | OrionDark7/Alakajam12 | 4f9f8f87a05feb718baddb12aa8cbbed3e36a071 | [
"MIT"
] | null | null | null | import pygame, math
from game import map, ui
window = pygame.display.set_mode([800, 600])
ui.window = window
screen = "game"
s = {"fullscreen": False}
running = True
gamedata = {"level": 0, "coal": 0, "iron": 1, "copper":0}
tiles = pygame.sprite.Group()
rails = pygame.sprite.Group()
carts = pygame.sprite.Group()
interactables = pygame.sprite.Group()
listmap = []
clock = pygame.time.Clock()
selected = pygame.image.load("./resources/images/selected.png")
selected2 = pygame.image.load("./resources/images/selected2.png")
box = pygame.image.load("./resources/images/box.png")
uibox = pygame.image.load("./resources/images/ui box.png")
m = Mouse()
loadlevel(0)
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.MOUSEBUTTONDOWN:
m.pos(pygame.mouse.get_pos())
if screen == "game":
if pygame.sprite.spritecollide(m, carts, False) and m.mode == "select":
carts.update("select", m, listmap)
if m.clickedcart != None:
m.mode = "action"
elif m.mode == "action" and m.clickedcart != None and listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0:
m.clickedcart.pathfind(listmap, snaptogrid(m.tl))
m.clickedcart = None
m.mode = "select"
elif event.type == pygame.MOUSEMOTION:
m.pos(pygame.mouse.get_pos())
if screen == "game":
m.hoveritem = None
if len(pygame.sprite.spritecollide(m, carts, False)) > 0:
m.hoveritem = pygame.sprite.spritecollide(m, carts, False)[0]
elif len(pygame.sprite.spritecollide(m, interactables, False)) > 0:
m.hoveritem = pygame.sprite.spritecollide(m, interactables, False)[0]
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
carts.add(map.Cart(snaptogrid(m.tl), "miner"))
if screen == "game":
window.fill([100, 100, 100])
tiles.draw(window)
carts.draw(window)
carts.update("update", m, listmap)
if not m.hoveritem == None and not m.mode == "action":
window.blit(box, [m.rect.left+10, m.rect.top+10])
ui.Resize(30)
ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25])
if m.hoveritem.type.startswith("mine") and m.hoveritem not in carts:
ui.Resize(18)
ui.Text("Carts Inside: " + str(m.hoveritem.data["carts"]), [m.rect.left+27, m.rect.top+47])
ui.Text("Max Carts: " + str(m.hoveritem.data["max"]), [m.rect.left+27, m.rect.top+60])
if not m.clickedcart == None:
window.blit(selected2, [m.clickedcart.rect.left-2, m.clickedcart.rect.top-2])
if m.mode == "action":
window.blit(box, [m.rect.left+10, m.rect.top+10])
ui.Resize(30)
try:
ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25])
except:
ui.Text(m.clickedcart.type.upper(), [m.rect.left+27, m.rect.top+25])
if listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0:
ui.Resize(22)
ui.Text("Click to move", [m.rect.left+27, m.rect.top+45])
ui.Text("Cart Here", [m.rect.left+27, m.rect.top+60])
window.blit(selected, [snaptogrid(m.tl)[0]*40-2, snaptogrid(m.tl)[1]*40-2])
window.blit(uibox, [555, 475])
pygame.display.flip()
clock.tick(60)
fps = clock.get_fps()
pygame.quit()
| 40.292035 | 124 | 0.573468 |
7cde0e155e222f52e34bae521e25a21b28caf52a | 550 | py | Python | Code/extract_method3.py | AbdullahNoori/CS-2.1-Trees-Sorting | 59ba182d60abe6171a3d7d64981f79ee192de3bb | [
"MIT"
] | null | null | null | Code/extract_method3.py | AbdullahNoori/CS-2.1-Trees-Sorting | 59ba182d60abe6171a3d7d64981f79ee192de3bb | [
"MIT"
] | null | null | null | Code/extract_method3.py | AbdullahNoori/CS-2.1-Trees-Sorting | 59ba182d60abe6171a3d7d64981f79ee192de3bb | [
"MIT"
] | null | null | null | # Written by Kamran Bigdely
# Example for Compose Methods: Extract Method.
import math
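# Hedged reconstruction: the original helper bodies are elided in this dump,
# and the sample coordinates below are assumptions. The point of the exercise
# is that each computation was extracted into its own well-named function.
def get_distance():
    # Euclidean distance between two assumed sample points.
    x1, y1 = 20, 30
    x2, y2 = 50, 150
    return math.sqrt((x1 - x2)**2 + (y1 - y2)**2)

def get_length():
    # Magnitude of an assumed sample vector.
    vx, vy = 12, 5
    return math.sqrt(vx**2 + vy**2)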
print('distance', get_distance())
# *** somewhere else in your program ***
print('length', get_length())
| 26.190476 | 88 | 0.670909 |
7cde5911adb7d9da7046ae21614759503f243fc8 | 51,158 | py | Python | sympy/integrals/prde.py | Abhi58/sympy | 5ca228b17a7d44ef08a268ba1fa959d5763634af | [
"BSD-3-Clause"
] | 2 | 2019-06-12T16:15:39.000Z | 2019-10-06T10:40:59.000Z | sympy/integrals/prde.py | Abhi58/sympy | 5ca228b17a7d44ef08a268ba1fa959d5763634af | [
"BSD-3-Clause"
] | null | null | null | sympy/integrals/prde.py | Abhi58/sympy | 5ca228b17a7d44ef08a268ba1fa959d5763634af | [
"BSD-3-Clause"
] | 1 | 2019-10-02T10:47:13.000Z | 2019-10-02T10:47:13.000Z | """
Algorithms for solving Parametric Risch Differential Equations.
The methods used for solving Parametric Risch Differential Equations parallel
those for solving Risch Differential Equations. See the outline in the
docstring of rde.py for more information.
The Parametric Risch Differential Equation problem is, given f, g1, ..., gm in
K(t), to determine if there exist y in K(t) and c1, ..., cm in Const(K) such
that Dy + f*y == Sum(ci*gi, (i, 1, m)), and to find such y and ci if they exist.
For the algorithms here G is a list of tuples of fractions of the terms on the
right hand side of the equation (i.e., gi in k(t)), and Q is a list of terms on
the right hand side of the equation (i.e., qi in k[t]). See the docstring of
each function for more information.
"""
from __future__ import print_function, division
from sympy.core import Dummy, ilcm, Add, Mul, Pow, S
from sympy.core.compatibility import reduce, range
from sympy.integrals.rde import (order_at, order_at_oo, weak_normalizer,
bound_degree)
from sympy.integrals.risch import (gcdex_diophantine, frac_in, derivation,
residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel,
recognize_log_derivative)
from sympy.matrices import zeros, eye
from sympy.polys import Poly, lcm, cancel, sqf_list
from sympy.polys.polymatrix import PolyMatrix as Matrix
from sympy.solvers import solve
def prde_normal_denom(fa, fd, G, DE):
"""
Parametric Risch Differential Equation - Normal part of the denominator.
Given a derivation D on k[t] and f, g1, ..., gm in k(t) with f weakly
normalized with respect to t, return the tuple (a, b, G, h) such that
a, h in k[t], b in k<t>, G = [g1, ..., gm] in k(t)^m, and for any solution
c1, ..., cm in Const(k) and y in k(t) of Dy + f*y == Sum(ci*gi, (i, 1, m)),
q == y*h in k<t> satisfies a*Dq + b*q == Sum(ci*Gi, (i, 1, m)).
"""
dn, ds = splitfactor(fd, DE)
Gas, Gds = list(zip(*G))
gd = reduce(lambda i, j: i.lcm(j), Gds, Poly(1, DE.t))
en, es = splitfactor(gd, DE)
p = dn.gcd(en)
h = en.gcd(en.diff(DE.t)).quo(p.gcd(p.diff(DE.t)))
a = dn*h
c = a*h
ba = a*fa - dn*derivation(h, DE)*fd
ba, bd = ba.cancel(fd, include=True)
G = [(c*A).cancel(D, include=True) for A, D in G]
return (a, (ba, bd), G, h)
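# Illustrative call (a sketch, not part of the original module): in the
# hypertangent case Dt = t**2 + 1, the tower and the normalization look like
#
#     >>> from sympy import Poly, symbols
#     >>> from sympy.integrals.risch import DifferentialExtension
#     >>> x, t = symbols('x t')
#     >>> DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]})
#     >>> fa, fd = Poly(1, t), Poly(x, t, field=True)
#     >>> G = [(Poly(t, t), Poly(1, t)), (Poly(x*t, t), Poly(1, t))]
#     >>> a, (ba, bd), G, h = prde_normal_denom(fa, fd, G, DE)
#
# where a and h are in k[t], (ba, bd) is b as a fraction, and G holds the
# transformed right-hand-side fractions.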
def real_imag(ba, bd, gen):
"""
Helper function, to get the real and imaginary part of a rational function
evaluated at sqrt(-1) without actually evaluating it at sqrt(-1).
Separates the even and odd power terms by checking the degree of each term
modulo 4. Returns a tuple (ba[0], ba[1], bd) where ba[0] is the real part
of the numerator, ba[1] is the imaginary part and bd is the denominator
of the rational function.
"""
bd = bd.as_poly(gen).as_dict()
ba = ba.as_poly(gen).as_dict()
denom_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in bd.items()]
denom_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in bd.items()]
bd_real = sum(r for r in denom_real)
bd_imag = sum(r for r in denom_imag)
num_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in ba.items()]
num_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in ba.items()]
ba_real = sum(r for r in num_real)
ba_imag = sum(r for r in num_imag)
ba = ((ba_real*bd_real + ba_imag*bd_imag).as_poly(gen), (ba_imag*bd_real - ba_real*bd_imag).as_poly(gen))
bd = (bd_real*bd_real + bd_imag*bd_imag).as_poly(gen)
return (ba[0], ba[1], bd)
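# Worked example (a sketch): (t + 1)/(t - 1) at t = sqrt(-1) equals -sqrt(-1),
# i.e. real part 0 and imaginary part -1, and real_imag() recovers that
# without substituting sqrt(-1):
#
#     >>> from sympy import Poly, Symbol
#     >>> t = Symbol('t')
#     >>> real_imag(Poly(t + 1, t), Poly(t - 1, t), t)
#
# returns (Poly(0, t), Poly(-2, t), Poly(2, t)), read as 0/2 + (-2/2)*sqrt(-1).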
def prde_special_denom(a, ba, bd, G, DE, case='auto'):
"""
Parametric Risch Differential Equation - Special part of the denominator.
case is one of {'exp', 'tan', 'primitive'} for the hyperexponential,
hypertangent, and primitive cases, respectively. For the hyperexponential
(resp. hypertangent) case, given a derivation D on k[t] and a in k[t],
b in k<t>, and g1, ..., gm in k(t) with Dt/t in k (resp. Dt/(t**2 + 1) in
k, sqrt(-1) not in k), a != 0, and gcd(a, t) == 1 (resp.
gcd(a, t**2 + 1) == 1), return the tuple (A, B, GG, h) such that A, B, h in
k[t], GG = [gg1, ..., ggm] in k(t)^m, and for any solution c1, ..., cm in
Const(k) and q in k<t> of a*Dq + b*q == Sum(ci*gi, (i, 1, m)), r == q*h in
k[t] satisfies A*Dr + B*r == Sum(ci*ggi, (i, 1, m)).
For case == 'primitive', k<t> == k[t], so it returns (a, b, G, 1) in this
case.
"""
# TODO: Merge this with the very similar special_denom() in rde.py
if case == 'auto':
case = DE.case
if case == 'exp':
p = Poly(DE.t, DE.t)
elif case == 'tan':
p = Poly(DE.t**2 + 1, DE.t)
elif case in ['primitive', 'base']:
B = ba.quo(bd)
return (a, B, G, Poly(1, DE.t))
else:
raise ValueError("case must be one of {'exp', 'tan', 'primitive', "
"'base'}, not %s." % case)
nb = order_at(ba, p, DE.t) - order_at(bd, p, DE.t)
nc = min([order_at(Ga, p, DE.t) - order_at(Gd, p, DE.t) for Ga, Gd in G])
n = min(0, nc - min(0, nb))
if not nb:
# Possible cancellation.
if case == 'exp':
dcoeff = DE.d.quo(Poly(DE.t, DE.t))
with DecrementLevel(DE): # We are guaranteed to not have problems,
# because case != 'base'.
alphaa, alphad = frac_in(-ba.eval(0)/bd.eval(0)/a.eval(0), DE.t)
etaa, etad = frac_in(dcoeff, DE.t)
A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE)
if A is not None:
Q, m, z = A
if Q == 1:
n = min(n, m)
elif case == 'tan':
dcoeff = DE.d.quo(Poly(DE.t**2 + 1, DE.t))
with DecrementLevel(DE): # We are guaranteed to not have problems,
# because case != 'base'.
betaa, alphaa, alphad = real_imag(ba, bd*a, DE.t)
betad = alphad
etaa, etad = frac_in(dcoeff, DE.t)
if recognize_log_derivative(2*betaa, betad, DE):
A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE)
B = parametric_log_deriv(betaa, betad, etaa, etad, DE)
if A is not None and B is not None:
Q, s, z = A
# TODO: Add test
if Q == 1:
n = min(n, s/2)
N = max(0, -nb)
pN = p**N
pn = p**-n # This is 1/h
A = a*pN
B = ba*pN.quo(bd) + Poly(n, DE.t)*a*derivation(p, DE).quo(p)*pN
G = [(Ga*pN*pn).cancel(Gd, include=True) for Ga, Gd in G]
h = pn
# (a*p**N, (b + n*a*Dp/p)*p**N, g1*p**(N - n), ..., gm*p**(N - n), p**-n)
return (A, B, G, h)
def prde_linear_constraints(a, b, G, DE):
"""
Parametric Risch Differential Equation - Generate linear constraints on the constants.
Given a derivation D on k[t], a, b, in k[t] with gcd(a, b) == 1, and
G = [g1, ..., gm] in k(t)^m, return Q = [q1, ..., qm] in k[t]^m and a
matrix M with entries in k(t) such that for any solution c1, ..., cm in
Const(k) and p in k[t] of a*Dp + b*p == Sum(ci*gi, (i, 1, m)),
(c1, ..., cm) is a solution of Mx == 0, and p and the ci satisfy
a*Dp + b*p == Sum(ci*qi, (i, 1, m)).
Because M has entries in k(t), and because Matrix doesn't play well with
Poly, M will be a Matrix of Basic expressions.
"""
m = len(G)
Gns, Gds = list(zip(*G))
d = reduce(lambda i, j: i.lcm(j), Gds)
d = Poly(d, field=True)
Q = [(ga*(d).quo(gd)).div(d) for ga, gd in G]
if not all([ri.is_zero for _, ri in Q]):
N = max([ri.degree(DE.t) for _, ri in Q])
M = Matrix(N + 1, m, lambda i, j: Q[j][1].nth(i))
else:
M = Matrix(0, m, []) # No constraints, return the empty matrix.
qs, _ = list(zip(*Q))
return (qs, M)
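# Worked example (a sketch): over QQ with D = d/dx (the base case), take
# g1 = x and g2 = 1/x. The common denominator is d = x; dividing g1*d = x**2
# by d leaves no remainder while g2*d/x = 1 leaves remainder 1, so
#
#     >>> from sympy import Poly, symbols
#     >>> from sympy.integrals.risch import DifferentialExtension
#     >>> x = symbols('x')
#     >>> DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
#     >>> G = [(Poly(x, x), Poly(1, x)), (Poly(1, x), Poly(x, x))]
#     >>> prde_linear_constraints(Poly(1, x), Poly(0, x), G, DE)
#
# gives Q = (x, 0) and M = [[0, 1]], i.e. the constraint c2 == 0 -- matching
# the fact that Dp == c1*x + c2/x has a polynomial solution only when the
# 1/x term vanishes.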
def poly_linear_constraints(p, d):
"""
Given p = [p1, ..., pm] in k[t]^m and d in k[t], return
q = [q1, ..., qm] in k[t]^m and a matrix M with entries in k such
that Sum(ci*pi, (i, 1, m)), for c1, ..., cm in k, is divisible
by d if and only if (c1, ..., cm) is a solution of Mx = 0, in
which case the quotient is Sum(ci*qi, (i, 1, m)).
"""
m = len(p)
q, r = zip(*[pi.div(d) for pi in p])
if not all([ri.is_zero for ri in r]):
n = max([ri.degree() for ri in r])
M = Matrix(n + 1, m, lambda i, j: r[j].nth(i))
else:
M = Matrix(0, m, []) # No constraints.
return q, M
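# Quick check (a sketch): with p = [x**2 - 1, x] and d = x - 1, the divisions
# give quotients [x + 1, 1] and remainders [0, 1], so M = [[0, 1]]:
# c1*(x**2 - 1) + c2*x is divisible by x - 1 exactly when c2 == 0, with
# quotient c1*(x + 1).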
def constant_system(A, u, DE):
"""
Generate a system for the constant solutions.
Given a differential field (K, D) with constant field C = Const(K), a Matrix
A, and a vector (Matrix) u with coefficients in K, returns the tuple
(B, v, s), where B is a Matrix with coefficients in C and v is a vector
(Matrix) such that either v has coefficients in C, in which case s is True
and the solutions in C of Ax == u are exactly all the solutions of Bx == v,
or v has a non-constant coefficient, in which case s is False Ax == u has no
constant solution.
This algorithm is used both in solving parametric problems and in
determining if an element a of K is a derivative of an element of K or the
logarithmic derivative of a K-radical using the structure theorem approach.
Because Poly does not play well with Matrix yet, this algorithm assumes that
all matrix entries are Basic expressions.
"""
if not A:
return A, u
Au = A.row_join(u)
Au = Au.rref(simplify=cancel, normalize_last=False)[0]
# Warning: This will NOT return correct results if cancel() cannot reduce
# an identically zero expression to 0. The danger is that we might
# incorrectly prove that an integral is nonelementary (such as
# risch_integrate(exp((sin(x)**2 + cos(x)**2 - 1)*x**2), x).
# But this is a limitation in computer algebra in general, and implicit
# in the correctness of the Risch Algorithm is the computability of the
# constant field (actually, this same correctness problem exists in any
# algorithm that uses rref()).
#
# We therefore limit ourselves to constant fields that are computable
# via the cancel() function, in order to prevent a speed bottleneck from
# calling some more complex simplification function (rational function
# coefficients will fall into this class). Furthermore, (I believe) this
# problem will only crop up if the integral explicitly contains an
# expression in the constant field that is identically zero, but cannot
# be reduced to such by cancel(). Therefore, a careful user can avoid this
# problem entirely by being careful with the sorts of expressions that
# appear in his integrand in the variables other than the integration
# variable (the structure theorems should be able to completely decide these
# problems in the integration variable).
Au = Au.applyfunc(cancel)
A, u = Au[:, :-1], Au[:, -1]
for j in range(A.cols):
for i in range(A.rows):
if A[i, j].has(*DE.T):
# This assumes that const(F(t0, ..., tn) == const(K) == F
Ri = A[i, :]
# Rm+1; m = A.rows
Rm1 = Ri.applyfunc(lambda x: derivation(x, DE, basic=True)/
derivation(A[i, j], DE, basic=True))
Rm1 = Rm1.applyfunc(cancel)
um1 = cancel(derivation(u[i], DE, basic=True)/
derivation(A[i, j], DE, basic=True))
for s in range(A.rows):
# A[s, :] = A[s, :] - A[s, i]*A[:, m+1]
Asj = A[s, j]
A.row_op(s, lambda r, jj: cancel(r - Asj*Rm1[jj]))
# u[s] = u[s] - A[s, j]*u[m+1]
u.row_op(s, lambda r, jj: cancel(r - Asj*um1))
A = A.col_join(Rm1)
u = u.col_join(Matrix([um1]))
return (A, u)
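# Conceptual example (a sketch): over k = QQ(x) with D = d/dx, the system
# c1*x + c2 == 0 has no constant solution besides c1 == c2 == 0. Row
# reduction of [x, 1 | 0] produces the non-constant entry 1/x, so the loop
# above appends the derivative-based row and the returned system is
# equivalent to c1 == 0 and c2 == 0.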
def prde_spde(a, b, Q, n, DE):
"""
Special Polynomial Differential Equation algorithm: Parametric Version.
Given a derivation D on k[t], an integer n, and a, b, q1, ..., qm in k[t]
with deg(a) > 0 and gcd(a, b) == 1, return (A, B, Qq, R, n1), with
Qq = [q1, ..., qm] and R = [r1, ..., rm], such that for any solution
c1, ..., cm in Const(k) and q in k[t] of degree at most n of
a*Dq + b*q == Sum(ci*gi, (i, 1, m)), p = (q - Sum(ci*ri, (i, 1, m)))/a has
degree at most n1 and satisfies A*Dp + B*p == Sum(ci*qi, (i, 1, m))
"""
R, Z = list(zip(*[gcdex_diophantine(b, a, qi) for qi in Q]))
A = a
B = b + derivation(a, DE)
Qq = [zi - derivation(ri, DE) for ri, zi in zip(R, Z)]
R = list(R)
n1 = n - a.degree(DE.t)
return (A, B, Qq, R, n1)
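# Worked example (a sketch): with D = d/dx, a = Poly(x, x), b = Poly(1, x),
# Q = [Poly(x, x)] and n = 1, gcdex_diophantine() gives r1 = 0, z1 = 1, so
# the call returns (Poly(x, x), Poly(2, x), [Poly(1, x)], [Poly(0, x)], 0):
# here B = b + Da = 2, and q = x*p turns x*Dq + q == c1*x (deg(q) <= 1) into
# x*(x*Dp + 2*p) == c1*x, i.e. x*Dp + 2*p == c1 with deg(p) <= 0.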
def prde_no_cancel_b_large(b, Q, n, DE):
"""
Parametric Poly Risch Differential Equation - No cancellation: deg(b) large enough.
Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with
b != 0 and either D == d/dt or deg(b) > max(0, deg(D) - 1), returns
h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that
if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and
Dq + b*q == Sum(ci*qi, (i, 1, m)), then q = Sum(dj*hj, (j, 1, r)), where
d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.
"""
db = b.degree(DE.t)
m = len(Q)
H = [Poly(0, DE.t)]*m
for N in range(n, -1, -1): # [n, ..., 0]
for i in range(m):
si = Q[i].nth(N + db)/b.LC()
sitn = Poly(si*DE.t**N, DE.t)
H[i] = H[i] + sitn
Q[i] = Q[i] - derivation(sitn, DE) - b*sitn
if all(qi.is_zero for qi in Q):
dc = -1
M = zeros(0, 2)
else:
dc = max([qi.degree(DE.t) for qi in Q])
M = Matrix(dc + 1, m, lambda i, j: Q[j].nth(i))
A, u = constant_system(M, zeros(dc + 1, 1), DE)
c = eye(m)
A = A.row_join(zeros(A.rows, m)).col_join(c.row_join(-c))
return (H, A)
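# Worked example (a sketch): with D = d/dx, b = Poly(1, x), Q = [Poly(x, x)]
# and n = 1, the elimination loop reduces Q to zero and yields H = [x - 1]
# with constraint matrix [[1, -1]]: every solution of Dq + b*q == c1*x with
# deg(q) <= 1 is q = d1*(x - 1) where c1 == d1; indeed D(x - 1) + (x - 1) == x.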
def prde_no_cancel_b_small(b, Q, n, DE):
"""
Parametric Poly Risch Differential Equation - No cancellation: deg(b) small enough.
Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with
deg(b) < deg(D) - 1 and either D == d/dt or deg(D) >= 2, returns
h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that
if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and
Dq + b*q == Sum(ci*qi, (i, 1, m)) then q = Sum(dj*hj, (j, 1, r)) where
d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.
"""
m = len(Q)
H = [Poly(0, DE.t)]*m
for N in range(n, 0, -1): # [n, ..., 1]
for i in range(m):
si = Q[i].nth(N + DE.d.degree(DE.t) - 1)/(N*DE.d.LC())
sitn = Poly(si*DE.t**N, DE.t)
H[i] = H[i] + sitn
Q[i] = Q[i] - derivation(sitn, DE) - b*sitn
if b.degree(DE.t) > 0:
for i in range(m):
si = Poly(Q[i].nth(b.degree(DE.t))/b.LC(), DE.t)
H[i] = H[i] + si
Q[i] = Q[i] - derivation(si, DE) - b*si
if all(qi.is_zero for qi in Q):
dc = -1
M = Matrix()
else:
dc = max([qi.degree(DE.t) for qi in Q])
M = Matrix(dc + 1, m, lambda i, j: Q[j].nth(i))
A, u = constant_system(M, zeros(dc + 1, 1), DE)
c = eye(m)
A = A.row_join(zeros(A.rows, m)).col_join(c.row_join(-c))
return (H, A)
# else: b is in k, deg(qi) < deg(Dt)
t = DE.t
if DE.case != 'base':
with DecrementLevel(DE):
t0 = DE.t # k = k0(t0)
ba, bd = frac_in(b, t0, field=True)
Q0 = [frac_in(qi.TC(), t0, field=True) for qi in Q]
f, B = param_rischDE(ba, bd, Q0, DE)
# f = [f1, ..., fr] in k^r and B is a matrix with
# m + r columns and entries in Const(k) = Const(k0)
# such that Dy0 + b*y0 = Sum(ci*qi, (i, 1, m)) has
# a solution y0 in k with c1, ..., cm in Const(k)
# if and only y0 = Sum(dj*fj, (j, 1, r)) where
# d1, ..., dr ar in Const(k) and
# B*Matrix([c1, ..., cm, d1, ..., dr]) == 0.
# Transform fractions (fa, fd) in f into constant
# polynomials fa/fd in k[t].
# (Is there a better way?)
f = [Poly(fa.as_expr()/fd.as_expr(), t, field=True)
for fa, fd in f]
else:
# Base case. Dy == 0 for all y in k and b == 0.
# Dy + b*y = Sum(ci*qi) is solvable if and only if
# Sum(ci*qi) == 0 in which case the solutions are
# y = d1*f1 for f1 = 1 and any d1 in Const(k) = k.
f = [Poly(1, t, field=True)] # r = 1
B = Matrix([[qi.TC() for qi in Q] + [S(0)]])
# The condition for solvability is
# B*Matrix([c1, ..., cm, d1]) == 0
# There are no constraints on d1.
# Coefficients of t^j (j > 0) in Sum(ci*qi) must be zero.
d = max([qi.degree(DE.t) for qi in Q])
if d > 0:
M = Matrix(d, m, lambda i, j: Q[j].nth(i + 1))
A, _ = constant_system(M, zeros(d, 1), DE)
else:
# No constraints on the hj.
A = Matrix(0, m, [])
# Solutions of the original equation are
# y = Sum(dj*fj, (j, 1, r) + Sum(ei*hi, (i, 1, m)),
# where ei == ci (i = 1, ..., m), when
# A*Matrix([c1, ..., cm]) == 0 and
# B*Matrix([c1, ..., cm, d1, ..., dr]) == 0
# Build combined constraint matrix with m + r + m columns.
r = len(f)
I = eye(m)
A = A.row_join(zeros(A.rows, r + m))
B = B.row_join(zeros(B.rows, m))
C = I.row_join(zeros(m, r)).row_join(-I)
return f + H, A.col_join(B).col_join(C)
def prde_cancel_liouvillian(b, Q, n, DE):
"""
Pg, 237.
"""
H = []
# Why use DecrementLevel? Below line answers that:
# Assuming that we can solve such problems over 'k' (not k[t])
if DE.case == 'primitive':
with DecrementLevel(DE):
ba, bd = frac_in(b, DE.t, field=True)
for i in range(n, -1, -1):
if DE.case == 'exp': # this re-checking can be avoided
with DecrementLevel(DE):
ba, bd = frac_in(b + i*derivation(DE.t, DE)/DE.t,
DE.t, field=True)
with DecrementLevel(DE):
Qy = [frac_in(q.nth(i), DE.t, field=True) for q in Q]
fi, Ai = param_rischDE(ba, bd, Qy, DE)
fi = [Poly(fa.as_expr()/fd.as_expr(), DE.t, field=True)
for fa, fd in fi]
ri = len(fi)
if i == n:
M = Ai
else:
M = Ai.col_join(M.row_join(zeros(M.rows, ri)))
Fi, hi = [None]*ri, [None]*ri
# from eq. on top of p.238 (unnumbered)
for j in range(ri):
hji = fi[j]*DE.t**i
hi[j] = hji
# building up Sum(djn*(D(fjn*t^n) - b*fjnt^n))
Fi[j] = -(derivation(hji, DE) - b*hji)
H += hi
# in the next loop instead of Q it has
# to be Q + Fi taking its place
Q = Q + Fi
return (H, M)
def param_poly_rischDE(a, b, q, n, DE):
"""Polynomial solutions of a parametric Risch differential equation.
Given a derivation D in k[t], a, b in k[t] relatively prime, and q
= [q1, ..., qm] in k[t]^m, return h = [h1, ..., hr] in k[t]^r and
a matrix A with m + r columns and entries in Const(k) such that
a*Dp + b*p = Sum(ci*qi, (i, 1, m)) has a solution p of degree <= n
in k[t] with c1, ..., cm in Const(k) if and only if p = Sum(dj*hj,
(j, 1, r)) where d1, ..., dr are in Const(k) and (c1, ..., cm,
d1, ..., dr) is a solution of Ax == 0.
"""
m = len(q)
if n < 0:
# Only the trivial zero solution is possible.
# Find relations between the qi.
if all([qi.is_zero for qi in q]):
return [], zeros(1, m) # No constraints.
N = max([qi.degree(DE.t) for qi in q])
M = Matrix(N + 1, m, lambda i, j: q[j].nth(i))
A, _ = constant_system(M, zeros(M.rows, 1), DE)
return [], A
if a.is_ground:
# Normalization: a = 1.
a = a.LC()
b, q = b.quo_ground(a), [qi.quo_ground(a) for qi in q]
if not b.is_zero and (DE.case == 'base' or
b.degree() > max(0, DE.d.degree() - 1)):
return prde_no_cancel_b_large(b, q, n, DE)
elif ((b.is_zero or b.degree() < DE.d.degree() - 1)
and (DE.case == 'base' or DE.d.degree() >= 2)):
return prde_no_cancel_b_small(b, q, n, DE)
elif (DE.d.degree() >= 2 and
b.degree() == DE.d.degree() - 1 and
n > -b.as_poly().LC()/DE.d.as_poly().LC()):
raise NotImplementedError("prde_no_cancel_b_equal() is "
"not yet implemented.")
else:
# Liouvillian cases
if DE.case == 'primitive' or DE.case == 'exp':
return prde_cancel_liouvillian(b, q, n, DE)
else:
raise NotImplementedError("non-linear and hypertangent "
"cases have not yet been implemented")
# else: deg(a) > 0
    # Iterate SPDE as long as possible, accumulating the coefficient
    # and terms needed for the recovery of the original solutions.
alpha, beta = 1, [0]*m
while n >= 0: # and a, b relatively prime
a, b, q, r, n = prde_spde(a, b, q, n, DE)
beta = [betai + alpha*ri for betai, ri in zip(beta, r)]
alpha *= a
# Solutions p of a*Dp + b*p = Sum(ci*qi) correspond to
# solutions alpha*p + Sum(ci*betai) of the initial equation.
d = a.gcd(b)
if not d.is_ground:
break
# a*Dp + b*p = Sum(ci*qi) may have a polynomial solution
# only if the sum is divisible by d.
qq, M = poly_linear_constraints(q, d)
# qq = [qq1, ..., qqm] where qqi = qi.quo(d).
    # M is a matrix with m columns and entries in k.
# Sum(fi*qi, (i, 1, m)), where f1, ..., fm are elements of k, is
# divisible by d if and only if M*Matrix([f1, ..., fm]) == 0,
# in which case the quotient is Sum(fi*qqi).
A, _ = constant_system(M, zeros(M.rows, 1), DE)
# A is a matrix with m columns and entries in Const(k).
# Sum(ci*qqi) is Sum(ci*qi).quo(d), and the remainder is zero
# for c1, ..., cm in Const(k) if and only if
# A*Matrix([c1, ...,cm]) == 0.
V = A.nullspace()
# V = [v1, ..., vu] where each vj is a column matrix with
# entries aj1, ..., ajm in Const(k).
# Sum(aji*qi) is divisible by d with exact quotient Sum(aji*qqi).
# Sum(ci*qi) is divisible by d if and only if ci = Sum(dj*aji)
# (i = 1, ..., m) for some d1, ..., du in Const(k).
# In that case, solutions of
# a*Dp + b*p = Sum(ci*qi) = Sum(dj*Sum(aji*qi))
# are the same as those of
# (a/d)*Dp + (b/d)*p = Sum(dj*rj)
# where rj = Sum(aji*qqi).
if not V: # No non-trivial solution.
return [], eye(m) # Could return A, but this has
# the minimum number of rows.
Mqq = Matrix([qq]) # A single row.
r = [(Mqq*vj)[0] for vj in V] # [r1, ..., ru]
# Solutions of (a/d)*Dp + (b/d)*p = Sum(dj*rj) correspond to
# solutions alpha*p + Sum(Sum(dj*aji)*betai) of the initial
# equation. These are equal to alpha*p + Sum(dj*fj) where
# fj = Sum(aji*betai).
Mbeta = Matrix([beta])
f = [(Mbeta*vj)[0] for vj in V] # [f1, ..., fu]
#
# Solve the reduced equation recursively.
#
g, B = param_poly_rischDE(a.quo(d), b.quo(d), r, n, DE)
    # g = [g1, ..., gv] in k[t]^v and B is a matrix with u + v
# columns and entries in Const(k) such that
# (a/d)*Dp + (b/d)*p = Sum(dj*rj) has a solution p of degree <= n
# in k[t] if and only if p = Sum(ek*gk) where e1, ..., ev are in
# Const(k) and B*Matrix([d1, ..., du, e1, ..., ev]) == 0.
# The solutions of the original equation are then
# Sum(dj*fj, (j, 1, u)) + alpha*Sum(ek*gk, (k, 1, v)).
# Collect solution components.
h = f + [alpha*gk for gk in g]
# Build combined relation matrix.
A = -eye(m)
for vj in V:
A = A.row_join(vj)
A = A.row_join(zeros(m, len(g)))
A = A.col_join(zeros(B.rows, m).row_join(B))
return h, A
def param_rischDE(fa, fd, G, DE):
"""
Solve a Parametric Risch Differential Equation: Dy + f*y == Sum(ci*Gi, (i, 1, m)).
Given a derivation D in k(t), f in k(t), and G
= [G1, ..., Gm] in k(t)^m, return h = [h1, ..., hr] in k(t)^r and
a matrix A with m + r columns and entries in Const(k) such that
Dy + f*y = Sum(ci*Gi, (i, 1, m)) has a solution y
in k(t) with c1, ..., cm in Const(k) if and only if y = Sum(dj*hj,
(j, 1, r)) where d1, ..., dr are in Const(k) and (c1, ..., cm,
d1, ..., dr) is a solution of Ax == 0.
Elements of k(t) are tuples (a, d) with a and d in k[t].
"""
m = len(G)
q, (fa, fd) = weak_normalizer(fa, fd, DE)
# Solutions of the weakly normalized equation Dz + f*z = q*Sum(ci*Gi)
# correspond to solutions y = z/q of the original equation.
gamma = q
G = [(q*ga).cancel(gd, include=True) for ga, gd in G]
a, (ba, bd), G, hn = prde_normal_denom(fa, fd, G, DE)
# Solutions q in k<t> of a*Dq + b*q = Sum(ci*Gi) correspond
# to solutions z = q/hn of the weakly normalized equation.
gamma *= hn
A, B, G, hs = prde_special_denom(a, ba, bd, G, DE)
# Solutions p in k[t] of A*Dp + B*p = Sum(ci*Gi) correspond
# to solutions q = p/hs of the previous equation.
gamma *= hs
g = A.gcd(B)
a, b, g = A.quo(g), B.quo(g), [gia.cancel(gid*g, include=True) for
gia, gid in G]
# a*Dp + b*p = Sum(ci*gi) may have a polynomial solution
# only if the sum is in k[t].
q, M = prde_linear_constraints(a, b, g, DE)
# q = [q1, ..., qm] where qi in k[t] is the polynomial component
# of the partial fraction expansion of gi.
# M is a matrix with m columns and entries in k.
# Sum(fi*gi, (i, 1, m)), where f1, ..., fm are elements of k,
# is a polynomial if and only if M*Matrix([f1, ..., fm]) == 0,
# in which case the sum is equal to Sum(fi*qi).
M, _ = constant_system(M, zeros(M.rows, 1), DE)
# M is a matrix with m columns and entries in Const(k).
# Sum(ci*gi) is in k[t] for c1, ..., cm in Const(k)
# if and only if M*Matrix([c1, ..., cm]) == 0,
# in which case the sum is Sum(ci*qi).
## Reduce number of constants at this point
V = M.nullspace()
# V = [v1, ..., vu] where each vj is a column matrix with
# entries aj1, ..., ajm in Const(k).
# Sum(aji*gi) is in k[t] and equal to Sum(aji*qi) (j = 1, ..., u).
    # Sum(ci*gi) is in k[t] if and only if ci = Sum(dj*aji)
# (i = 1, ..., m) for some d1, ..., du in Const(k).
# In that case,
# Sum(ci*gi) = Sum(ci*qi) = Sum(dj*Sum(aji*qi)) = Sum(dj*rj)
# where rj = Sum(aji*qi) (j = 1, ..., u) in k[t].
if not V: # No non-trivial solution
return [], eye(m)
Mq = Matrix([q]) # A single row.
r = [(Mq*vj)[0] for vj in V] # [r1, ..., ru]
# Solutions of a*Dp + b*p = Sum(dj*rj) correspond to solutions
# y = p/gamma of the initial equation with ci = Sum(dj*aji).
try:
        # Try bound_degree first; if it is not implemented for this
        # case, we fall back to n = 5 below. At least for prde_spde,
        # the algorithm terminates no matter what n is.
n = bound_degree(a, b, r, DE, parametric=True)
except NotImplementedError:
        # A temporary bound is set. Eventually, it will be removed.
        # The currently added test case takes a long time
        # even with n=5, and much longer with larger n's.
n = 5
h, B = param_poly_rischDE(a, b, r, n, DE)
    # h = [h1, ..., hv] in k[t]^v and B is a matrix with u + v
# columns and entries in Const(k) such that
# a*Dp + b*p = Sum(dj*rj) has a solution p of degree <= n
# in k[t] if and only if p = Sum(ek*hk) where e1, ..., ev are in
# Const(k) and B*Matrix([d1, ..., du, e1, ..., ev]) == 0.
# The solutions of the original equation for ci = Sum(dj*aji)
# (i = 1, ..., m) are then y = Sum(ek*hk, (k, 1, v))/gamma.
## Build combined relation matrix with m + u + v columns.
A = -eye(m)
for vj in V:
A = A.row_join(vj)
A = A.row_join(zeros(m, len(h)))
A = A.col_join(zeros(B.rows, m).row_join(B))
## Eliminate d1, ..., du.
W = A.nullspace()
# W = [w1, ..., wt] where each wl is a column matrix with
# entries blk (k = 1, ..., m + u + v) in Const(k).
# The vectors (bl1, ..., blm) generate the space of those
# constant families (c1, ..., cm) for which a solution of
# the equation Dy + f*y == Sum(ci*Gi) exists. They generate
# the space and form a basis except possibly when Dy + f*y == 0
    # is solvable in k(t). The corresponding solutions are
# y = Sum(blk'*hk, (k, 1, v))/gamma, where k' = k + m + u.
v = len(h)
M = Matrix([wl[:m] + wl[-v:] for wl in W]) # excise dj's.
N = M.nullspace()
# N = [n1, ..., ns] where the ni in Const(k)^(m + v) are column
# vectors generating the space of linear relations between
# c1, ..., cm, e1, ..., ev.
C = Matrix([ni[:] for ni in N]) # rows n1, ..., ns.
return [hk.cancel(gamma, include=True) for hk in h], C
def limited_integrate_reduce(fa, fd, G, DE):
"""
Simpler version of step 1 & 2 for the limited integration problem.
Given a derivation D on k(t) and f, g1, ..., gn in k(t), return
(a, b, h, N, g, V) such that a, b, h in k[t], N is a non-negative integer,
g in k(t), V == [v1, ..., vm] in k(t)^m, and for any solution v in k(t),
c1, ..., cm in C of f == Dv + Sum(ci*wi, (i, 1, m)), p = v*h is in k<t>, and
p and the ci satisfy a*Dp + b*p == g + Sum(ci*vi, (i, 1, m)). Furthermore,
if S1irr == Sirr, then p is in k[t], and if t is nonlinear or Liouvillian
over k, then deg(p) <= N.
So that the special part is always computed, this function calls the more
general prde_special_denom() automatically if it cannot determine that
S1irr == Sirr. Furthermore, it will automatically call bound_degree() when
    t is linear and non-Liouvillian, which, for the transcendental case, implies
    that Dt == a*t + b for some a, b in k*.
"""
dn, ds = splitfactor(fd, DE)
E = [splitfactor(gd, DE) for _, gd in G]
En, Es = list(zip(*E))
c = reduce(lambda i, j: i.lcm(j), (dn,) + En) # lcm(dn, en1, ..., enm)
hn = c.gcd(c.diff(DE.t))
a = hn
b = -derivation(hn, DE)
N = 0
# These are the cases where we know that S1irr = Sirr, but there could be
# others, and this algorithm will need to be extended to handle them.
if DE.case in ['base', 'primitive', 'exp', 'tan']:
hs = reduce(lambda i, j: i.lcm(j), (ds,) + Es) # lcm(ds, es1, ..., esm)
a = hn*hs
b -= (hn*derivation(hs, DE)).quo(hs)
mu = min(order_at_oo(fa, fd, DE.t), min([order_at_oo(ga, gd, DE.t) for
ga, gd in G]))
# So far, all the above are also nonlinear or Liouvillian, but if this
# changes, then this will need to be updated to call bound_degree()
# as per the docstring of this function (DE.case == 'other_linear').
N = hn.degree(DE.t) + hs.degree(DE.t) + max(0, 1 - DE.d.degree(DE.t) - mu)
else:
# TODO: implement this
raise NotImplementedError
V = [(-a*hn*ga).cancel(gd, include=True) for ga, gd in G]
return (a, b, a, N, (a*hn*fa).cancel(fd, include=True), V)
def limited_integrate(fa, fd, G, DE):
"""
Solves the limited integration problem: f = Dv + Sum(ci*wi, (i, 1, n))
"""
fa, fd = fa*Poly(1/fd.LC(), DE.t), fd.monic()
    # interpreting the limited integration problem as a
    # parametric Risch DE problem
Fa = Poly(0, DE.t)
Fd = Poly(1, DE.t)
G = [(fa, fd)] + G
h, A = param_rischDE(Fa, Fd, G, DE)
V = A.nullspace()
V = [v for v in V if v[0] != 0]
if not V:
return None
else:
# we can take any vector from V, we take V[0]
c0 = V[0][0]
# v = [-1, c1, ..., cm, d1, ..., dr]
v = V[0]/(-c0)
r = len(h)
m = len(v) - r - 1
C = list(v[1: m + 1])
y = -sum([v[m + 1 + i]*h[i][0].as_expr()/h[i][1].as_expr() \
for i in range(r)])
y_num, y_den = y.as_numer_denom()
Ya, Yd = Poly(y_num, DE.t), Poly(y_den, DE.t)
Y = Ya*Poly(1/Yd.LC(), DE.t), Yd.monic()
return Y, C
def parametric_log_deriv_heu(fa, fd, wa, wd, DE, c1=None):
"""
Parametric logarithmic derivative heuristic.
Given a derivation D on k[t], f in k(t), and a hyperexponential monomial
theta over k(t), raises either NotImplementedError, in which case the
heuristic failed, or returns None, in which case it has proven that no
solution exists, or returns a solution (n, m, v) of the equation
n*f == Dv/v + m*Dtheta/theta, with v in k(t)* and n, m in ZZ with n != 0.
If this heuristic fails, the structure theorem approach will need to be
used.
The argument w == Dtheta/theta
"""
# TODO: finish writing this and write tests
c1 = c1 or Dummy('c1')
p, a = fa.div(fd)
q, b = wa.div(wd)
B = max(0, derivation(DE.t, DE).degree(DE.t) - 1)
C = max(p.degree(DE.t), q.degree(DE.t))
if q.degree(DE.t) > B:
eqs = [p.nth(i) - c1*q.nth(i) for i in range(B + 1, C + 1)]
s = solve(eqs, c1)
if not s or not s[c1].is_Rational:
# deg(q) > B, no solution for c.
return None
M, N = s[c1].as_numer_denom()
nfmwa = N*fa*wd - M*wa*fd
nfmwd = fd*wd
Qv = is_log_deriv_k_t_radical_in_field(N*fa*wd - M*wa*fd, fd*wd, DE,
'auto')
if Qv is None:
# (N*f - M*w) is not the logarithmic derivative of a k(t)-radical.
return None
Q, v = Qv
if Q.is_zero or v.is_zero:
return None
return (Q*N, Q*M, v)
if p.degree(DE.t) > B:
return None
c = lcm(fd.as_poly(DE.t).LC(), wd.as_poly(DE.t).LC())
l = fd.monic().lcm(wd.monic())*Poly(c, DE.t)
ln, ls = splitfactor(l, DE)
z = ls*ln.gcd(ln.diff(DE.t))
if not z.has(DE.t):
# TODO: We treat this as 'no solution', until the structure
# theorem version of parametric_log_deriv is implemented.
return None
u1, r1 = (fa*l.quo(fd)).div(z) # (l*f).div(z)
u2, r2 = (wa*l.quo(wd)).div(z) # (l*w).div(z)
eqs = [r1.nth(i) - c1*r2.nth(i) for i in range(z.degree(DE.t))]
s = solve(eqs, c1)
if not s or not s[c1].is_Rational:
# deg(q) <= B, no solution for c.
return None
M, N = s[c1].as_numer_denom()
nfmwa = N.as_poly(DE.t)*fa*wd - M.as_poly(DE.t)*wa*fd
nfmwd = fd*wd
Qv = is_log_deriv_k_t_radical_in_field(nfmwa, nfmwd, DE)
if Qv is None:
# (N*f - M*w) is not the logarithmic derivative of a k(t)-radical.
return None
Q, v = Qv
if Q.is_zero or v.is_zero:
return None
return (Q*N, Q*M, v)
def is_deriv_k(fa, fd, DE):
r"""
Checks if Df/f is the derivative of an element of k(t).
a in k(t) is the derivative of an element of k(t) if there exists b in k(t)
such that a = Db. Either returns (ans, u), such that Df/f == Du, or None,
which means that Df/f is not the derivative of an element of k(t). ans is
a list of tuples such that Add(*[i*j for i, j in ans]) == u. This is useful
for seeing exactly which elements of k(t) produce u.
This function uses the structure theorem approach, which says that for any
    f in K, Df/f is the derivative of an element of K if and only if there are ri
in QQ such that::
        Sum(r_i*Dt_i, i in L_K/C(x)) + Sum(r_i*Dt_i/t_i, i in E_K/C(x)) == Df/f
Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is
transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i
in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of logarithmic
monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that t_i
is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some
a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of
hyperexponential monomials of K over C(x)). If K is an elementary extension
over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly the
transcendence degree of K over C(x). Furthermore, because Const_D(K) ==
Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and
deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x)
and L_K/C(x) are disjoint.
The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed
recursively using this same function. Therefore, it is required to pass
them as indices to D (or T). E_args are the arguments of the
hyperexponentials indexed by E_K (i.e., if i is in E_K, then T[i] ==
exp(E_args[i])). This is needed to compute the final answer u such that
Df/f == Du.
    log(f) will be the same as u up to an additive constant. This is because
they will both behave the same as monomials. For example, both log(x) and
log(2*x) == log(x) + log(2) satisfy Dt == 1/x, because log(2) is constant.
Therefore, the term const is returned. const is such that
log(const) + f == u. This is calculated by dividing the arguments of one
logarithm from the other. Therefore, it is necessary to pass the arguments
of the logarithmic terms in L_args.
To handle the case where we are given Df/f, not f, use is_deriv_k_in_field().
See also
========
is_log_deriv_k_t_radical_in_field, is_log_deriv_k_t_radical
"""
# Compute Df/f
dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)), fd*fa
dfa, dfd = dfa.cancel(dfd, include=True)
# Our assumption here is that each monomial is recursively transcendental
if len(DE.exts) != len(DE.D):
if [i for i in DE.cases if i == 'tan'] or \
(set([i for i in DE.cases if i == 'primitive']) -
set(DE.indices('log'))):
raise NotImplementedError("Real version of the structure "
"theorems with hypertangent support is not yet implemented.")
# TODO: What should really be done in this case?
raise NotImplementedError("Nonelementary extensions not supported "
"in the structure theorems.")
E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.indices('exp')]
L_part = [DE.D[i].as_expr() for i in DE.indices('log')]
lhs = Matrix([E_part + L_part])
rhs = Matrix([dfa.as_expr()/dfd.as_expr()])
A, u = constant_system(lhs, rhs, DE)
if not all(derivation(i, DE, basic=True).is_zero for i in u) or not A:
# If the elements of u are not all constant
# Note: See comment in constant_system
# Also note: derivation(basic=True) calls cancel()
return None
else:
if not all(i.is_Rational for i in u):
raise NotImplementedError("Cannot work with non-rational "
"coefficients in this case.")
else:
terms = ([DE.extargs[i] for i in DE.indices('exp')] +
[DE.T[i] for i in DE.indices('log')])
ans = list(zip(terms, u))
result = Add(*[Mul(i, j) for i, j in ans])
argterms = ([DE.T[i] for i in DE.indices('exp')] +
[DE.extargs[i] for i in DE.indices('log')])
l = []
ld = []
for i, j in zip(argterms, u):
# We need to get around things like sqrt(x**2) != x
# and also sqrt(x**2 + 2*x + 1) != x + 1
# Issue 10798: i need not be a polynomial
i, d = i.as_numer_denom()
icoeff, iterms = sqf_list(i)
l.append(Mul(*([Pow(icoeff, j)] + [Pow(b, e*j) for b, e in iterms])))
dcoeff, dterms = sqf_list(d)
ld.append(Mul(*([Pow(dcoeff, j)] + [Pow(b, e*j) for b, e in dterms])))
const = cancel(fa.as_expr()/fd.as_expr()/Mul(*l)*Mul(*ld))
return (ans, result, const)
def is_log_deriv_k_t_radical(fa, fd, DE, Df=True):
r"""
Checks if Df is the logarithmic derivative of a k(t)-radical.
b in k(t) can be written as the logarithmic derivative of a k(t) radical if
there exist n in ZZ and u in k(t) with n, u != 0 such that n*b == Du/u.
Either returns (ans, u, n, const) or None, which means that Df cannot be
written as the logarithmic derivative of a k(t)-radical. ans is a list of
tuples such that Mul(*[i**j for i, j in ans]) == u. This is useful for
seeing exactly what elements of k(t) produce u.
This function uses the structure theorem approach, which says that for any
f in K, Df is the logarithmic derivative of a K-radical if and only if there
are ri in QQ such that::
        Sum(r_i*Dt_i, i in L_K/C(x)) + Sum(r_i*Dt_i/t_i, i in E_K/C(x)) == Df
Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is
transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i
in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of logarithmic
monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that t_i
is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some
a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of
hyperexponential monomials of K over C(x)). If K is an elementary extension
over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly the
transcendence degree of K over C(x). Furthermore, because Const_D(K) ==
Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and
deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x)
and L_K/C(x) are disjoint.
The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed
recursively using this same function. Therefore, it is required to pass
them as indices to D (or T). L_args are the arguments of the logarithms
indexed by L_K (i.e., if i is in L_K, then T[i] == log(L_args[i])). This is
needed to compute the final answer u such that n*f == Du/u.
exp(f) will be the same as u up to a multiplicative constant. This is
because they will both behave the same as monomials. For example, both
exp(x) and exp(x + 1) == E*exp(x) satisfy Dt == t. Therefore, the term const
is returned. const is such that exp(const)*f == u. This is calculated by
subtracting the arguments of one exponential from the other. Therefore, it
is necessary to pass the arguments of the exponential terms in E_args.
To handle the case where we are given Df, not f, use
is_log_deriv_k_t_radical_in_field().
See also
========
is_log_deriv_k_t_radical_in_field, is_deriv_k
"""
H = []
if Df:
dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)).cancel(fd**2,
include=True)
else:
dfa, dfd = fa, fd
# Our assumption here is that each monomial is recursively transcendental
if len(DE.exts) != len(DE.D):
if [i for i in DE.cases if i == 'tan'] or \
(set([i for i in DE.cases if i == 'primitive']) -
set(DE.indices('log'))):
raise NotImplementedError("Real version of the structure "
"theorems with hypertangent support is not yet implemented.")
# TODO: What should really be done in this case?
raise NotImplementedError("Nonelementary extensions not supported "
"in the structure theorems.")
E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.indices('exp')]
L_part = [DE.D[i].as_expr() for i in DE.indices('log')]
lhs = Matrix([E_part + L_part])
rhs = Matrix([dfa.as_expr()/dfd.as_expr()])
A, u = constant_system(lhs, rhs, DE)
if not all(derivation(i, DE, basic=True).is_zero for i in u) or not A:
# If the elements of u are not all constant
# Note: See comment in constant_system
# Also note: derivation(basic=True) calls cancel()
return None
else:
if not all(i.is_Rational for i in u):
# TODO: But maybe we can tell if they're not rational, like
# log(2)/log(3). Also, there should be an option to continue
# anyway, even if the result might potentially be wrong.
raise NotImplementedError("Cannot work with non-rational "
"coefficients in this case.")
else:
n = reduce(ilcm, [i.as_numer_denom()[1] for i in u])
u *= n
terms = ([DE.T[i] for i in DE.indices('exp')] +
[DE.extargs[i] for i in DE.indices('log')])
ans = list(zip(terms, u))
result = Mul(*[Pow(i, j) for i, j in ans])
# exp(f) will be the same as result up to a multiplicative
# constant. We now find the log of that constant.
argterms = ([DE.extargs[i] for i in DE.indices('exp')] +
[DE.T[i] for i in DE.indices('log')])
const = cancel(fa.as_expr()/fd.as_expr() -
Add(*[Mul(i, j/n) for i, j in zip(argterms, u)]))
return (ans, result, n, const)
def is_log_deriv_k_t_radical_in_field(fa, fd, DE, case='auto', z=None):
"""
Checks if f can be written as the logarithmic derivative of a k(t)-radical.
    It differs from is_log_deriv_k_t_radical(fa, fd, DE, Df=False)
    for any given fa, fd, DE in that it finds the solution in the
    given field, not in some (possibly unspecified) extension; the
    "in_field" suffix in the function name is used to indicate that.
f in k(t) can be written as the logarithmic derivative of a k(t) radical if
there exist n in ZZ and u in k(t) with n, u != 0 such that n*f == Du/u.
Either returns (n, u) or None, which means that f cannot be written as the
logarithmic derivative of a k(t)-radical.
case is one of {'primitive', 'exp', 'tan', 'auto'} for the primitive,
hyperexponential, and hypertangent cases, respectively. If case is 'auto',
it will attempt to determine the type of the derivation automatically.
See also
========
is_log_deriv_k_t_radical, is_deriv_k
"""
fa, fd = fa.cancel(fd, include=True)
# f must be simple
n, s = splitfactor(fd, DE)
if not s.is_one:
pass
z = z or Dummy('z')
H, b = residue_reduce(fa, fd, DE, z=z)
if not b:
# I will have to verify, but I believe that the answer should be
# None in this case. This should never happen for the
# functions given when solving the parametric logarithmic
        # derivative problem when integrating elementary functions (see
# Bronstein's book, page 255), so most likely this indicates a bug.
return None
roots = [(i, i.real_roots()) for i, _ in H]
if not all(len(j) == i.degree() and all(k.is_Rational for k in j) for
i, j in roots):
# If f is the logarithmic derivative of a k(t)-radical, then all the
# roots of the resultant must be rational numbers.
return None
# [(a, i), ...], where i*log(a) is a term in the log-part of the integral
# of f
respolys, residues = list(zip(*roots)) or [[], []]
    # Note: this might be empty, but everything below should work fine in that
# case (it should be the same as if it were [[1, 1]])
residueterms = [(H[j][1].subs(z, i), i) for j in range(len(H)) for
i in residues[j]]
# TODO: finish writing this and write tests
p = cancel(fa.as_expr()/fd.as_expr() - residue_reduce_derivation(H, DE, z))
p = p.as_poly(DE.t)
if p is None:
# f - Dg will be in k[t] if f is the logarithmic derivative of a k(t)-radical
return None
if p.degree(DE.t) >= max(1, DE.d.degree(DE.t)):
return None
if case == 'auto':
case = DE.case
if case == 'exp':
wa, wd = derivation(DE.t, DE).cancel(Poly(DE.t, DE.t), include=True)
with DecrementLevel(DE):
pa, pd = frac_in(p, DE.t, cancel=True)
wa, wd = frac_in((wa, wd), DE.t)
A = parametric_log_deriv(pa, pd, wa, wd, DE)
if A is None:
return None
n, e, u = A
u *= DE.t**e
elif case == 'primitive':
with DecrementLevel(DE):
pa, pd = frac_in(p, DE.t)
A = is_log_deriv_k_t_radical_in_field(pa, pd, DE, case='auto')
if A is None:
return None
n, u = A
elif case == 'base':
# TODO: we can use more efficient residue reduction from ratint()
if not fd.is_sqf or fa.degree() >= fd.degree():
# f is the logarithmic derivative in the base case if and only if
# f = fa/fd, fd is square-free, deg(fa) < deg(fd), and
# gcd(fa, fd) == 1. The last condition is handled by cancel() above.
return None
# Note: if residueterms = [], returns (1, 1)
# f had better be 0 in that case.
n = reduce(ilcm, [i.as_numer_denom()[1] for _, i in residueterms], S(1))
u = Mul(*[Pow(i, j*n) for i, j in residueterms])
return (n, u)
elif case == 'tan':
raise NotImplementedError("The hypertangent case is "
"not yet implemented for is_log_deriv_k_t_radical_in_field()")
elif case in ['other_linear', 'other_nonlinear']:
# XXX: If these are supported by the structure theorems, change to NotImplementedError.
raise ValueError("The %s case is not supported in this function." % case)
else:
raise ValueError("case must be one of {'primitive', 'exp', 'tan', "
"'base', 'auto'}, not %s" % case)
common_denom = reduce(ilcm, [i.as_numer_denom()[1] for i in [j for _, j in
residueterms]] + [n], S(1))
residueterms = [(i, j*common_denom) for i, j in residueterms]
m = common_denom//n
if common_denom != n*m: # Verify exact division
raise ValueError("Inexact division")
u = cancel(u**m*Mul(*[Pow(i, j) for i, j in residueterms]))
return (common_denom, u)
| 40.123922 | 110 | 0.561222 |
7ce07b8311ef90c93682c15fc681abf9e95c0bb7 | 1,076 | py | Python | ssh_telnet/netmiko/ex07_netmiko_command_mult_prompts.py | levs72/pyneng-examples | d6288292dcf9d1ebc5a9db4a0d620bd11b4a2df9 | [
"MIT"
] | 11 | 2021-04-05T09:30:23.000Z | 2022-03-09T13:27:56.000Z | ssh_telnet/netmiko/ex07_netmiko_command_mult_prompts.py | levs72/pyneng-examples | d6288292dcf9d1ebc5a9db4a0d620bd11b4a2df9 | [
"MIT"
] | null | null | null | ssh_telnet/netmiko/ex07_netmiko_command_mult_prompts.py | levs72/pyneng-examples | d6288292dcf9d1ebc5a9db4a0d620bd11b4a2df9 | [
"MIT"
] | 11 | 2021-04-06T03:44:35.000Z | 2022-03-04T21:20:40.000Z | from pprint import pprint
import yaml
import netmiko
import paramiko
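# NOTE: reconstructed helper -- the original definition was elided from this
# extract. A minimal, hedged sketch assuming netmiko's ConnectHandler and
# send_command_timing API: send `command`, and whenever a string from
# `wait_for` appears in the output, answer with the matching `confirmation`.
def send_cmd_with_prompt(device, command, wait_for, confirmation):
    if isinstance(wait_for, str):
        wait_for = [wait_for]
    if isinstance(confirmation, str):
        confirmation = [confirmation]
    with netmiko.ConnectHandler(**device) as ssh:
        ssh.enable()
        result = ssh.send_command_timing(
            command, strip_prompt=False, strip_command=False)
        for wait, confirm in zip(wait_for, confirmation):
            if wait in result:
                result += ssh.send_command_timing(
                    confirm, strip_prompt=False, strip_command=False)
    return result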
if __name__ == "__main__":
with open("devices.yaml") as f:
devices = yaml.safe_load(f)
r1 = devices[0]
out = send_cmd_with_prompt(
r1, "copy run start", wait_for="Destination filename", confirmation="\n"
)
print(out)
"""
R1#copy run start
Destination filename [startup-config]?
Building configuration...
[OK]
R1#
"""
| 25.619048 | 84 | 0.616171 |
7ce146b894402021fe89e46e79f310a76ff9ef08 | 2,479 | py | Python | LightTestLoop.py | Growing-Beyond-Earth/GettingStarted | 04c2fd5fa36224ac25a6c6c62c4d6e558b27e700 | [
"Apache-2.0"
] | null | null | null | LightTestLoop.py | Growing-Beyond-Earth/GettingStarted | 04c2fd5fa36224ac25a6c6c62c4d6e558b27e700 | [
"Apache-2.0"
] | null | null | null | LightTestLoop.py | Growing-Beyond-Earth/GettingStarted | 04c2fd5fa36224ac25a6c6c62c4d6e558b27e700 | [
"Apache-2.0"
] | null | null | null | # GROWNG BEYOND EARTH CONTROL BOX Traning
# RASPBERRY PI PICO / MICROPYTHON
# FAIRCHILD TROPICAL BOTANIC GARDEN, Oct 18, 2021
# The Growing Beyond Earth (GBE) control box is a device that controls
# the LED lights and fan in a GBE growth chamber. It can also control
# accessories including a 12v water pump and environmental sensors.
# The device is based on a Raspberry Pi Pico microcontroller running
# Micropython.
# Lesson written by @MarioTheMaker
from sys import stdin, stdout, exit
import machine
import time
#Set the brightness for each color
red_brightness = 100
green_brightness = 100
blue_brightness = 100
white_brightness = 100
# Pulse width modulation (PWM) is a way to get an artificial analog output on a digital pin.
# It achieves this by rapidly toggling the pin from low to high. There are two parameters
# associated with this: the frequency of the toggling, and the duty cycle.
# The duty cycle is defined to be how long the pin is high compared with the length of a
# single period (low plus high time). Maximum duty cycle is when the pin is high all of the
# time, and minimum is when it is low all of the time.
# https://projects.raspberrypi.org/en/projects/getting-started-with-the-pico/7#:
# control I/O pins
# machine.Pin(id, mode=- 1, pull=- 1, *, value, drive, alt)
# Access the pin peripheral (GPIO pin) associated with the given id.
# If additional arguments are given in the constructor then they are used to initialise
# the pin. Any settings that are not specified will remain in their previous state.
# More info https://docs.micropython.org/en/latest/library/machine.Pin.html
r=machine.PWM(machine.Pin(0)); r.freq(20000) # Red channel
g=machine.PWM(machine.Pin(2)); g.freq(20000) # Green channel
b=machine.PWM(machine.Pin(1)); b.freq(20000) # Blue channel
w=machine.PWM(machine.Pin(3)); w.freq(20000) # White channel
# More info https://docs.micropython.org/en/latest/library/machine.PWM.html
# Start a loop and change the brightness multiplier "n"
# PWM.duty_u16([value]) Get the current duty cycle of the PWM output,
# as an unsigned 16-bit value in the range 0 to 65535 inclusive.
n = 100
while n > 0:
print("Power Level ",n)
r.duty_u16(int(red_brightness)*n)
g.duty_u16(int(green_brightness)*n)
b.duty_u16(int(blue_brightness)*n)
w.duty_u16(int(white_brightness)*n)
time.sleep(.3)
n = n - 5
#Turn all the lights off
time.sleep(3)
r.duty_u16(0)
g.duty_u16(0)
b.duty_u16(0)
w.duty_u16(0)
| 38.138462 | 92 | 0.745058 |
7ce15c82ddc26277baddffb09d13b58c226ab5d6 | 3,409 | py | Python | core/known_bugs_utils.py | nicolasbock/hotsos | 6a0d650a8d76b5a5f85f4ddc8c0a9f8939e1de7a | [
"Apache-2.0"
] | null | null | null | core/known_bugs_utils.py | nicolasbock/hotsos | 6a0d650a8d76b5a5f85f4ddc8c0a9f8939e1de7a | [
"Apache-2.0"
] | null | null | null | core/known_bugs_utils.py | nicolasbock/hotsos | 6a0d650a8d76b5a5f85f4ddc8c0a9f8939e1de7a | [
"Apache-2.0"
] | null | null | null | import os
import yaml
from core import plugintools
from core import constants
from core.searchtools import SearchDef
from core.issues.issue_utils import IssueEntry
LAUNCHPAD = "launchpad"
MASTER_YAML_KNOWN_BUGS_KEY = "bugs-detected"
KNOWN_BUGS = {MASTER_YAML_KNOWN_BUGS_KEY: []}
def _get_known_bugs():
"""
Fetch the current plugin known_bugs.yaml if it exists and return its
contents or None if it doesn't exist yet.
"""
if not os.path.isdir(constants.PLUGIN_TMP_DIR):
raise Exception("plugin tmp dir '{}' not found".
format(constants.PLUGIN_TMP_DIR))
known_bugs_yaml = os.path.join(constants.PLUGIN_TMP_DIR, "known_bugs.yaml")
if not os.path.exists(known_bugs_yaml):
return {}
bugs = yaml.safe_load(open(known_bugs_yaml))
if bugs and bugs.get(MASTER_YAML_KNOWN_BUGS_KEY):
return bugs
return {}
def add_known_bug(bug_id, description=None, type=LAUNCHPAD):
"""
Fetch the current plugin known_bugs.yaml if it exists and add new bug with
description of the bug.
"""
if not os.path.isdir(constants.PLUGIN_TMP_DIR):
raise Exception("plugin tmp dir '{}' not found".
format(constants.PLUGIN_TMP_DIR))
    if type == LAUNCHPAD:
        new_bug = "https://bugs.launchpad.net/bugs/{}".format(bug_id)
    else:
        # only launchpad bugs are currently supported; fail early rather
        # than hit a NameError on new_bug below
        raise Exception("unsupported bug type '{}'".format(type))
if description is None:
description = "no description provided"
entry = IssueEntry(new_bug, description, key="id")
current = _get_known_bugs()
if current and current.get(MASTER_YAML_KNOWN_BUGS_KEY):
current[MASTER_YAML_KNOWN_BUGS_KEY].append(entry.data)
else:
current = {MASTER_YAML_KNOWN_BUGS_KEY: [entry.data]}
known_bugs_yaml = os.path.join(constants.PLUGIN_TMP_DIR, "known_bugs.yaml")
with open(known_bugs_yaml, 'w') as fd:
fd.write(yaml.dump(current))
def add_known_bugs_to_master_plugin():
"""
Fetch the current plugin known_bugs.yaml and add it to the master yaml.
Note that this can only be called once per plugin and is typically
performed as a final part after all others have executed.
"""
bugs = _get_known_bugs()
if bugs and bugs.get(MASTER_YAML_KNOWN_BUGS_KEY):
plugintools.save_part(bugs, priority=99)
| 33.097087 | 79 | 0.680845 |
7ce16cb7ee2c1e090289468a70fd88401aba8ddc | 339 | py | Python | examples/xml-rpc/echoserver.py | keobox/yap101 | 26913da9f61ef3d0d9cb3ef54bbfc451a9ef9de9 | [
"MIT"
] | null | null | null | examples/xml-rpc/echoserver.py | keobox/yap101 | 26913da9f61ef3d0d9cb3ef54bbfc451a9ef9de9 | [
"MIT"
] | null | null | null | examples/xml-rpc/echoserver.py | keobox/yap101 | 26913da9f61ef3d0d9cb3ef54bbfc451a9ef9de9 | [
"MIT"
] | null | null | null | import SimpleXMLRPCServer as xmls
server = echoserver(('127.0.0.1', 8001))
server.register_function(echo, 'echo')
print 'Listening on port 8001'
try:
server.serve_forever()
except:
server.server_close()
| 19.941176 | 42 | 0.728614 |
7ce1993cbdc65a6d053c9478a3f9b9475d29bb5c | 7,083 | py | Python | tf_pose/slim/nets/mobilenet/mobilenet_v2_test.py | gpspelle/pose-estimation | b817dcc120092002984d8a41431046f323bc02c8 | [
"Apache-2.0"
] | 862 | 2019-12-11T18:40:48.000Z | 2022-03-29T15:23:58.000Z | tf_pose/slim/nets/mobilenet/mobilenet_v2_test.py | bvanelli/tf-pose-estimation | 1dec506ac8abf00616dc0fe76bf476ccdfd6b93e | [
"Apache-2.0"
] | 72 | 2019-05-07T18:33:32.000Z | 2022-03-10T07:48:39.000Z | tf_pose/slim/nets/mobilenet/mobilenet_v2_test.py | bvanelli/tf-pose-estimation | 1dec506ac8abf00616dc0fe76bf476ccdfd6b93e | [
"Apache-2.0"
] | 165 | 2019-12-11T20:04:22.000Z | 2022-03-29T06:18:12.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mobilenet_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow as tf
from nets.mobilenet import conv_blocks as ops
from nets.mobilenet import mobilenet
from nets.mobilenet import mobilenet_v2
slim = tf.contrib.slim
def find_ops(optype):
"""Find ops of a given type in graphdef or a graph.
Args:
optype: operation type (e.g. Conv2D)
Returns:
List of operations.
"""
gd = tf.get_default_graph()
return [var for var in gd.get_operations() if var.type == optype]
if __name__ == '__main__':
tf.test.main()
| 37.278947 | 80 | 0.674432 |
7ce2ce6e7522a59e86a553aeb0f5ee90bd00e269 | 1,402 | py | Python | firebase-gist.py | darwin/firebase-gist | 5aa4eb89e82fbf2971d7afca07471e1f51ff6e51 | [
"MIT"
] | 1 | 2017-08-15T15:37:21.000Z | 2017-08-15T15:37:21.000Z | firebase-gist.py | darwin/firebase-gist | 5aa4eb89e82fbf2971d7afca07471e1f51ff6e51 | [
"MIT"
] | null | null | null | firebase-gist.py | darwin/firebase-gist | 5aa4eb89e82fbf2971d7afca07471e1f51ff6e51 | [
"MIT"
] | null | null | null | from firebase import firebase
import os
import datetime
import json
import logging
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from github3 import login
firebase_url = os.environ['FIREBASE_DB']
firebase_secret = os.environ['FIREBASE_SECRET']
firebase_path = os.environ['FIREBASE_PATH']
firebase_username = os.environ['FIREBASE_USERNAME'] # not checked ATM
gh_token = os.environ['GH_TOKEN']
gh_gist = os.environ['GH_GIST']
gh_fname = os.environ['GH_FNAME']
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
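def connect_firebase():
    # NOTE: reconstructed helper -- the original definition was elided from
    # this extract. A hedged sketch using the python-firebase client: build
    # an authenticated connection from the environment settings read above.
    auth = firebase.FirebaseAuthentication(
        firebase_secret, firebase_username, admin=True)
    return firebase.FirebaseApplication(firebase_url, authentication=auth)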
logger.info('==================================')
logger.info('Fetching firebase data')
f = connect_firebase()
data = f.get(firebase_path, None)
new_content = json.dumps(data, ensure_ascii=False, indent=2, sort_keys=True)
logger.info('Reading existing gist')
gh = login(token=gh_token)
gist = gh.gist(gh_gist)
old_content = ""
for f in gist.iter_files():
if f.filename == gh_fname:
old_content = f.content
break
if old_content == new_content:
logger.info('No changes detected')
else:
logger.info('Updating gist with new content')
gist.edit(files={
gh_fname: {
"content": new_content
}
})
logger.info('Done.') | 25.962963 | 100 | 0.738231 |
7ce32e38118a236e7f22400e28b670e7f2079e82 | 869 | py | Python | practice/2008/qualification/C-Fly_swatter/c.py | victorWeiFreelancer/CodeJam | edb8f921860a35985823cb3dbd3ebec8a8f3c12f | [
"MIT"
] | null | null | null | practice/2008/qualification/C-Fly_swatter/c.py | victorWeiFreelancer/CodeJam | edb8f921860a35985823cb3dbd3ebec8a8f3c12f | [
"MIT"
] | null | null | null | practice/2008/qualification/C-Fly_swatter/c.py | victorWeiFreelancer/CodeJam | edb8f921860a35985823cb3dbd3ebec8a8f3c12f | [
"MIT"
] | null | null | null | import sys
sys.dont_write_bytecode = True
if __name__ == '__main__':
main() | 25.558824 | 69 | 0.537399 |
7ce34380af3cdc654ec22dc00486fd1079b00edb | 25,614 | py | Python | synapse/notifier.py | rkfg/synapse | 0b3112123da5fae4964db784e3bab0c4d83d9d62 | [
"Apache-2.0"
] | 1 | 2021-09-09T08:50:13.000Z | 2021-09-09T08:50:13.000Z | synapse/notifier.py | rkfg/synapse | 0b3112123da5fae4964db784e3bab0c4d83d9d62 | [
"Apache-2.0"
] | null | null | null | synapse/notifier.py | rkfg/synapse | 0b3112123da5fae4964db784e3bab0c4d83d9d62 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import namedtuple
from typing import (
Awaitable,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
import attr
from prometheus_client import Counter
from twisted.internet import defer
import synapse.server
from synapse.api.constants import EventTypes, HistoryVisibility, Membership
from synapse.api.errors import AuthError
from synapse.events import EventBase
from synapse.handlers.presence import format_user_presence_state
from synapse.logging.context import PreserveLoggingContext
from synapse.logging.opentracing import log_kv, start_active_span
from synapse.logging.utils import log_function
from synapse.metrics import LaterGauge
from synapse.streams.config import PaginationConfig
from synapse.types import (
Collection,
PersistedEventPosition,
RoomStreamToken,
StreamToken,
UserID,
)
from synapse.util.async_helpers import ObservableDeferred, timeout_deferred
from synapse.util.metrics import Measure
from synapse.visibility import filter_events_for_client
logger = logging.getLogger(__name__)
notified_events_counter = Counter("synapse_notifier_notified_events", "")
users_woken_by_stream_counter = Counter(
"synapse_notifier_users_woken_by_stream", "", ["stream"]
)
T = TypeVar("T")
# TODO(paul): Should be shared somewhere
def count(func: Callable[[T], bool], it: Iterable[T]) -> int:
"""Return the number of items in it for which func returns true."""
n = 0
for x in it:
if func(x):
n += 1
return n
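class Notifier:
    # NOTE: hedged reconstruction -- the original Notifier class body was
    # largely elided from this extract; only the two helpers below survive.
    # In Synapse, this class notifies listeners (e.g. replication and
    # federation senders) when something they may care about has happened.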
def notify_replication(self) -> None:
"""Notify the any replication listeners that there's a new event"""
for cb in self.replication_callbacks:
cb()
def notify_remote_server_up(self, server: str):
"""Notify any replication that a remote server has come back up"""
# We call federation_sender directly rather than registering as a
# callback as a) we already have a reference to it and b) it introduces
# circular dependencies.
if self.federation_sender:
self.federation_sender.wake_destination(server)
| 36.025316 | 87 | 0.605216 |
7ce35b3e99fb727d7c2e93f64173851978f39e8c | 30,955 | py | Python | saleor/checkout/tests/test_base_calculations.py | nestfiy/saleor | 6fce3bc5c0ca72ac28db99553e6d2b49249c6dac | [
"CC-BY-4.0"
] | null | null | null | saleor/checkout/tests/test_base_calculations.py | nestfiy/saleor | 6fce3bc5c0ca72ac28db99553e6d2b49249c6dac | [
"CC-BY-4.0"
] | 76 | 2021-11-01T04:53:42.000Z | 2022-03-28T04:51:25.000Z | saleor/checkout/tests/test_base_calculations.py | nestfiy/saleor | 6fce3bc5c0ca72ac28db99553e6d2b49249c6dac | [
"CC-BY-4.0"
] | null | null | null | from decimal import Decimal
from prices import Money, TaxedMoney
from ...discount import DiscountValueType, VoucherType
from ...discount.utils import get_product_discount_on_sale
from ..base_calculations import (
base_checkout_total,
base_tax_rate,
calculate_base_line_total_price,
calculate_base_line_unit_price,
)
from ..fetch import fetch_checkout_lines
| 37.475787 | 88 | 0.772767 |
7ce3c5ddd55aea55c48d0f942c54f7645b346e45 | 24,690 | py | Python | tests/test_date.py | andy-z/ged4py | 2270bd8366174dcc98424cc6671bdaecf770fda0 | [
"MIT"
] | 10 | 2017-07-25T22:39:34.000Z | 2022-03-01T04:40:38.000Z | tests/test_date.py | andy-z/ged4py | 2270bd8366174dcc98424cc6671bdaecf770fda0 | [
"MIT"
] | 20 | 2018-03-25T10:25:40.000Z | 2021-05-02T20:38:48.000Z | tests/test_date.py | andy-z/ged4py | 2270bd8366174dcc98424cc6671bdaecf770fda0 | [
"MIT"
] | 6 | 2018-04-29T12:45:34.000Z | 2021-09-14T14:30:52.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `ged4py.date` module."""
import unittest
from ged4py.calendar import (
CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate,
CalendarDateVisitor
)
from ged4py.date import (
DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated,
DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod,
DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes,
DateValueVisitor
)
| 41.356784 | 108 | 0.637708 |
7ce3f2fa9ab64a2e056aae0886c6829b2b5285e6 | 7,722 | py | Python | src/quart/local.py | Dunkledore/quart | 803c8678b083895f4ece35fccb6aca56e189ee0a | [
"MIT"
] | 3 | 2020-03-31T10:36:31.000Z | 2020-04-23T12:01:10.000Z | venv/lib/python3.9/site-packages/quart/local.py | ryanwwest/kademlia | e1e5b84db0a7710cf372663325041850802d55f1 | [
"MIT"
] | 6 | 2020-09-05T01:40:23.000Z | 2022-03-12T00:40:58.000Z | src/quart/local.py | ccns1/ccns11 | d6edfac34fbee06fe974cda007d24a088d31ad30 | [
"MIT"
] | 1 | 2020-09-05T00:19:03.000Z | 2020-09-05T00:19:03.000Z | from __future__ import annotations
import asyncio
import copy
from contextvars import ContextVar # noqa # contextvars not understood as stdlib
from typing import Any # noqa # contextvars not understood as stdlib
from typing import Callable, Dict, Optional
| 41.740541 | 95 | 0.630536 |
7ce4682d4472c3403cd709b201e4107d5de073fb | 20,891 | py | Python | pytorch3dunet/unet3d/predictor.py | searobbersduck/pytorch-3dunet | 5bb8ed2b6966b2cd06b1dc676b62d1ad98329305 | [
"MIT"
] | null | null | null | pytorch3dunet/unet3d/predictor.py | searobbersduck/pytorch-3dunet | 5bb8ed2b6966b2cd06b1dc676b62d1ad98329305 | [
"MIT"
] | null | null | null | pytorch3dunet/unet3d/predictor.py | searobbersduck/pytorch-3dunet | 5bb8ed2b6966b2cd06b1dc676b62d1ad98329305 | [
"MIT"
] | null | null | null | import time
import h5py
import hdbscan
import numpy as np
import torch
from sklearn.cluster import MeanShift
from pytorch3dunet.datasets.hdf5 import SliceBuilder
from pytorch3dunet.unet3d.utils import get_logger
from pytorch3dunet.unet3d.utils import unpad
logger = get_logger('UNet3DPredictor')
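class _AbstractPredictor:
    # NOTE: hedged stub -- the original base class was elided from this
    # extract. It is assumed to hold the shared state used by the concrete
    # predictors below.
    def __init__(self, model, loader, output_file, config, **kwargs):
        self.model = model
        self.loader = loader
        self.output_file = output_file
        self.config = config
        self.predictor_config = kwargs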
class StandardPredictor(_AbstractPredictor):
"""
Applies the model on the given dataset and saves the result in the `output_file` in the H5 format.
Predictions from the network are kept in memory. If the results from the network don't fit in into RAM
use `LazyPredictor` instead.
The output dataset names inside the H5 is given by `des_dataset_name` config argument. If the argument is
not present in the config 'predictions{n}' is used as a default dataset name, where `n` denotes the number
of the output head from the network.
Args:
model (Unet3D): trained 3D UNet model used for prediction
data_loader (torch.utils.data.DataLoader): input data loader
output_file (str): path to the output H5 file
config (dict): global config dict
"""
class LazyPredictor(StandardPredictor):
"""
Applies the model on the given dataset and saves the result in the `output_file` in the H5 format.
Predicted patches are directly saved into the H5 and they won't be stored in memory. Since this predictor
is slower than the `StandardPredictor` it should only be used when the predicted volume does not fit into RAM.
The output dataset names inside the H5 is given by `des_dataset_name` config argument. If the argument is
not present in the config 'predictions{n}' is used as a default dataset name, where `n` denotes the number
of the output head from the network.
Args:
model (Unet3D): trained 3D UNet model used for prediction
data_loader (torch.utils.data.DataLoader): input data loader
output_file (str): path to the output H5 file
config (dict): global config dict
"""
class EmbeddingsPredictor(_AbstractPredictor):
"""
Applies the embedding model on the given dataset and saves the result in the `output_file` in the H5 format.
The resulting volume is the segmentation itself (not the embedding vectors) obtained by clustering embeddings
with HDBSCAN or MeanShift algorithm patch by patch and then stitching the patches together.
"""
def _embeddings_to_segmentation(self, embeddings):
"""
Cluster embeddings vectors with HDBSCAN and return the segmented volume.
Args:
embeddings (ndarray): 4D (CDHW) embeddings tensor
Returns:
3D (DHW) segmentation
"""
# shape of the output segmentation
output_shape = embeddings.shape[1:]
# reshape (C, D, H, W) -> (C, D * H * W) and transpose -> (D * H * W, C)
flattened_embeddings = embeddings.reshape(embeddings.shape[0], -1).transpose()
logger.info('Clustering embeddings...')
# perform clustering and reshape in order to get the segmentation volume
start = time.time()
clusters = self.clustering.fit_predict(flattened_embeddings).reshape(output_shape)
logger.info(
f'Number of clusters found by {self.clustering}: {np.max(clusters)}. Duration: {time.time() - start} sec.')
return clusters
def _merge_segmentation(self, segmentation, index, output_segmentation, visited_voxels_array):
"""
Given the `segmentation` patch, its `index` in the `output_segmentation` array and the array visited voxels
merge the segmented patch (`segmentation`) into the `output_segmentation`
Args:
segmentation (ndarray): segmented patch
index (tuple): position of the patch inside `output_segmentation` volume
output_segmentation (ndarray): current state of the output segmentation
visited_voxels_array (ndarray): array of voxels visited so far (same size as `output_segmentation`); visited
voxels will be marked by a number greater than 0
"""
index = tuple(index)
# get new unassigned label
max_label = np.max(output_segmentation) + 1
# make sure there are no clashes between current segmentation patch and the output_segmentation
# but keep the noise label
noise_mask = segmentation == self.noise_label
segmentation += int(max_label)
segmentation[noise_mask] = self.noise_label
# get the overlap mask in the current patch
overlap_mask = visited_voxels_array[index] > 0
# get the new labels inside the overlap_mask
new_labels = np.unique(segmentation[overlap_mask])
merged_labels = self._merge_labels(output_segmentation[index], new_labels, segmentation)
# relabel new segmentation with the merged labels
for current_label, new_label in merged_labels:
segmentation[segmentation == new_label] = current_label
# update the output_segmentation
output_segmentation[index] = segmentation
# visit the patch
visited_voxels_array[index] += 1
| 49.387707 | 134 | 0.631372 |
7ce4ea0979a0d8bcdfade749e59f8ad94da264f2 | 3,487 | py | Python | var/spack/repos/builtin/packages/visionary-dev-tools/package.py | electronicvisions/spack | d6121eb35b4948f7d8aef7ec7a305a5123a7439e | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-10T13:47:48.000Z | 2019-04-17T13:05:17.000Z | var/spack/repos/builtin/packages/visionary-dev-tools/package.py | einc-eu/spack | 15468b92ed21d970c0111ae19144e85e66746433 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-05-28T06:39:59.000Z | 2022-03-30T15:12:35.000Z | var/spack/repos/builtin/packages/visionary-dev-tools/package.py | einc-eu/spack | 15468b92ed21d970c0111ae19144e85e66746433 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2018-04-06T09:04:11.000Z | 2020-01-24T12:52:12.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os.path as osp
| 33.854369 | 124 | 0.677086 |
7ce65373a75b86fe5dcecdc0e2146bc5ea3033e1 | 3,593 | py | Python | extra/convertBAMtoPILFER.py | MartaLoBalastegui/XICRA | 74a7e74379c7e1b3fc1360d2c609994e884ee37a | [
"MIT"
] | 3 | 2021-05-16T21:13:22.000Z | 2022-01-23T08:47:48.000Z | extra/convertBAMtoPILFER.py | MartaLoBalastegui/XICRA | 74a7e74379c7e1b3fc1360d2c609994e884ee37a | [
"MIT"
] | 16 | 2021-03-11T10:51:25.000Z | 2022-03-12T01:02:00.000Z | extra/convertBAMtoPILFER.py | MartaLoBalastegui/XICRA | 74a7e74379c7e1b3fc1360d2c609994e884ee37a | [
"MIT"
] | 3 | 2021-03-05T10:07:38.000Z | 2022-01-23T08:48:06.000Z | #usr/bin/env python
## useful imports
import time
import io
import os
import re
import sys
from sys import argv
import subprocess
## ARGV
if len(sys.argv) < 6:
print ("\nUsage:")
print ("python3 %s bam_file folder bedtools_bin samtools_bin logfile\n" %os.path.realpath(__file__))
exit()
bam_file = os.path.abspath(argv[1])
folder = argv[2]
bedtools_exe = argv[3]
samtools_exe = argv[4]
logFile = argv[5]
# start
output_file = open(logFile, 'a')
output_file.write("\nConvert BAM to Pilfer Input file:\n")
## Variables
dirname_name = os.path.dirname(bam_file)
split_name = os.path.splitext( os.path.basename(bam_file) )
bed_file = folder + '/' + split_name[0] + '.bed'
sam_file = folder + '/' + split_name[0] + '.sam'
pilfer_tmp = folder + '/' + split_name[0] + '.tmp.pilfer.bed'
pilfer_file = folder + '/' + split_name[0] + '.pilfer.bed'
## START
print ("\n+ Converting BAM file into PILFER input file")
## generate bed file with bedtools bamtobed -i bam_file
if (os.path.isfile(bed_file)):
print ("\t+ File %s already exists" %bed_file)
else:
cmd_bedtools = "%s bamtobed -i %s > %s" %(bedtools_exe, bam_file, bed_file)
output_file.write(cmd_bedtools)
output_file.write("\n")
try:
subprocess.check_output(cmd_bedtools, shell = True)
except Exception as exc:
print ('***ERROR:')
print (cmd_bedtools)
print('bedtools command generated an exception: %s' %exc)
exit()
## generate samtools
if (os.path.isfile(sam_file)):
print ("\t+ File %s already exists" %sam_file)
else:
cmd_samtools = "%s view %s > %s" %(samtools_exe, bam_file, sam_file)
output_file.write(cmd_samtools)
output_file.write("\n")
try:
subprocess.check_output(cmd_samtools, shell = True)
except Exception as exc:
print ('***ERROR:')
print (cmd_samtools)
print('samtools view command generated an exception: %s' %exc)
exit()
## generate paste filter tmp file
if (os.path.isfile(pilfer_tmp)):
print ("\t+ File %s already exists" %pilfer_tmp)
else:
## paste Aligned.sortedByCoord.out.bed Aligned.sortedByCoord.out.sam | awk -v "OFS=\t" '{print $1, $2, $3, $16, $6}'
cmd_paste = "paste %s %s | awk -v \"OFS=\t\" \'{print $1, $2, $3, $16, $6}\' > %s" %(bed_file, sam_file, pilfer_tmp)
output_file.write(cmd_paste)
output_file.write("\n")
try:
subprocess.check_output(cmd_paste, shell = True)
except Exception as exc:
print ('***ERROR:')
print (cmd_paste)
print('paste bed sam command generated an exception: %s' %exc)
exit()
## parse pilfer tmp file
counter = 1
previous_line = ()
# Open file OUT
output_file = open(pilfer_file, 'w')
# Open file IN
fileHandler = open (pilfer_tmp, "r")
while True:
# Get next line from file
line = fileHandler.readline().strip()
# If line is empty then end of file reached
if not line :
break;
seq = line.split('\t')[3]
real_seq = seq.split('::PU')
seq_len = len(str(real_seq[0]))
## Discard smaller
if (previous_line):
if (previous_line == line):
line = previous_line
counter += 1
else:
line_split = previous_line.split('\t')
output_file.write('%s\t%s\t%s\t%s::PI\t%s\t%s\n' %(line_split[0], line_split[1], line_split[2], line_split[3], counter, line_split[4]))
#counter += 1
while True:
#get next line
next_line = fileHandler.readline().strip()
if (next_line == line):
counter += 1
else:
line_split = line.split('\t')
output_file.write('%s\t%s\t%s\t%s::PI\t%s\t%s\n' %(line_split[0], line_split[1], line_split[2], line_split[3], counter, line_split[4]))
previous_line = next_line
counter = 1
				break;
	else:
		# Seed the comparison with the first record; without this branch the
		# outer "if (previous_line)" never fires and no output is produced.
		previous_line = line
## close and finish
fileHandler.close()
output_file.close()
| 27.219697 | 138 | 0.680768 |
7ce786042f57d81a5644876d6657fee00934ca96 | 154 | py | Python | day7/main5list.py | nikhilsamninan/python-files | 15198459081097058a939b40b5e8ef754e578fe0 | [
"Apache-2.0"
] | null | null | null | day7/main5list.py | nikhilsamninan/python-files | 15198459081097058a939b40b5e8ef754e578fe0 | [
"Apache-2.0"
] | null | null | null | day7/main5list.py | nikhilsamninan/python-files | 15198459081097058a939b40b5e8ef754e578fe0 | [
"Apache-2.0"
] | null | null | null | a="Betty Bought a butter the butter was bitter so betty bought a better butter which was not bitter"
v = [word[-1] for word in a.split() if len(word) % 2 == 0]
print(v) | 51.333333 | 100 | 0.707792 |
7ce89a8d2a94f66d0921f4dfd7dff6f5d544c025 | 2,727 | py | Python | app/reader.py | lcarnevale/proxy-mqtt2influx | 89b3cd354b465d7451556a2d2ec49ac8688b4f17 | [
"MIT"
] | null | null | null | app/reader.py | lcarnevale/proxy-mqtt2influx | 89b3cd354b465d7451556a2d2ec49ac8688b4f17 | [
"MIT"
] | null | null | null | app/reader.py | lcarnevale/proxy-mqtt2influx | 89b3cd354b465d7451556a2d2ec49ac8688b4f17 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#!/usr/bin/env python
"""Writer class based on InfluxDB
This implementation does its best to follow Robert Martin's Clean Code guidelines.
The comments follow the Google Python Style Guide:
https://github.com/google/styleguide/blob/gh-pages/pyguide.md
"""
__copyright__ = 'Copyright 2021, FCRlab at University of Messina'
__author__ = 'Lorenzo Carnevale <[email protected]>'
__credits__ = ''
__description__ = 'Writer class based on InfluxDB'
import time
import logging
import threading
import persistqueue
from datetime import datetime
from influxdb_client.client.write_api import SYNCHRONOUS
from influxdb_client import InfluxDBClient, Point, WritePrecision | 32.464286 | 105 | 0.604694 |
7ce89c46f636fde71ee0a887ac7403a640c90ce5 | 1,781 | py | Python | example_problems/tutorial/tiling_mxn-boards_with_1x2-boards/services/tell_if_tilable/tell_if_tilable_server.py | DottaPaperella/TALight | 580322c3121c9acde9827f996fd4e39e31d93a6f | [
"MIT"
] | null | null | null | example_problems/tutorial/tiling_mxn-boards_with_1x2-boards/services/tell_if_tilable/tell_if_tilable_server.py | DottaPaperella/TALight | 580322c3121c9acde9827f996fd4e39e31d93a6f | [
"MIT"
] | null | null | null | example_problems/tutorial/tiling_mxn-boards_with_1x2-boards/services/tell_if_tilable/tell_if_tilable_server.py | DottaPaperella/TALight | 580322c3121c9acde9827f996fd4e39e31d93a6f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from sys import stderr, exit, argv
from random import randrange
#from TALinputs import TALinput
from multilanguage import Env, Lang, TALcolors
# METADATA OF THIS TAL_SERVICE:
problem="tiling_mxn-boards_with_1x2-boards"
service="is_tilable"
args_list = [
('m',int),
('n',int),
('my_conjecture',str),
('h',int),
('k',int),
('lang',str),
('ISATTY',bool),
]
ENV = Env(problem, service, args_list)
TAc = TALcolors(ENV)
LANG = Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
TAc.print(LANG.opening_msg, "green")
# START CODING YOUR SERVICE:
assert ENV['h'] == 1
assert ENV['k'] == 2
print()
if (ENV['m'] * ENV['n']) % 2 == 1:
if ENV['my_conjecture'] == "yes":
TAc.NO()
print(LANG.render_feedback("FALSE-is-not-tilable", f"Contrary to what you have asserted, the {ENV['m']}x{ENV['n']}-grid is NOT tilable. If you are not convinced you can submit a tiling of that grid to the service 'check_my_tiling'."))
if ENV['my_conjecture'] == "no":
TAc.OK()
print(LANG.render_feedback("TRUE-is-not-tilable", f"You are perfecty right: the {ENV['m']}x{ENV['n']}-grid is NOT tilable."))
if (ENV['m'] * ENV['n']) % 2 == 0:
if ENV['my_conjecture'] == "yes":
TAc.OK()
print(LANG.render_feedback("TRUE-is-tilable", f"We agree on the fact that the {ENV['m']}x{ENV['n']}-grid is tilable. If you want to exhibit us a tiling for this grid you can submit it to the service 'check_my_tiling'."))
if ENV['my_conjecture'] == "no":
TAc.NO()
print(LANG.render_feedback("FALSE-is-tilable", f"No, the {ENV['m']}x{ENV['n']}-grid is tilable. If you can not believe a tiling of the {ENV['m']}x{ENV['n']}-grid exists try the service 'gimme_hints_on_a_tiling'."))
exit(0)
| 35.62 | 242 | 0.64009 |
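# A minimal sketch, not from the original repository: the service above relies on
# the classic fact that an m x n board admits a 1x2 (domino) tiling exactly when
# m * n is even. Parity gives necessity (each domino covers two cells); the
# constructive helper below, an illustration only, gives sufficiency.
def build_tiling(m, n):
    """Return a list of ((r1, c1), (r2, c2)) domino placements, or None."""
    if (m * n) % 2 == 1:
        return None  # odd area: no perfect cover by 2-cell pieces exists
    if n % 2 == 0:
        # Even width: fill every row with horizontal dominoes.
        return [((r, c), (r, c + 1)) for r in range(m) for c in range(0, n, 2)]
    # Odd width forces m to be even: fill each column with vertical dominoes.
    return [((r, c), (r + 1, c)) for c in range(n) for r in range(0, m, 2)]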
7ceab378038506dba92e4b8d3ecd8a07fc74f4a2 | 1,469 | py | Python | tests/unit/peapods/runtimes/remote/ssh/test_ssh_remote.py | yk/jina | ab66e233e74b956390f266881ff5dc4e0110d3ff | [
"Apache-2.0"
] | 1 | 2020-12-23T12:34:00.000Z | 2020-12-23T12:34:00.000Z | tests/unit/peapods/runtimes/remote/ssh/test_ssh_remote.py | yk/jina | ab66e233e74b956390f266881ff5dc4e0110d3ff | [
"Apache-2.0"
] | null | null | null | tests/unit/peapods/runtimes/remote/ssh/test_ssh_remote.py | yk/jina | ab66e233e74b956390f266881ff5dc4e0110d3ff | [
"Apache-2.0"
] | null | null | null | import pytest
from jina.enums import RemoteAccessType
from jina.flow import Flow
from jina.parser import set_pea_parser, set_pod_parser
from jina.peapods.pods import BasePod
from jina.peapods.runtimes.remote.ssh import SSHRuntime
from jina.proto import jina_pb2
| 32.644444 | 96 | 0.701157 |
7ceacff0df961aecfe4df023a30c5731860a145d | 181 | py | Python | waio/factory/models/basic.py | dotX12/waio | 6bc41df2d650f31fdb11a1a2b67c6149afa0e11a | [
"MIT"
] | 24 | 2021-11-08T13:37:05.000Z | 2022-03-24T12:49:54.000Z | waio/factory/models/basic.py | dotX12/waio | 6bc41df2d650f31fdb11a1a2b67c6149afa0e11a | [
"MIT"
] | 1 | 2022-01-19T03:11:58.000Z | 2022-01-19T03:13:30.000Z | waio/factory/models/basic.py | dotX12/waio | 6bc41df2d650f31fdb11a1a2b67c6149afa0e11a | [
"MIT"
] | 5 | 2021-11-11T04:11:12.000Z | 2022-02-15T10:41:58.000Z | from dataclasses import dataclass
| 12.066667 | 33 | 0.740331 |
7cec971c07d5ed98dc62f84b80e44472db92d7d3 | 531 | py | Python | Uber/validExpression.py | Nithanaroy/random_scripts | 908e539e2b7050a09e03b4fc0d2621b23733d65a | [
"MIT"
] | null | null | null | Uber/validExpression.py | Nithanaroy/random_scripts | 908e539e2b7050a09e03b4fc0d2621b23733d65a | [
"MIT"
] | null | null | null | Uber/validExpression.py | Nithanaroy/random_scripts | 908e539e2b7050a09e03b4fc0d2621b23733d65a | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
    print(main('{(abc})'))  # Python 3 print; main() is defined in the part of the file not shown here
| 25.285714 | 57 | 0.551789 |
7cee12e1d9ee7123f1ed98d591b1f1a9ee9c89f2 | 10,845 | py | Python | sgains/tool.py | KrasnitzLab/sgains | 501c42bfdad4542725f00ca8199983eccf8c0b3f | [
"MIT"
] | 1 | 2017-09-08T05:09:59.000Z | 2017-09-08T05:09:59.000Z | sgains/tool.py | KrasnitzLab/sgains | 501c42bfdad4542725f00ca8199983eccf8c0b3f | [
"MIT"
] | 35 | 2017-07-31T04:13:40.000Z | 2019-09-06T13:32:17.000Z | sgains/tool.py | KrasnitzLab/sgains | 501c42bfdad4542725f00ca8199983eccf8c0b3f | [
"MIT"
] | 3 | 2017-09-08T05:10:34.000Z | 2019-06-11T09:06:41.000Z | import os
import sys
from copy import deepcopy
import traceback
import functools
from collections import defaultdict
import yaml
from argparse import ArgumentParser,\
RawDescriptionHelpFormatter, ArgumentDefaultsHelpFormatter
from sgains.configuration.parser import SgainsValidator, Config
from sgains.configuration.schema import sgains_schema
from sgains.executor import Executor
from sgains.pipelines.mappableregions_pipeline import MappableRegionsPipeline
from sgains.pipelines.genomeindex_pipeline import GenomeIndexPipeline
from sgains.pipelines.bins_pipeline import BinsPipeline
from sgains.pipelines.mapping_pipeline import MappingPipeline
from sgains.pipelines.extract_10x_pipeline import Extract10xPipeline
from sgains.pipelines.varbin_10x_pipeline import Varbin10xPipeline
from sgains.pipelines.varbin_pipeline import VarbinPipeline
from sgains.pipelines.r_pipeline import Rpipeline
from sgains.pipelines.composite_pipeline import CompositePipeline
SGAINS_COMMANDS = {
"genomeindex": {
"config_groups": ["aligner", "genome"],
"help": "builds appropriate hisat2 or bowtie index for the "
"reference genome",
},
"mappable_regions": {
"config_groups": ["aligner", "genome", "mappable_regions", "sge"],
"help": "finds all mappable regions in specified genome",
},
"bins": {
"config_groups": ["genome", "mappable_regions", "bins", "sge"],
"help": "calculates all bins boundaries for specified bins count "
"and read length",
},
"prepare": {
"config_groups": [
"aligner", "genome", "mappable_regions", "bins", "sge"],
"help": "combines all preparation steps ('genome', 'mappable-regions' "
"and 'bins') into single command",
},
"mapping": {
"config_groups": ["aligner", "genome", "reads", "mapping", "sge"],
"help": "performs mapping of cells reads to the reference genome",
},
"extract_10x": {
"config_groups": [
"data_10x", "reads", "sge"],
"help": "extracts cells reads from 10x Genomics datasets",
},
"varbin": {
"config_groups": ["bins", "mapping", "varbin", "sge"],
"help": "applies varbin algorithm to count read mappings in each bin",
},
"varbin_10x": {
"config_groups": [
"data_10x", "bins", "varbin", "sge"],
"help": "applies varbin algorithm to count read mappings in each bin "
"to 10x Genomics datasets without realigning",
},
"scclust": {
"config_groups": ["bins", "varbin", "scclust"],
"help": "segmentation and clustering based bin counts and "
"preparation of the SCGV input data"
},
"process": {
"config_groups": [
"aligner", "genome", "reads", "mapping", "bins", "varbin",
"scclust", "sge"],
"help": "combines all process steps ('mapping', 'varbin' "
"and 'scclust') into single command"
},
}
if __name__ == "__main__":
sys.exit(main())
| 30.635593 | 79 | 0.616136 |
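# A minimal sketch, not from the original repository: main() is omitted from this
# excerpt, so the wiring below is an assumption about its shape. The command
# table above is the natural driver for one argparse subparser per pipeline.
def build_parser_sketch():
    parser = ArgumentParser(
        description='sgains - sparse genomic analysis of individual nuclei',
        formatter_class=RawDescriptionHelpFormatter)
    subparsers = parser.add_subparsers(dest='command', title='subcommands')
    for name, spec in SGAINS_COMMANDS.items():
        # CLI names use hyphens, mirroring the 'mappable-regions' help text above.
        subparsers.add_parser(name.replace('_', '-'), help=spec['help'])
    return parser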
7cee8f95a77e8d2ded7b9467b41b6c25c5fb7cdf | 3,135 | py | Python | lib/modeling/VGG16.py | rsumner31/Detectron | 021685d42f7e8ac097e2bcf79fecb645f211378e | [
"Apache-2.0"
] | 429 | 2018-04-28T00:01:57.000Z | 2021-12-18T12:53:22.000Z | lib/modeling/VGG16.py | absorbguo/Detectron | 2f8161edc3092b0382cab535c977a180a8b3cc4d | [
"Apache-2.0"
] | 54 | 2018-12-26T13:04:32.000Z | 2020-04-24T04:09:30.000Z | lib/modeling/VGG16.py | absorbguo/Detectron | 2f8161edc3092b0382cab535c977a180a8b3cc4d | [
"Apache-2.0"
] | 96 | 2018-12-24T05:12:36.000Z | 2021-04-23T15:51:21.000Z | # Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""VGG16 from https://arxiv.org/abs/1409.1556."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from core.config import cfg
| 41.25 | 78 | 0.648166 |
7ceed4646921c1456f0b28f435da564f3dae7896 | 2,913 | py | Python | setup.py | yangjing1127/xmind2testcase | 49a581159a0d8e028f89939777399493662df111 | [
"MIT"
] | 537 | 2018-12-26T03:02:54.000Z | 2022-03-30T17:41:53.000Z | setup.py | yangjing1127/xmind2testcase | 49a581159a0d8e028f89939777399493662df111 | [
"MIT"
] | 49 | 2019-01-08T09:59:15.000Z | 2022-03-30T00:58:47.000Z | setup.py | yangjing1127/xmind2testcase | 49a581159a0d8e028f89939777399493662df111 | [
"MIT"
] | 190 | 2018-12-29T07:09:48.000Z | 2022-03-31T01:55:02.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import sys
from shutil import rmtree
from setuptools import setup, find_packages, Command
about = {}
here = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(here, 'xmind2testcase', '__about__.py'), encoding='utf-8') as f: # custom
exec(f.read(), about)
with io.open('README.md', encoding='utf-8') as f:
long_description = f.read()
install_requires = [ # custom
"xmind",
"flask",
"arrow",
]
setup(
name=about['__title__'],
version=about['__version__'],
description=about['__description__'],
long_description=long_description,
long_description_content_type='text/markdown',
keywords=about['__keywords__'],
author=about['__author__'],
author_email=about['__author_email__'],
url=about['__url__'],
license=about['__license__'],
packages=find_packages(exclude=['tests', 'test.*', 'docs']), # custom
package_data={ # custom
'': ['README.md'],
'webtool': ['static/*', 'static/css/*', 'static/guide/*', 'templates/*', 'schema.sql'],
},
install_requires=install_requires,
extras_require={},
python_requires='>=3.0, <4', # custom
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={ # custom
'console_scripts': [
'xmind2testcase=xmind2testcase.cli:cli_main',
]
},
cmdclass={
# python3 setup.py pypi
'pypi': PyPiCommand
}
)
| 28.281553 | 100 | 0.604875 |
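# A minimal sketch, not from the original repository: setup() above wires the
# 'pypi' command to a PyPiCommand class whose definition is omitted from this
# excerpt. A setuptools command of that kind conventionally looks like the
# following; the build-and-upload steps are illustrative assumptions only.
class PyPiCommandSketch(Command):
    """Support 'python3 setup.py pypi': build sdist/wheel and upload via twine."""
    description = 'Build and publish the package.'
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        try:
            rmtree(os.path.join(here, 'dist'))  # drop stale build artifacts
        except OSError:
            pass
        os.system('{0} setup.py sdist bdist_wheel'.format(sys.executable))
        os.system('twine upload dist/*')
        sys.exit()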
7cef0095a10852826052b744b28e1db78c985b8d | 2,670 | py | Python | skultrafast/styles.py | Tillsten/skultrafast | 778eaf1539b6d85f21ac53b011472605673ef7e8 | [
"BSD-3-Clause"
] | 10 | 2019-02-17T15:57:51.000Z | 2021-11-15T02:00:33.000Z | skultrafast/styles.py | cZahn/skultrafast | 23572ba9ea32238f34a8a15390fb572ecd8bc6fa | [
"BSD-3-Clause"
] | 1 | 2019-01-17T11:56:38.000Z | 2019-07-11T15:30:58.000Z | skultrafast/styles.py | cZahn/skultrafast | 23572ba9ea32238f34a8a15390fb572ecd8bc6fa | [
"BSD-3-Clause"
] | 6 | 2018-11-08T14:11:06.000Z | 2021-09-01T14:53:02.000Z | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 17 21:33:24 2015
@author: Tillsten
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
tableau20 = [(r/255., g/255., b/255.) for r, g, b in tableau20]
#plt.rcParams['savefig.dpi'] = 110
#plt.rcParams['font.family'] = 'Vera Sans'
out_ticks = {'xtick.direction': 'out',
'xtick.major.width': 1.5,
'xtick.minor.width': 1,
'xtick.major.size': 6,
'xtick.minor.size': 3,
'xtick.minor.visible': True,
'ytick.direction': 'out',
'ytick.major.width': 1.5,
'ytick.minor.width': 1,
'ytick.major.size': 6,
'ytick.minor.size': 3,
'ytick.minor.visible': True,
'axes.spines.top': False,
'axes.spines.right': False,
'text.hinting': True,
'axes.titlesize': 'xx-large',
'axes.titleweight': 'semibold',
}
plt.figure(figsize=(6,4))
with plt.style.context(out_ticks):
ax = plt.subplot(111)
x = np.linspace(0, 7, 1000)
y = np.exp(-x/1.5)*np.cos(x/1*(2*np.pi))#*np.cos(x/0.05*(2*np.pi))
l, = plt.plot(x, np.exp(-x/1.5), lw=0.5, color='grey')
l, = plt.plot(x, -np.exp(-x/1.5), lw=0.5, color='grey')
l, = plt.plot(x, y, lw=1.1)
#l.set_clip_on(0)
plt.tick_params(which='both', top=False, right=False)
plt.margins(0.01)
ax.text(7, 1, r'$y(t)=\exp\left(-t/1.5\right)\cos(\omega_1t)\cos(\omega_2t)$',
fontsize=18, va='top', ha='right')
#plt.title("Hallo")
plt.setp(plt.gca(), xlabel='Time [s]', ylabel='Amplitude')
ax = plt.axes([0.57, 0.25, 0.3, .2])
#ax.plot(np.fft.fftfreq(x.size)[:y.size/2], abs(np.fft.fft(y))[:y.size/2])
    ax.fill_between(np.fft.fftfreq(x.size, x[1]-x[0])[:y.size//2],
                    abs(np.fft.fft(y))[:y.size//2], alpha=0.2, color='r')
ax.set_xlim(0, 10)
ax.set_xlabel("Frequency")
ax.xaxis.labelpad = 1
plt.locator_params(nbins=4)
plt.tick_params(which='both', top=False, right=False)
plt.tick_params(which='minor', bottom=False, left=False)
#plt.grid(1, axis='y', linestyle='-', alpha=0.3, lw=.5)
plt.show()
| 37.083333 | 83 | 0.522846 |
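# A short note, not from the original repository: numpy's rfft pair returns the
# one-sided spectrum directly, avoiding the manual half-spectrum slicing used
# above and the integer-slice pitfall that comes with it:
#   freqs = np.fft.rfftfreq(x.size, x[1] - x[0])
#   spectrum = np.abs(np.fft.rfft(y))
#   ax.fill_between(freqs, spectrum, alpha=0.2, color='r')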
7cef6acdfa1f2191c94118bdb071a657a3a738d4 | 3,634 | py | Python | src/oci/devops/models/github_build_run_source.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2020-09-10T22:09:45.000Z | 2021-12-24T17:00:07.000Z | src/oci/devops/models/github_build_run_source.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/devops/models/github_build_run_source.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .build_run_source import BuildRunSource
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
| 33.648148 | 245 | 0.660704 |
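# A minimal sketch, not from the original repository: the class body is omitted
# from this excerpt. OCI model subclasses of this kind conventionally pin the
# discriminator field in __init__; the attribute names here are assumptions.
@init_model_state_from_kwargs
class GithubBuildRunSourceSketch(BuildRunSource):
    """Hypothetical shape of the omitted subclass; not the SDK's actual code."""

    def __init__(self, **kwargs):
        self.swagger_types = {'source_type': 'str', 'trigger_id': 'str'}
        self.attribute_map = {'source_type': 'sourceType', 'trigger_id': 'triggerId'}
        self._source_type = 'GITHUB'  # discriminator value for this subtype
        self._trigger_id = kwargs.get('trigger_id')

    def __repr__(self):
        return formatted_flat_dict(self)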
7cf24c69a41740779ba55ee7c2d11c15c8feec7e | 12,133 | py | Python | aiida_fleur/tests/tools/test_common_fleur_wf.py | anoopkcn/aiida-fleur | 5d4cc2092b7c3ce5402f1d4b89787eae53b2e60f | [
"MIT"
] | null | null | null | aiida_fleur/tests/tools/test_common_fleur_wf.py | anoopkcn/aiida-fleur | 5d4cc2092b7c3ce5402f1d4b89787eae53b2e60f | [
"MIT"
] | null | null | null | aiida_fleur/tests/tools/test_common_fleur_wf.py | anoopkcn/aiida-fleur | 5d4cc2092b7c3ce5402f1d4b89787eae53b2e60f | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import pytest
import os
# is_code
def test_get_inputs_fleur():
'''
Tests if get_inputs_fleur assembles inputs correctly.
Note it is the work of FleurCalculation
to check if input types are correct i.e. 'code' is a Fleur code etc.
'''
from aiida_fleur.tools.common_fleur_wf import get_inputs_fleur
from aiida.orm import Dict
inputs = {'code': 'code', 'remote': 'remote', 'fleurinp': 'fleurinp',
'options': {'custom_scheduler_commands': 'test_command'}, 'label': 'label',
'description': 'description', 'settings': {'test': 1}, 'serial': False}
results = get_inputs_fleur(**inputs)
out_options = results['options'].get_dict()
out_settings = results['settings'].get_dict()
assert results['code'] == 'code'
assert results['fleurinpdata'] == 'fleurinp'
assert results['parent_folder'] == 'remote'
assert results['description'] == 'description'
assert results['label'] == 'label'
assert out_options == {'custom_scheduler_commands': 'test_command',
'withmpi': True}
assert out_settings == {'test': 1}
inputs = {'code': 'code', 'remote': 'remote', 'fleurinp': 'fleurinp',
'options': {'custom_scheduler_commands': 'test_command'}, 'serial': True}
results = get_inputs_fleur(**inputs)
out_options = results['options'].get_dict()
assert results['description'] == ''
assert results['label'] == ''
assert out_options == {'custom_scheduler_commands': 'test_command',
'withmpi': False, 'resources': {"num_machines": 1}}
def test_get_inputs_inpgen(fixture_code, generate_structure):
'''
Tests if get_inputs_fleur assembles inputs correctly.
Note it is the work of FleurinputgenCalculation
to check if input types are correct i.e. 'code' is a Fleur code etc.
'''
from aiida_fleur.tools.common_fleur_wf import get_inputs_inpgen
from aiida.orm import Dict
code = fixture_code('fleur.inpgen')
structure = generate_structure()
params = Dict(dict={'test': 1})
inputs = {'structure': structure, 'inpgencode': code, 'options': {},
'label': 'label', 'description': 'description',
'params': params}
returns = {'metadata': {
'options': {'withmpi': False, 'resources': {'num_machines': 1}},
'description': 'description', 'label': 'label'},
'code': code, 'parameters': params, 'structure': structure
}
assert get_inputs_inpgen(**inputs) == returns
# repeat without a label and description
inputs = {'structure': structure, 'inpgencode': code, 'options': {},
'params': params}
returns = {'metadata': {
'options': {'withmpi': False, 'resources': {'num_machines': 1}},
'description': '', 'label': ''},
'code': code, 'parameters': params, 'structure': structure}
assert get_inputs_inpgen(**inputs) == returns
# test_and_get_codenode
def test_test_and_get_codenode_inpgen(fixture_code):
from aiida_fleur.tools.common_fleur_wf import test_and_get_codenode
from aiida.orm import Code
from aiida.common.exceptions import NotExistent
# install code setup code
code = fixture_code('fleur.inpgen')
code_fleur = fixture_code('fleur.fleur')
code_fleur.label = 'fleur_test'
code_fleur.store()
expected = 'fleur.inpgen'
nonexpected = 'fleur.fleur'
not_existing = 'fleur.not_existing'
assert isinstance(test_and_get_codenode(code, expected), Code)
with pytest.raises(ValueError) as msg:
test_and_get_codenode(code, nonexpected, use_exceptions=True)
assert str(msg.value) == ("Given Code node is not of expected code type.\n"
"Valid labels for a fleur.fleur executable are:\n"
"* fleur_test@localhost-test")
with pytest.raises(ValueError) as msg:
test_and_get_codenode(code, not_existing, use_exceptions=True)
assert str(msg.value) == ("Code not valid, and no valid codes for fleur.not_existing.\n"
"Configure at least one first using\n"
" verdi code setup")
def test_get_kpoints_mesh_from_kdensity(generate_structure):
from aiida_fleur.tools.common_fleur_wf import get_kpoints_mesh_from_kdensity
from aiida.orm import KpointsData
a, b = get_kpoints_mesh_from_kdensity(generate_structure(), 0.1)
assert a == ([21, 21, 21], [0.0, 0.0, 0.0])
assert isinstance(b, KpointsData)
# @pytest.mark.skip(reason="There seems to be now way to add outputs to CalcJobNode")
def test_performance_extract_calcs(fixture_localhost,
generate_calc_job_node):
from aiida_fleur.tools.common_fleur_wf import performance_extract_calcs
from aiida.common.links import LinkType
from aiida.orm import Dict
out = Dict(dict={'title': 'A Fleur input generator calculation with aiida',
'energy': -138529.7052157,
'bandgap': 6.0662e-06,
'end_date': {'date': '2019/11/12', 'time': '16:12:08'},
'unparsed': [],
'walltime': 43,
'warnings': {'info': {}, 'debug': {}, 'error': {}, 'warning': {}},
'start_date': {'date': '2019/11/12', 'time': '16:11:25'},
'parser_info': 'AiiDA Fleur Parser v0.2beta',
'CalcJob_uuid': '3dc62d43-b607-4415-920f-e0d34e805711',
'creator_name': 'fleur 30',
'energy_units': 'eV',
'kmax': 4.2,
'fermi_energy': 0.0605833326,
'spin_density': 0.0792504665,
'bandgap_units': 'eV',
'force_largest': 0.0,
'energy_hartree': -5090.8728101494,
'walltime_units': 'seconds',
'charge_density1': 0.0577674505,
'charge_density2': 0.0461840944,
'number_of_atoms': 4,
'parser_warnings': [],
'magnetic_moments': [3.3720063737, 3.3719345944, 3.3719329177, 3.3719329162],
'number_of_kpoints': 8,
'number_of_species': 1,
'fermi_energy_units': 'Htr',
'sum_of_eigenvalues': -2973.4129786677,
'output_file_version': '0.27',
'energy_hartree_units': 'Htr',
'number_of_atom_types': 4,
'number_of_iterations': 11,
'number_of_symmetries': 8,
'energy_core_electrons': -2901.8120489845,
'magnetic_moment_units': 'muBohr',
'overall_charge_density': 0.0682602474,
'creator_target_structure': ' ',
'energy_valence_electrons': -71.6009296831,
'magnetic_spin_up_charges': [9.1494766577,
9.1494806151,
9.1494806833,
9.1494806834],
'orbital_magnetic_moments': [],
'density_convergence_units': 'me/bohr^3',
'number_of_spin_components': 2,
'charge_den_xc_den_integral': -223.295208608,
'magnetic_spin_down_charges': [5.777470284,
5.7775460208,
5.7775477657,
5.7775477672],
'number_of_iterations_total': 11,
'creator_target_architecture': 'GEN',
'orbital_magnetic_moment_units': 'muBohr',
'orbital_magnetic_spin_up_charges': [],
'orbital_magnetic_spin_down_charges': []})
out.store()
node = generate_calc_job_node('fleur.fleur', fixture_localhost)
node.store()
out.add_incoming(node, link_type=LinkType.CREATE, link_label='output_parameters')
result = performance_extract_calcs([node.pk])
assert result == {'n_symmetries': [8], 'n_spin_components': [2], 'n_kpoints': [8],
'n_iterations': [11], 'walltime_sec': [43],
'walltime_sec_per_it': [3.909090909090909],
'n_iterations_total': [11], 'density_distance': [0.0682602474],
'computer': ['localhost-test'],
'n_atoms': [4], 'kmax': [4.2], 'cost': [75866.11200000001],
'costkonstant': [147.02734883720933], 'walltime_sec_cor': [43],
'total_cost': [834527.2320000001], 'fermi_energy': [0.0605833326],
'bandgap': [6.0662e-06], 'energy': [-138529.7052157],
'force_largest': [0.0],
'ncores': [12], 'pk': [node.pk], 'uuid': [node.uuid],
'serial': [False],
'resources': [{'num_machines': 1, 'num_mpiprocs_per_machine': 1}]}
inputs_optimize = [(4, 8, 3, True, 0.5, None, 720),
(4, 8, 3, True, 2, None, 720),
(4, 8, 3, True, 100, None, 720),
(4, 8, 3, True, 100, None, 720, 0.5),
(4, 8, 3, False, 0.5, None, 720)]
results_optimize = [
(4, 3, 8, 'Computational setup is perfect! Nodes: 4, MPIs per node 3, OMP per MPI 8. Number of k-points is 720'),
(4, 6, 4, 'Computational setup is perfect! Nodes: 4, MPIs per node 6, OMP per MPI 4. Number of k-points is 720'),
(4, 12, 2, 'Computational setup is perfect! Nodes: 4, MPIs per node 12, OMP per MPI 2. Number of k-points is 720'),
(3, 24, 1, 'WARNING: Changed the number of nodes from 4 to 3'),
(4, 20, 1, 'WARNING: Changed the number of MPIs per node from 8 to 20 an OMP from 3 to 1. Changed the number of nodes from 4 to 4. Number of k-points is 720.')]
| 43.332143 | 164 | 0.597461 |
7cf2ae07e37425db960a133be2b5c330c6ba9916 | 36,957 | py | Python | src/probnum/random_variables/_random_variable.py | admdev8/probnum | 792b6299bac247cf8b1b5056756f0f078855d83a | [
"MIT"
] | null | null | null | src/probnum/random_variables/_random_variable.py | admdev8/probnum | 792b6299bac247cf8b1b5056756f0f078855d83a | [
"MIT"
] | 2 | 2020-12-28T19:37:16.000Z | 2020-12-28T19:37:31.000Z | src/probnum/random_variables/_random_variable.py | admdev8/probnum | 792b6299bac247cf8b1b5056756f0f078855d83a | [
"MIT"
] | null | null | null | """
Random Variables.
This module implements random variables. Random variables are the main in- and outputs
of probabilistic numerical methods.
"""
from typing import Any, Callable, Dict, Generic, Optional, Tuple, TypeVar, Union
import numpy as np
from probnum import utils as _utils
from probnum.type import (
ArrayLikeGetitemArgType,
DTypeArgType,
FloatArgType,
RandomStateArgType,
RandomStateType,
ShapeArgType,
ShapeType,
)
try:
# functools.cached_property is only available in Python >=3.8
from functools import cached_property
except ImportError:
from cached_property import cached_property
_ValueType = TypeVar("ValueType")
def sample(self, size: ShapeArgType = ()) -> _ValueType:
"""
Draw realizations from a random variable.
Parameters
----------
size : tuple
Size of the drawn sample of realizations.
Returns
-------
sample : array-like
Sample of realizations with the given ``size`` and the inherent ``shape``.
"""
if self.__sample is None:
raise NotImplementedError("No sampling method provided.")
return self.__sample(size=_utils.as_shape(size))
def cdf(self, x: _ValueType) -> np.float_:
"""
Cumulative distribution function.
Parameters
----------
x : array-like
Evaluation points of the cumulative distribution function.
The shape of this argument should be :code:`(..., S1, ..., SN)`, where
:code:`(S1, ..., SN)` is the :attr:`shape` of the random variable.
The cdf evaluation will be broadcast over all additional dimensions.
Returns
-------
q : array-like
Value of the cumulative density function at the given points.
"""
if self.__cdf is not None:
return RandomVariable._ensure_numpy_float(
"cdf", self.__cdf(self._as_value_type(x))
)
elif self.__logcdf is not None:
cdf = np.exp(self.logcdf(self._as_value_type(x)))
assert isinstance(cdf, np.float_)
return cdf
else:
raise NotImplementedError(
f"Neither the `cdf` nor the `logcdf` of the random variable object "
f"with type `{type(self).__name__}` is implemented."
)
def logcdf(self, x: _ValueType) -> np.float_:
"""
Log-cumulative distribution function.
Parameters
----------
x : array-like
Evaluation points of the cumulative distribution function.
The shape of this argument should be :code:`(..., S1, ..., SN)`, where
:code:`(S1, ..., SN)` is the :attr:`shape` of the random variable.
The logcdf evaluation will be broadcast over all additional dimensions.
Returns
-------
q : array-like
Value of the log-cumulative density function at the given points.
"""
if self.__logcdf is not None:
return RandomVariable._ensure_numpy_float(
"logcdf", self.__logcdf(self._as_value_type(x))
)
elif self.__cdf is not None:
logcdf = np.log(self.__cdf(x))
assert isinstance(logcdf, np.float_)
return logcdf
else:
raise NotImplementedError(
f"Neither the `logcdf` nor the `cdf` of the random variable object "
f"with type `{type(self).__name__}` is implemented."
)
def quantile(self, p: FloatArgType) -> _ValueType:
"""Quantile function.
The quantile function :math:`Q \\colon [0, 1] \\to \\mathbb{R}` of a random
variable :math:`X` is defined as
:math:`Q(p) = \\inf\\{ x \\in \\mathbb{R} \\colon p \\le F_X(x) \\}`, where
:math:`F_X \\colon \\mathbb{R} \\to [0, 1]` is the :meth:`cdf` of the random
variable. From the definition it follows that the quantile function always
returns values of the same dtype as the random variable. For instance, for a
discrete distribution over the integers, the returned quantiles will also be
integers. This means that, in general, :math:`Q(0.5)` is not equal to the
:attr:`median` as it is defined in this class. See
https://en.wikipedia.org/wiki/Quantile_function for more details and examples.
"""
if self.__shape != ():
raise NotImplementedError(
"The quantile function is only defined for scalar random variables."
)
if self.__quantile is None:
raise NotImplementedError
try:
p = _utils.as_numpy_scalar(p, dtype=np.floating)
except TypeError as exc:
raise TypeError(
"The given argument `p` can not be cast to a `np.floating` object."
) from exc
quantile = self.__quantile(p)
if quantile.shape != self.__shape:
raise ValueError(
f"The quantile function should return values of the same shape as the "
f"random variable, i.e. {self.__shape}, but it returned a value with "
f"{quantile.shape}."
)
if quantile.dtype != self.__dtype:
raise ValueError(
f"The quantile function should return values of the same dtype as the "
f"random variable, i.e. `{self.__dtype.name}`, but it returned a value "
f"with dtype `{quantile.dtype.name}`."
)
return quantile
def reshape(self, newshape: ShapeArgType) -> "RandomVariable":
"""
Give a new shape to a random variable.
Parameters
----------
newshape : int or tuple of ints
New shape for the random variable. It must be compatible with the original
shape.
Returns
-------
reshaped_rv : ``self`` with the new dimensions of ``shape``.
"""
newshape = _utils.as_shape(newshape)
return RandomVariable(
shape=newshape,
dtype=self.dtype,
random_state=_utils.derive_random_seed(self.random_state),
sample=lambda size: self.sample(size).reshape(size + newshape),
mode=lambda: self.mode.reshape(newshape),
median=lambda: self.median.reshape(newshape),
mean=lambda: self.mean.reshape(newshape),
cov=lambda: self.cov,
var=lambda: self.var.reshape(newshape),
std=lambda: self.std.reshape(newshape),
entropy=lambda: self.entropy,
as_value_type=self.__as_value_type,
)
def transpose(self, *axes: int) -> "RandomVariable":
"""
Transpose the random variable.
Parameters
----------
axes : None, tuple of ints, or n ints
See documentation of numpy.ndarray.transpose.
Returns
-------
transposed_rv : The transposed random variable.
"""
return RandomVariable(
shape=np.empty(shape=self.shape).transpose(*axes).shape,
dtype=self.dtype,
random_state=_utils.derive_random_seed(self.random_state),
sample=lambda size: self.sample(size).transpose(*axes),
mode=lambda: self.mode.transpose(*axes),
median=lambda: self.median.transpose(*axes),
mean=lambda: self.mean.transpose(*axes),
cov=lambda: self.cov,
var=lambda: self.var.transpose(*axes),
std=lambda: self.std.transpose(*axes),
entropy=lambda: self.entropy,
as_value_type=self.__as_value_type,
)
T = property(transpose)
# Unary arithmetic operations
# Binary arithmetic operations
__array_ufunc__ = None
"""
This prevents numpy from calling elementwise arithmetic
operations allowing expressions like: y = np.array([1, 1]) + RV
to call the arithmetic operations defined by RandomVariable
instead of elementwise. Thus no array of RandomVariables but a
RandomVariable with the correct shape is returned.
"""
def _as_value_type(self, x: Any) -> _ValueType:
if self.__as_value_type is not None:
return self.__as_value_type(x)
return x
| 34.668856 | 107 | 0.596152 |
7cf38c52d649f28843e5da3730409c34a52dc82f | 8,026 | py | Python | platform/gcutil/lib/google_compute_engine/gcutil_lib/address_cmds_test.py | IsaacHuang/google-cloud-sdk | 52afa5d1a75dff08f4f5380c5cccc015bf796ca5 | [
"Apache-2.0"
] | null | null | null | platform/gcutil/lib/google_compute_engine/gcutil_lib/address_cmds_test.py | IsaacHuang/google-cloud-sdk | 52afa5d1a75dff08f4f5380c5cccc015bf796ca5 | [
"Apache-2.0"
] | null | null | null | platform/gcutil/lib/google_compute_engine/gcutil_lib/address_cmds_test.py | IsaacHuang/google-cloud-sdk | 52afa5d1a75dff08f4f5380c5cccc015bf796ca5 | [
"Apache-2.0"
] | 2 | 2020-07-25T05:03:06.000Z | 2020-11-04T04:55:57.000Z | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for address collection commands."""
import path_initializer
path_initializer.InitSysPath()
import json
import unittest
import gflags as flags
from gcutil_lib import address_cmds
from gcutil_lib import gcutil_unittest
from gcutil_lib import mock_api
from gcutil_lib import mock_lists
FLAGS = flags.FLAGS
if __name__ == '__main__':
unittest.main(testLoader=gcutil_unittest.GcutilLoader())
| 32.362903 | 75 | 0.705457 |
7cf46d5f8307606187101597e795384399b48446 | 804 | py | Python | vote/migrations/0005_auto_20210204_1900.py | jnegrete2005/JuradoFMS | 25848037e51de1781c419155615d0fb41edc07ec | [
"MIT"
] | 2 | 2021-02-24T21:57:50.000Z | 2021-03-15T08:44:09.000Z | vote/migrations/0005_auto_20210204_1900.py | jnegrete2005/JuradoFMS | 25848037e51de1781c419155615d0fb41edc07ec | [
"MIT"
] | null | null | null | vote/migrations/0005_auto_20210204_1900.py | jnegrete2005/JuradoFMS | 25848037e51de1781c419155615d0fb41edc07ec | [
"MIT"
] | null | null | null | # Generated by Django 3.1.5 on 2021-02-05 00:00
import django.contrib.postgres.fields
from django.db import migrations, models
| 32.16 | 163 | 0.655473 |
7cf5ba1b8968aa69e6a1b87247368728da9bf55b | 11,847 | py | Python | tools/wasm-sourcemap.py | ngzhian/emscripten | 94b1555a09f869d65354a2033da724ce77a43106 | [
"MIT"
] | 1 | 2019-08-16T23:42:09.000Z | 2019-08-16T23:42:09.000Z | tools/wasm-sourcemap.py | ngzhian/emscripten | 94b1555a09f869d65354a2033da724ce77a43106 | [
"MIT"
] | null | null | null | tools/wasm-sourcemap.py | ngzhian/emscripten | 94b1555a09f869d65354a2033da724ce77a43106 | [
"MIT"
] | 1 | 2019-09-26T20:05:46.000Z | 2019-09-26T20:05:46.000Z | #!/usr/bin/env python
# Copyright 2018 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""Utility tools that extracts DWARF information encoded in a wasm output
produced by the LLVM tools, and encodes it as a wasm source map. Additionally,
it can collect original sources, change files prefixes, and strip debug
sections from a wasm file.
"""
import argparse
from collections import OrderedDict, namedtuple
import json
import logging
from math import floor, log
import os
import re
from subprocess import Popen, PIPE
import sys
sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from tools.shared import asstr
logger = logging.getLogger('wasm-sourcemap')
# SourceMapPrefixes contains resolver for file names that are:
# - "sources" is for names that output to source maps JSON
# - "load" is for paths that used to load source text
SourceMapPrefixes = namedtuple('SourceMapPrefixes', 'sources, load')
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG if os.environ.get('EMCC_DEBUG') else logging.INFO)
sys.exit(main())
| 35.258929 | 171 | 0.653668 |
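# A minimal sketch, not from the original repository: source maps encode mapping
# deltas as base64 VLQ, which the omitted body of this tool has to produce.
# A self-contained encoder for that scheme:
def encode_vlq(value):
    """Encode one signed integer as a base64 VLQ string."""
    chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
    vlq = (-value << 1 | 1) if value < 0 else value << 1  # sign goes to bit 0
    out = ''
    while True:
        digit = vlq & 31  # five payload bits per base64 digit
        vlq >>= 5
        out += chars[digit | (32 if vlq else 0)]  # bit 5 marks a continuation
        if not vlq:
            return out

# Example: encode_vlq(16) == 'gB'; negative offsets carry their sign in the low bit.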
7cf6644c8071f50f1d98ec6acd6e40c622a3ce59 | 12,540 | py | Python | pysnmp-with-texts/ENTERASYS-NAC-APPLIANCE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/ENTERASYS-NAC-APPLIANCE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/ENTERASYS-NAC-APPLIANCE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module ENTERASYS-NAC-APPLIANCE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ENTERASYS-NAC-APPLIANCE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:04:09 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint")
etsysModules, = mibBuilder.importSymbols("ENTERASYS-MIB-NAMES", "etsysModules")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
Bits, ObjectIdentity, MibIdentifier, Counter64, iso, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, IpAddress, Unsigned32, TimeTicks, Gauge32, Integer32, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "ObjectIdentity", "MibIdentifier", "Counter64", "iso", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "IpAddress", "Unsigned32", "TimeTicks", "Gauge32", "Integer32", "Counter32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
etsysNacApplianceMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73))
etsysNacApplianceMIB.setRevisions(('2010-03-09 13:03',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: etsysNacApplianceMIB.setRevisionsDescriptions(('The initial version of this MIB module.',))
if mibBuilder.loadTexts: etsysNacApplianceMIB.setLastUpdated('201003091303Z')
if mibBuilder.loadTexts: etsysNacApplianceMIB.setOrganization('Enterasys Networks, Inc')
if mibBuilder.loadTexts: etsysNacApplianceMIB.setContactInfo('Postal: Enterasys Networks 50 Minuteman Rd. Andover, MA 01810-1008 USA Phone: +1 978 684 1000 E-mail: [email protected] WWW: http://www.enterasys.com')
if mibBuilder.loadTexts: etsysNacApplianceMIB.setDescription("This MIB module defines a portion of the SNMP enterprise MIBs under Enterasys Networks' enterprise OID pertaining to NAC Appliance Status.")
etsysNacApplianceObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 1))
etsysNacApplAuthenticationRequests = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 1, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNacApplAuthenticationRequests.setStatus('current')
if mibBuilder.loadTexts: etsysNacApplAuthenticationRequests.setDescription('Represents the number of authentication requests made since the NAC was started.')
etsysNacApplAuthenticationSuccesses = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNacApplAuthenticationSuccesses.setStatus('current')
if mibBuilder.loadTexts: etsysNacApplAuthenticationSuccesses.setDescription('Represents the number of successful authentication requests made since the NAC was started.')
etsysNacApplAuthenticationFailures = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNacApplAuthenticationFailures.setStatus('current')
if mibBuilder.loadTexts: etsysNacApplAuthenticationFailures.setDescription('Represents the number of failed authentication requests made since the NAC was started.')
etsysNacApplRadiusChallenges = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNacApplRadiusChallenges.setStatus('current')
if mibBuilder.loadTexts: etsysNacApplRadiusChallenges.setDescription('Represents the number of Radius challenges made since the NAC was started.')
etsysNacApplAuthenticationInvalidRequests = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNacApplAuthenticationInvalidRequests.setStatus('current')
if mibBuilder.loadTexts: etsysNacApplAuthenticationInvalidRequests.setDescription('Represents the number of invalid authentication requests made since the NAC was started.')
etsysNacApplAuthenticationDuplicateRequests = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNacApplAuthenticationDuplicateRequests.setStatus('current')
if mibBuilder.loadTexts: etsysNacApplAuthenticationDuplicateRequests.setDescription('Represents the number of duplicate authentication requests made since the NAC was started.')
etsysNacApplAuthenticationMalformedRequests = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNacApplAuthenticationMalformedRequests.setStatus('current')
if mibBuilder.loadTexts: etsysNacApplAuthenticationMalformedRequests.setDescription('Represents the number of malformed authentication requests made since the NAC was started.')
etsysNacApplAuthenticationBadRequests = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNacApplAuthenticationBadRequests.setStatus('current')
if mibBuilder.loadTexts: etsysNacApplAuthenticationBadRequests.setDescription('Represents the number of bad authentication requests made since the NAC was started.')
etsysNacApplAuthenticationDroppedPackets = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNacApplAuthenticationDroppedPackets.setStatus('current')
if mibBuilder.loadTexts: etsysNacApplAuthenticationDroppedPackets.setDescription('Represents the number of dropped authentication packets since the NAC was started.')
etsysNacApplAuthenticationUnknownTypes = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNacApplAuthenticationUnknownTypes.setStatus('current')
if mibBuilder.loadTexts: etsysNacApplAuthenticationUnknownTypes.setDescription('Represents the number of unknown authentication types since the NAC was started.')
etsysNacApplAssessmentRequests = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNacApplAssessmentRequests.setStatus('current')
if mibBuilder.loadTexts: etsysNacApplAssessmentRequests.setDescription('Represents the number of assessment requests made since the NAC was started.')
etsysNacApplCaptivePortalRequests = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNacApplCaptivePortalRequests.setStatus('current')
if mibBuilder.loadTexts: etsysNacApplCaptivePortalRequests.setDescription('Represents the number of captive portal requests made since the NAC was started.')
etsysNacApplContactLostSwitches = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 1, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNacApplContactLostSwitches.setStatus('current')
if mibBuilder.loadTexts: etsysNacApplContactLostSwitches.setDescription('Represents the number of configured switches with which the NAC has lost SNMP contact.')
etsysNacApplIPResolutionFailures = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 1, 14), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNacApplIPResolutionFailures.setStatus('current')
if mibBuilder.loadTexts: etsysNacApplIPResolutionFailures.setDescription('Represents the number of failed IP Resolution attempts made since the NAC was started.')
etsysNacApplIPResolutionTimeouts = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 1, 15), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNacApplIPResolutionTimeouts.setStatus('current')
if mibBuilder.loadTexts: etsysNacApplIPResolutionTimeouts.setDescription('Represents the number of IP Resolution attempts that timed out since the NAC was started.')
etsysNacApplConnectedAgents = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 1, 16), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysNacApplConnectedAgents.setStatus('current')
if mibBuilder.loadTexts: etsysNacApplConnectedAgents.setDescription('Represents the number of End-System Assessment Agents currently connected to the NAC.')
etsysNacApplianceMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 2))
etsysNacApplianceMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 2, 1))
etsysNacApplianceMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 2, 2))
etsysNacApplianceMIBGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 2, 1, 1)).setObjects(("ENTERASYS-NAC-APPLIANCE-MIB", "etsysNacApplAuthenticationRequests"), ("ENTERASYS-NAC-APPLIANCE-MIB", "etsysNacApplAuthenticationSuccesses"), ("ENTERASYS-NAC-APPLIANCE-MIB", "etsysNacApplAuthenticationFailures"), ("ENTERASYS-NAC-APPLIANCE-MIB", "etsysNacApplRadiusChallenges"), ("ENTERASYS-NAC-APPLIANCE-MIB", "etsysNacApplAuthenticationInvalidRequests"), ("ENTERASYS-NAC-APPLIANCE-MIB", "etsysNacApplAuthenticationDuplicateRequests"), ("ENTERASYS-NAC-APPLIANCE-MIB", "etsysNacApplAuthenticationMalformedRequests"), ("ENTERASYS-NAC-APPLIANCE-MIB", "etsysNacApplAuthenticationBadRequests"), ("ENTERASYS-NAC-APPLIANCE-MIB", "etsysNacApplAuthenticationDroppedPackets"), ("ENTERASYS-NAC-APPLIANCE-MIB", "etsysNacApplAuthenticationUnknownTypes"), ("ENTERASYS-NAC-APPLIANCE-MIB", "etsysNacApplAssessmentRequests"), ("ENTERASYS-NAC-APPLIANCE-MIB", "etsysNacApplCaptivePortalRequests"), ("ENTERASYS-NAC-APPLIANCE-MIB", "etsysNacApplContactLostSwitches"), ("ENTERASYS-NAC-APPLIANCE-MIB", "etsysNacApplIPResolutionFailures"), ("ENTERASYS-NAC-APPLIANCE-MIB", "etsysNacApplIPResolutionTimeouts"), ("ENTERASYS-NAC-APPLIANCE-MIB", "etsysNacApplConnectedAgents"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
etsysNacApplianceMIBGroup = etsysNacApplianceMIBGroup.setStatus('current')
if mibBuilder.loadTexts: etsysNacApplianceMIBGroup.setDescription('The basic collection of objects providing status information about the NAC Appliance.')
etsysNacApplianceMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 5624, 1, 2, 73, 2, 2, 1)).setObjects(("ENTERASYS-NAC-APPLIANCE-MIB", "etsysNacApplianceMIBGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
etsysNacApplianceMIBCompliance = etsysNacApplianceMIBCompliance.setStatus('current')
if mibBuilder.loadTexts: etsysNacApplianceMIBCompliance.setDescription('The compliance statement for clients implementing the NAC Appliance Status MIB.')
mibBuilder.exportSymbols("ENTERASYS-NAC-APPLIANCE-MIB", etsysNacApplianceMIBCompliance=etsysNacApplianceMIBCompliance, etsysNacApplAuthenticationDuplicateRequests=etsysNacApplAuthenticationDuplicateRequests, etsysNacApplIPResolutionTimeouts=etsysNacApplIPResolutionTimeouts, etsysNacApplianceObjects=etsysNacApplianceObjects, etsysNacApplAuthenticationInvalidRequests=etsysNacApplAuthenticationInvalidRequests, etsysNacApplAuthenticationUnknownTypes=etsysNacApplAuthenticationUnknownTypes, etsysNacApplianceMIBCompliances=etsysNacApplianceMIBCompliances, etsysNacApplAssessmentRequests=etsysNacApplAssessmentRequests, etsysNacApplAuthenticationBadRequests=etsysNacApplAuthenticationBadRequests, etsysNacApplAuthenticationRequests=etsysNacApplAuthenticationRequests, etsysNacApplRadiusChallenges=etsysNacApplRadiusChallenges, etsysNacApplAuthenticationMalformedRequests=etsysNacApplAuthenticationMalformedRequests, etsysNacApplContactLostSwitches=etsysNacApplContactLostSwitches, etsysNacApplAuthenticationDroppedPackets=etsysNacApplAuthenticationDroppedPackets, etsysNacApplCaptivePortalRequests=etsysNacApplCaptivePortalRequests, etsysNacApplAuthenticationSuccesses=etsysNacApplAuthenticationSuccesses, etsysNacApplIPResolutionFailures=etsysNacApplIPResolutionFailures, etsysNacApplianceMIBConformance=etsysNacApplianceMIBConformance, PYSNMP_MODULE_ID=etsysNacApplianceMIB, etsysNacApplianceMIBGroups=etsysNacApplianceMIBGroups, etsysNacApplianceMIB=etsysNacApplianceMIB, etsysNacApplAuthenticationFailures=etsysNacApplAuthenticationFailures, etsysNacApplianceMIBGroup=etsysNacApplianceMIBGroup, etsysNacApplConnectedAgents=etsysNacApplConnectedAgents)
| 145.813953 | 1,653 | 0.817384 |
7cf7d0d22a5ee01c1d25faa33b9b8f99ef2f0210 | 3,300 | py | Python | Unsupervised/pix2pixHD/extract_frames.py | Kebniss/AutoDetect | 44ca4d6930ef5fbf044ebeed5c9fd925f04bc1a8 | [
"MIT"
] | 1 | 2019-07-25T02:16:32.000Z | 2019-07-25T02:16:32.000Z | Unsupervised/pix2pixHD/extract_frames.py | Kebniss/AutoDetect | 44ca4d6930ef5fbf044ebeed5c9fd925f04bc1a8 | [
"MIT"
] | null | null | null | Unsupervised/pix2pixHD/extract_frames.py | Kebniss/AutoDetect | 44ca4d6930ef5fbf044ebeed5c9fd925f04bc1a8 | [
"MIT"
] | null | null | null | import os
import cv2
import argparse
from utils import *
from tqdm import tqdm
from glob import glob
from pathlib import Path
parser = argparse.ArgumentParser(
description='build a "frame dataset" from a given video')
parser.add_argument('-input', dest="input", required=True,
help='''Path to a single video or a folder. If path to folder the algorithm
will extract frames from all files with extension defined in
--extension and save them under separate folders under dest_folder.
The frames from each video will be saved under a folder with its name.
''')
parser.add_argument('--dest-folder', dest="dest_folder", default='./dataset/',
help='''Path where to store frames. NB all files in this folder will be
removed before adding the new frames''')
parser.add_argument('--same-folder', dest="same_folder", default=False,
help='''Set it to True if you want to save the frames of all videos to the
same folder in ascending order going from the first frame of the first video
to the last frame of the last video. If True frames will be saved in
dest_folder/frames.''')
parser.add_argument('--sampling', help='how many fps', default='3')
parser.add_argument('--run-type', help='train or test', default='train')
parser.add_argument('--extension', help='avi, mp4, mov...', default='mp4')
parser.add_argument('-width', help='output width', default=640, type=int)
parser.add_argument('-height', help='output height', default=480, type=int)
args = parser.parse_args()
mkdir(args.dest_folder)
if (args.width % 32 != 0) or (args.height % 32 != 0):
raise Exception("Please use width and height that are divisible by 32")
if os.path.isdir(args.input):
inp = str(Path(args.input) / f'*.{args.extension}')
videos = [v for v in glob(inp)]
if not videos:
raise Exception(f'No {args.extension} files in input directory {args.input}')
elif os.path.isfile(args.input):
_, ext = get_filename_extension(args.input)
if ext != args.extension:
raise ValueError(f'Correct inputs: folder or path to {args.extension} file only')
videos = [args.input]
else:
raise ValueError(f'Correct inputs: folder or path to {args.extension} file only')
if args.same_folder:
start = 0
dest_folder = str(Path(args.dest_folder) / f'{args.run_type}_frames')
mkdir(dest_folder)
for v in tqdm(videos):
if not args.same_folder:
start = 0
name, _ = get_filename_extension(v)
dest_folder = str(Path(args.dest_folder) / name)
mkdir(dest_folder)
start = _extract_frames(v, dest_folder, start, sampling_f=int(args.sampling))
| 39.285714 | 89 | 0.677879 |
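# A minimal sketch, not from the original repository: _extract_frames() is called
# above but its body is not included in this excerpt. An OpenCV version with the
# same call shape (returning the next frame index) might look like this; the
# resize to args.width/args.height and the sampling policy are assumptions.
def _extract_frames_sketch(video_path, dest_folder, start, sampling_f=3):
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 30  # fall back when FPS metadata is missing
    step = max(int(round(fps / sampling_f)), 1)  # keep roughly sampling_f frames/sec
    index, saved = 0, start
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        if index % step == 0:
            frame = cv2.resize(frame, (args.width, args.height))
            cv2.imwrite(str(Path(dest_folder) / f'{saved:06d}.png'), frame)
            saved += 1
        index += 1
    cap.release()
    return saved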
7cf872b37fe89e4b5c0f3a287b43439b1d433523 | 2,923 | py | Python | burp-filter-options/filter-options.py | parsiya/Parsia-Code | e75bd9f7f295e6d8e584de67f90dd02cb75ae915 | [
"MIT"
] | 21 | 2018-09-10T03:09:17.000Z | 2022-02-07T09:20:03.000Z | burp-filter-options/filter-options.py | parsiya/Parsia-Code | e75bd9f7f295e6d8e584de67f90dd02cb75ae915 | [
"MIT"
] | 1 | 2019-11-10T21:17:23.000Z | 2020-01-19T04:36:19.000Z | burp-filter-options/filter-options.py | parsiya/Parsia-Code | e75bd9f7f295e6d8e584de67f90dd02cb75ae915 | [
"MIT"
] | 8 | 2019-10-11T23:29:58.000Z | 2021-05-26T12:11:43.000Z | # modified "example traffic redirector"
# https://raw.githubusercontent.com/PortSwigger/example-traffic-redirector/master/python/TrafficRedirector.py
# Idea: https://github.com/pajswigger/filter-options/blob/master/src/filter-options.kt
# Usage: Put both files in a directory and add filter-options.py to Burp. Needs Jython.
# Blog post: https://parsiya.net/blog/2019-04-06-hiding-options-an-adventure-in-dealing-with-burp-proxy-in-an-extension/
# support for burp-exceptions - see https://github.com/securityMB/burp-exceptions
try:
from exceptions_fix import FixBurpExceptions
import sys
except ImportError:
pass
# support for burputils - https://github.com/parsiya/burputils
try:
from burputils import BurpUtils
except ImportError:
pass
from burp import IBurpExtender
from burp import IHttpListener
# support for burp-exceptions
try:
FixBurpExceptions()
except:
pass | 31.095745 | 121 | 0.659254 |
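# A minimal sketch, not from the original repository: the extension class itself
# is not included in this excerpt. The imports above imply the usual Burp
# skeleton; the OPTIONS-handling detail below is an assumption, shown only to
# make the boilerplate concrete.
class BurpExtenderSketch(IBurpExtender, IHttpListener):
    def registerExtenderCallbacks(self, callbacks):
        self.helpers = callbacks.getHelpers()
        callbacks.setExtensionName('Filter OPTIONS')
        callbacks.registerHttpListener(self)

    def processHttpMessage(self, toolFlag, messageIsRequest, messageInfo):
        if messageIsRequest:
            method = self.helpers.analyzeRequest(messageInfo).getMethod()
            if method == 'OPTIONS':
                messageInfo.setHighlight('gray')  # mark the noise for filtering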
7cf8865345a71c46f4e1edec308e018d877fedb9 | 11,128 | py | Python | AppServer/google/appengine/tools/devappserver2/login.py | loftwah/appscale | 586fc1347ebc743d7a632de698f4dbfb09ae38d6 | [
"Apache-2.0"
] | 790 | 2015-01-03T02:13:39.000Z | 2020-05-10T19:53:57.000Z | AppServer/google/appengine/tools/devappserver2/login.py | loftwah/appscale | 586fc1347ebc743d7a632de698f4dbfb09ae38d6 | [
"Apache-2.0"
] | 1,361 | 2015-01-08T23:09:40.000Z | 2020-04-14T00:03:04.000Z | AppServer/google/appengine/tools/devappserver2/login.py | loftwah/appscale | 586fc1347ebc743d7a632de698f4dbfb09ae38d6 | [
"Apache-2.0"
] | 155 | 2015-01-08T22:59:31.000Z | 2020-04-08T08:01:53.000Z | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Handles login/logout pages and dealing with user cookies.
Includes a WSGI application that serves the login page and handles login and
logout HTTP requests. It accepts these GET query parameters:
continue: URL to redirect to after a login or logout has completed.
email: Email address to set for the client.
admin: If 'True', the client should be logged in as an admin.
action: What action to take ('Login' or 'Logout').
To view the current user information and a form for logging in and out,
supply no parameters.
"""
import cgi
import Cookie
import hashlib
import logging
import os
import sha
import sys
import urllib
import uuid
import webapp2
app_dashboard_lib = '/../../../../../AppDashboard/lib'
sys.path.append(os.path.dirname(__file__) + app_dashboard_lib)
from app_dashboard_helper import AppDashboardHelper
# URL of the login page within the dev appserver.
LOGIN_URL_RELATIVE = '_ah/login'
# CGI parameter constants.
CONTINUE_PARAM = 'continue'
_EMAIL_PARAM = 'email'
_ADMIN_PARAM = 'admin'
ACTION_PARAM = 'action'
# Values for the action parameter.
LOGOUT_ACTION = 'logout'
LOGIN_ACTION = 'login'
# Name of the cookie that stores the user info.
_COOKIE_NAME = 'dev_appserver_login'
# Indicates that the user has admin access to all applications.
CLOUD_ADMIN_MARKER = 'CLOUD_ADMIN'
# The port that the AppDashboard serves HTTPS traffic on.
DASHBOARD_HTTPS_PORT = "1443"
def get_user_info(http_cookie, cookie_name=_COOKIE_NAME):
"""Gets the requestor's user info from an HTTP Cookie header.
Args:
http_cookie: The value of the 'Cookie' HTTP request header.
cookie_name: The name of the cookie that stores the user info.
Returns:
A tuple (email, admin, user_id) where:
email: The user's email address, if any.
admin: True if the user is an admin; False otherwise.
user_id: The user ID, if any.
"""
try:
cookie = Cookie.SimpleCookie(http_cookie)
except Cookie.CookieError:
return '', False, ''
cookie_dict = dict((k, v.value) for k, v in cookie.iteritems())
return _get_user_info_from_dict(cookie_dict, cookie_name)
def _get_user_info_from_dict(cookie_dict, cookie_name=_COOKIE_NAME):
"""Gets the requestor's user info from a cookie dictionary.
Args:
cookie_dict: A dictionary mapping cookie names onto values.
cookie_name: The name of the cookie that stores the user info.
Returns:
A tuple (email, admin, user_id) where:
email: The user's email address, if any.
admin: True if the user is an admin; False otherwise.
user_id: The user ID, if any.
"""
cookie_secret = os.environ['COOKIE_SECRET']
cookie_value = cookie_dict.get(cookie_name, '')
cookie_value = cookie_value.replace("%3A",":")
cookie_value = cookie_value.replace("%40",'@')
cookie_value = cookie_value.replace("%2C",",")
email, nickname, admin, hsh = (cookie_value.split(':') + ['', '', '', ''])[:4]
if email == '':
nickname = ''
admin = ''
return '', False, ''
else:
vhsh = sha.new(email+nickname+admin+cookie_secret).hexdigest()
if hsh != vhsh:
logging.info("{0} has an invalid cookie, so ignoring it.".format(email))
return '', False, ''
admin_apps = admin.split(',')
current_app = os.environ['APPLICATION_ID']
is_admin = current_app in admin_apps or CLOUD_ADMIN_MARKER in admin_apps
return email, is_admin, nickname
def _create_cookie_data(email, admin):
"""Creates cookie payload data.
Args:
email: The user's email address.
admin: True if the user is an admin; False otherwise.
Returns:
A string containing the cookie payload.
"""
if email:
user_id_digest = hashlib.md5(email.lower()).digest()
user_id = '1' + ''.join(['%02d' % ord(x) for x in user_id_digest])[:20]
else:
user_id = ''
return '%s:%s:%s' % (email, admin, user_id)
def _set_user_info_cookie(email, admin, cookie_name=_COOKIE_NAME):
"""Creates a cookie to set the user information for the requestor.
Args:
email: The email to set for the user.
admin: True if the user should be admin; False otherwise.
cookie_name: The name of the cookie that stores the user info.
Returns:
Set-Cookie value for setting the user info of the requestor.
"""
cookie_value = _create_cookie_data(email, admin)
cookie = Cookie.SimpleCookie()
cookie[cookie_name] = cookie_value
cookie[cookie_name]['path'] = '/'
return cookie[cookie_name].OutputString()
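# Example header value (shape only, values hypothetical):
#   _set_user_info_cookie('a@a.com', 'True') returns something like
#   'dev_appserver_login="a@a.com:True:112..."; Path=/'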
def _clear_user_info_cookie(cookie_name=_COOKIE_NAME):
"""Clears the user info cookie from the requestor, logging them out.
Args:
cookie_name: The name of the cookie that stores the user info.
Returns:
A Set-Cookie value for clearing the user info of the requestor.
"""
cookie = Cookie.SimpleCookie()
cookie[cookie_name] = ''
cookie[cookie_name]['path'] = '/'
cookie[cookie_name]['max-age'] = '0'
if AppDashboardHelper.USE_SHIBBOLETH:
cookie[cookie_name]['domain'] = AppDashboardHelper.\
SHIBBOLETH_COOKIE_DOMAIN
return cookie[cookie_name].OutputString()
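# Clearing works by expiring the cookie immediately; the returned header is
# of the form 'dev_appserver_login=; Max-Age=0; Path=/' (plus the Shibboleth
# cookie domain when that mode is enabled), telling the browser to drop it.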
_LOGIN_TEMPLATE = """<html>
<head>
<title>Login</title>
</head>
<body>
<form method="get" action="%(login_url)s"
style="text-align:center; font: 13px sans-serif">
<div style="width: 20em; margin: 1em auto;
text-align:left;
padding: 0 2em 1.25em 2em;
background-color: #d6e9f8;
border: 2px solid #67a7e3">
<h3>%(login_message)s</h3>
<p style="padding: 0; margin: 0">
<label for="email" style="width: 3em">Email:</label>
<input name="email" type="email" value="%(email)s" id="email"/>
</p>
<p style="margin: .5em 0 0 3em; font-size:12px">
<input name="admin" type="checkbox" value="True"
%(admin_checked)s id="admin"/>
<label for="admin">Sign in as Administrator</label>
</p>
<p style="margin-left: 3em">
<input name="action" value="Login" type="submit"
id="submit-login" />
<input name="action" value="Logout" type="submit"
id="submit-logout" />
</p>
</div>
<input name="continue" type="hidden" value="%(continue_url)s"/>
</form>
</body>
</html>
"""
def _render_login_template(login_url, continue_url, email, admin):
"""Renders the login page.
Args:
login_url: The parameter to _login_response.
continue_url: The parameter to _login_response.
email: The email address of the current user, if any.
admin: True if the user is currently an admin; False otherwise.
Returns:
A string containing the contents of the login page.
"""
if email:
login_message = 'Logged in'
else:
login_message = 'Not logged in'
email = 'test\x40example.com'
admin_checked = 'checked' if admin else ''
template_dict = {
'email': cgi.escape(email, quote=True),
'admin_checked': admin_checked,
'login_message': login_message,
'login_url': cgi.escape(login_url, quote=True),
'continue_url': cgi.escape(continue_url, quote=True),
}
return _LOGIN_TEMPLATE % template_dict
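# Rendering sketch (values hypothetical):
#   _render_login_template('/_ah/login', 'http://host/?a=1&b=2', 'a@a.com',
#                          True)
# substitutes cgi-escaped values into _LOGIN_TEMPLATE, so the continue URL
# appears in the form's hidden field as 'http://host/?a=1&amp;b=2'.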
def login_redirect(application_url, continue_url, start_response):
"""Writes a login redirection URL to a user.
This redirects to login_url with a continue parameter to return to
continue_url. The login_url should be on the canonical front-end server,
regardless of the host:port the user connected to.
Args:
application_url: The URL of the dev appserver domain
(e.g., 'http://localhost:8080').
continue_url: The URL to continue to after the user logs in.
start_response: A WSGI start_response function.
Returns:
An (empty) iterable over strings containing the body of the HTTP response.
"""
if AppDashboardHelper.USE_SHIBBOLETH:
redirect_url = '{0}:{1}/login?{2}={3}'.format(
AppDashboardHelper.SHIBBOLETH_CONNECTOR,
AppDashboardHelper.SHIBBOLETH_CONNECTOR_PORT,
CONTINUE_PARAM,
urllib.quote(continue_url)
)
else:
hostname = os.environ['NGINX_HOST']
redirect_url = 'https://{0}:{1}/login?{2}={3}'.format(
hostname,
DASHBOARD_HTTPS_PORT,
CONTINUE_PARAM,
urllib.quote(continue_url))
start_response('302 Requires login',
[('Location', redirect_url)])
return []
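# Resulting header (illustrative): for continue_url 'http://localhost:8080/'
# the non-Shibboleth branch redirects to
#   Location: https://<NGINX_HOST>:1443/login?continue=http%3A//localhost%3A8080/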
def fake_admin():
""" Generate the fake admin login secret
Returns:
A string containing the fake login secret
"""
return hashlib.sha1('{}/{}'.format(
os.environ.get('APPNAME', str(uuid.uuid4())),
os.environ.get('COOKIE_SECRET', str(uuid.uuid4())))).hexdigest()
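# The secret is stable for a given APPNAME/COOKIE_SECRET pair, e.g. (values
# hypothetical) hashlib.sha1('guestbook/s3cret').hexdigest(). Note that the
# Handler class routed to below was stripped from this extract of the module.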
application = webapp2.WSGIApplication([('/.*', Handler)], debug=True)
| 31.885387 | 80 | 0.689162 |
7cf8d4321937161cb10d000e0dbd87e721b04ad3 | 7,060 | py | Python | sdks/python/apache_beam/runners/portability/job_server.py | noah-goodrich/beam | 5a851b734f53206c20efe08d93d15760bbc15b0c | ["Apache-2.0"] | 1 | 2019-12-05T04:36:46.000Z | 2019-12-05T04:36:46.000Z | sdks/python/apache_beam/runners/portability/job_server.py | noah-goodrich/beam | 5a851b734f53206c20efe08d93d15760bbc15b0c | ["Apache-2.0"] | 14 | 2020-02-12T22:20:41.000Z | 2021-11-09T19:41:23.000Z | sdks/python/apache_beam/runners/portability/job_server.py | violalyu/beam | dd605e568d70b1a6ebea60c15b2aec3e240f3914 | ["Apache-2.0"] | 1 | 2021-03-21T23:28:23.000Z | 2021-03-21T23:28:23.000Z |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import atexit
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import grpc
from apache_beam.portability.api import beam_job_api_pb2_grpc
from apache_beam.runners.portability import local_job_service
from apache_beam.utils import subprocess_server
from apache_beam.version import __version__ as beam_version
class DockerizedJobServer(SubprocessJobServer):
"""
Spins up the JobServer in a docker container for local execution.
"""
| 33.619048 | 79 | 0.703258 |
7cf950bf294a91d7beefe8c59885eaed2c328e0e | 14,856 | py | Python | sympy/printing/pycode.py | tachycline/sympy | abf6fec12012852c7e6fae38461da9723cadc8b9 | ["BSD-3-Clause"] | null | null | null | sympy/printing/pycode.py | tachycline/sympy | abf6fec12012852c7e6fae38461da9723cadc8b9 | ["BSD-3-Clause"] | null | null | null | sympy/printing/pycode.py | tachycline/sympy | abf6fec12012852c7e6fae38461da9723cadc8b9 | ["BSD-3-Clause"] | null | null | null |
from collections import defaultdict
from functools import wraps
from itertools import chain
from sympy.core import sympify
from .precedence import precedence
from .codeprinter import CodePrinter
_kw_py2and3 = {
'and', 'as', 'assert', 'break', 'class', 'continue', 'def', 'del', 'elif',
'else', 'except', 'finally', 'for', 'from', 'global', 'if', 'import', 'in',
'is', 'lambda', 'not', 'or', 'pass', 'raise', 'return', 'try', 'while',
'with', 'yield', 'None' # 'None' is actually not in Python 2's keyword.kwlist
}
_kw_only_py2 = {'exec', 'print'}
_kw_only_py3 = {'False', 'nonlocal', 'True'}
_known_functions = {
'Abs': 'abs',
}
_known_functions_math = {
'acos': 'acos',
'acosh': 'acosh',
'asin': 'asin',
'asinh': 'asinh',
'atan': 'atan',
'atan2': 'atan2',
'atanh': 'atanh',
'ceiling': 'ceil',
'cos': 'cos',
'cosh': 'cosh',
'erf': 'erf',
'erfc': 'erfc',
'exp': 'exp',
'expm1': 'expm1',
'factorial': 'factorial',
'floor': 'floor',
'gamma': 'gamma',
'hypot': 'hypot',
'loggamma': 'lgamma',
'log': 'log',
'log10': 'log10',
'log1p': 'log1p',
'log2': 'log2',
'sin': 'sin',
'sinh': 'sinh',
'Sqrt': 'sqrt',
'tan': 'tan',
'tanh': 'tanh'
} # Not used from ``math``: [copysign isclose isfinite isinf isnan ldexp frexp pow modf
# radians trunc fmod fsum gcd degrees fabs]
_known_constants_math = {
'Exp1': 'e',
'Pi': 'pi',
# Only in python >= 3.5:
# 'Infinity': 'inf',
# 'NaN': 'nan'
}
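# The printer classes (PythonCodePrinter and friends) and the two helpers the
# setattr loops below rely on were stripped from this extract. The following
# is a plausible reconstruction, an assumption rather than the verbatim SymPy
# source: each helper looks up the target-module spelling of a function or
# constant and prints the call or name.
def _print_known_func(self, expr):
    known = self.known_functions[expr.__class__.__name__]
    return '{name}({args})'.format(name=self._module_format(known),
                                   args=', '.join(map(self._print, expr.args)))
def _print_known_const(self, expr):
    known = self.known_constants[expr.__class__.__name__]
    return self._module_format(known)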
for k in PythonCodePrinter._kf:
setattr(PythonCodePrinter, '_print_%s' % k, _print_known_func)
for k in _known_constants_math:
setattr(PythonCodePrinter, '_print_%s' % k, _print_known_const)
_not_in_mpmath = 'log1p log2'.split()
_in_mpmath = [(k, v) for k, v in _known_functions_math.items() if k not in _not_in_mpmath]
_known_functions_mpmath = dict(_in_mpmath)
_known_constants_mpmath = {
'Pi': 'pi'
}
for k in MpmathPrinter._kf:
setattr(MpmathPrinter, '_print_%s' % k, _print_known_func)
for k in _known_constants_mpmath:
setattr(MpmathPrinter, '_print_%s' % k, _print_known_const)
_not_in_numpy = 'erf erfc factorial gamma lgamma'.split()
_in_numpy = [(k, v) for k, v in _known_functions_math.items() if k not in _not_in_numpy]
_known_functions_numpy = dict(_in_numpy, **{
'acos': 'arccos',
'acosh': 'arccosh',
'asin': 'arcsin',
'asinh': 'arcsinh',
'atan': 'arctan',
'atan2': 'arctan2',
'atanh': 'arctanh',
'exp2': 'exp2',
})
for k in NumPyPrinter._kf:
setattr(NumPyPrinter, '_print_%s' % k, _print_known_func)
for k in NumPyPrinter._kc:
setattr(NumPyPrinter, '_print_%s' % k, _print_known_const)
_known_functions_scipy_special = {
'erf': 'erf',
'erfc': 'erfc',
'gamma': 'gamma',
'loggamma': 'gammaln'
}
_known_constants_scipy_constants = {
'GoldenRatio': 'golden_ratio'
}
for k in SciPyPrinter._kf:
setattr(SciPyPrinter, '_print_%s' % k, _print_known_func)
for k in SciPyPrinter._kc:
setattr(SciPyPrinter, '_print_%s' % k, _print_known_const)
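# Illustrative usage (assumes the stripped printer classes; output shapes are
# what one would expect, not verified against this exact revision):
#     from sympy import symbols, sin
#     x = symbols('x')
#     PythonCodePrinter().doprint(sin(x))  # -> 'math.sin(x)'
#     MpmathPrinter().doprint(sin(x))      # -> 'mpmath.sin(x)'
#     NumPyPrinter().doprint(sin(x))       # -> 'numpy.sin(x)'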
| 34.388889 | 119 | 0.601642 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.