seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64) | dataset (string) | pt (string) | api (list)
---|---|---|---|---|---|---|---|---|---|---|---|---|---
461701069
|
from django.contrib.auth.tokens import default_token_generator
from django.core import mail
from rest_framework import status, viewsets
from rest_framework.decorators import action, api_view
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework_simplejwt.tokens import RefreshToken
from api.permissions import IsAdmin
from api.serializers import UserSerializer
from api_yamdb.settings import ADMIN_EMAIL
from .models import User
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
permission_classes = [IsAuthenticated, IsAdmin, ]
http_method_names = ['get', 'post', 'patch', 'delete']
lookup_field = 'username'
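# Extra "me" action below: lets any authenticated user read (GET) or update (PATCH) their own profile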
@action(methods=['get', 'patch', ], detail=False, permission_classes=[IsAuthenticated, ])
def me(self, request):
user = User.objects.get(username=request.user.username)
if request.method == 'GET':
serializer = UserSerializer(user)
return Response(data=serializer.data, status=status.HTTP_200_OK)
if request.method == 'PATCH':
serializer = UserSerializer(user, data=request.data, partial=True)
if serializer.is_valid():
serializer.save()
return Response(data=serializer.data, status=status.HTTP_200_OK)
return Response(data=serializer.errors, status=status.HTTP_200_OK)
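# Signup endpoint: creates the user from the e-mail address if needed and mails out a confirmation code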
@api_view(['POST', ])
def signup(request):
email = request.POST['email']
if not User.objects.filter(email=email).exists():
username = email.split('@')[0]
user = User.objects.create(username=username, email=email)
else:
user = User.objects.filter(email=email).first()
code = default_token_generator.make_token(user)
mail.send_mail(
subject='Your YaMDb confirmation code',
message=f'"confirmation_code": "{code}"',
from_email=ADMIN_EMAIL,
recipient_list=[email, ],
fail_silently=True
)
return Response(data={'email': email}, status=status.HTTP_200_OK)
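# Login endpoint: exchanges e-mail + confirmation code for a JWT access token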
@api_view(['POST', ])
def login(request):
email = request.POST['email']
confirmation_code = request.POST['confirmation_code']
user = User.objects.filter(email=email).first()
data = {'field_name': []}
if user is None:
data['field_name'].append('email')
if not default_token_generator.check_token(user, confirmation_code):
data['field_name'].append('confirmation_code')
if len(data['field_name']) != 0:
return Response(data=data, status=status.HTTP_400_BAD_REQUEST)
token = RefreshToken.for_user(user)
return Response(data={'token': str(token.access_token)}, status=status.HTTP_200_OK)
| null |
users/views.py
|
views.py
|
py
| 2,744 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "rest_framework.viewsets.ModelViewSet",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.viewsets",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "models.User.objects.all",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "api.serializers.UserSerializer",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.IsAuthenticated",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "api.permissions.IsAdmin",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "models.User.objects.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "api.serializers.UserSerializer",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "api.serializers.UserSerializer",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "rest_framework.decorators.action",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "rest_framework.permissions.IsAuthenticated",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "models.User.objects.filter",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "models.User.objects.create",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "models.User.objects.filter",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.tokens.default_token_generator.make_token",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.tokens.default_token_generator",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "django.core.mail.send_mail",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.core.mail",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "api_yamdb.settings.ADMIN_EMAIL",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "models.User.objects.filter",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.tokens.default_token_generator.check_token",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.tokens.default_token_generator",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "rest_framework_simplejwt.tokens.RefreshToken.for_user",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "rest_framework_simplejwt.tokens.RefreshToken",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 56,
"usage_type": "call"
}
] |
95814644
|
#---------------------------------------------------------------------
# Basic ePub reader written in Python, using wxPython for the GUI
# Author: Michael Stover
#
# Status: Work in Progress
# To-Do:
# - Add function that places images into memory
# - Add function for placing CSS files into memory
# - Add function that "fixes" links so that they point to memory
# - Re-add "Day/Night" mode
# - Add Text Size adjustments
# - Add Help/About
# - "Prettify" Toolbar icons
#---------------------------------------------------------------------
import os
import sys
import wx
import wx.html
import _worker
class HtmlWindow(wx.html.HtmlWindow):
'''Subclass of wx.html.HtmlWindow. Allows users to click internal
chapter links to skip to the desired chapter, or to open external
links in the default browser.'''
def __init__(self, parent, *arg, **kw):
'''Constructor.'''
wx.html.HtmlWindow.__init__(self, parent, *arg, **kw)
if "gtk2" in wx.PlatformInfo:
self.SetStandardFonts()
self.parent = parent
self.PrimeFrame = wx.GetTopLevelParent(self.parent)
def OnLinkClicked(self, link):
'''Override default behavior and perform basic check on
the link clicked. Attempt to load links in default browser.'''
clicked = link.GetHref()
if clicked.startswith('http') or clicked.startswith('www'):
wx.LaunchDefaultBrowser(link.GetHref())
else:
pass
class MainFrame(wx.Frame):
def __init__(self, parent=None, *arg, **kw):
'''Constructor for initialization of gui, variables and
MemoryFSHandler'''
super(MainFrame, self).__init__(parent, *arg, **kw)
self.panel = wx.Panel(self, -1)
self.epubFile = []
self.epubBookTitle = []
self.epubInfo = {}
self.theBook = []
self.epubImages = []
self.epubContents = []
self.epubChapters = []
self.epubBookmark = {}
self.nightMode = False
self.currentSection = 0
# Set the filesystem handler
wx.FileSystem.AddHandler(wx.MemoryFSHandler())
# Set up our htmlWin object
self.htmlWin = HtmlWindow(self.panel)
self.TopSizer = wx.BoxSizer(wx.VERTICAL)
self.TopSizer.Add(self.htmlWin, 1, wx.EXPAND|wx.ALL, 5)
self.panel.SetSizerAndFit(self.TopSizer)
self.SetSize((650,400))
self.onInitTB()
def onInitTB(self):
'''Initialize the Toolbar.'''
self.toolBar = self.CreateToolBar(
style=wx.TB_NODIVIDER
)
self.toolBar.AddSeparator()
toolOpen = self.toolBar.AddLabelTool(wx.ID_ANY, 'Open Book',
wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN))
self.toolBar.AddSeparator()
self.toolBar.AddSeparator()
toolHome = self.toolBar.AddLabelTool(wx.ID_ANY, 'Front Page',
wx.ArtProvider.GetBitmap(wx.ART_GO_HOME))
self.toolBar.AddSeparator()
toolBack = self.toolBar.AddLabelTool(wx.ID_ANY, 'Back',
wx.ArtProvider.GetBitmap(wx.ART_GO_BACK))
toolNext = self.toolBar.AddLabelTool(wx.ID_ANY, 'Next',
wx.ArtProvider.GetBitmap(wx.ART_GO_FORWARD))
self.toolBar.AddStretchableSpace()
self.toolBar.Realize()
self.Bind(wx.EVT_TOOL, self.onSelectEpub, toolOpen)
self.Bind(wx.EVT_TOOL, self.onPageHome, toolHome)
self.Bind(wx.EVT_TOOL, self.onPageNext, toolNext)
self.Bind(wx.EVT_TOOL, self.onPageBack, toolBack)
def OnReset(self):
'''Reset variables for next book, includes removing
previous images from memory.'''
for image in self.epubImages:
wx.MemoryFSHandler.RemoveFile(image)
self.epubChapters = []
self.currentSection = 0
def onSelectEpub(self, event):
'''Open FileDialog to select epub file.'''
wildcard = 'ePub File (*.epub)|*.epub'
dlg = wx.FileDialog(self, 'Choose a file',
'', '', wildcard, wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
self.OnReset()
self.epubFile = os.path.join(
dlg.GetDirectory(),
dlg.GetFilename()
)
self.onLoadEpub(self.epubFile)
dlg.Destroy()
def onLoadEpub(self, inFile):
'''Function to open epub and run various processing
functions on it.'''
self.theBook = _worker.open_epub(inFile)
content_soup = _worker.get_epub_content_soup(self.theBook)
self.epubImages, self.epubText, epubCss = _worker.get_epub_content_lists(content_soup)
self.epubInfo = _worker.get_epub_general_info(content_soup)
self.onLoadImgsToMem(self.theBook)
self.epubBookTitle = self.epubInfo['title']
self.SetTitle('Reading: %s, by: %s' % (self.epubBookTitle, self.epubInfo['creator']))
for chapter in self.epubText:
raw_page = _worker.get_epub_section(self.theBook, chapter)
new_page = _worker.clean_convert_links(raw_page)
self.epubChapters.append(new_page)
self.onLoadPages()
def onLoadImgsToMem(self, epub_file):
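'''Convert each ePub image to a wx.Bitmap and register it with the in-memory filesystem.'''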
for image in self.epubImages:
im_img = _worker.preprocess_image(epub_file, image)
new_image = wx.EmptyImage(im_img.size[0],im_img.size[1])
new_image.SetData(im_img.convert("RGB").tostring())
finalImg = wx.BitmapFromImage(new_image)
wx.MemoryFSHandler.AddFile(image, finalImg, wx.BITMAP_TYPE_BMP)
def onLoadPages(self):
'''Load page into htmlWin.'''
self.htmlWin.SetPage(self.epubChapters[0])
def onPageHome(self, event):
'''Sets the current page back to the beginning of the book.'''
self.currentSection = 0
content = self.epubChapters[self.currentSection]
self.htmlWin.SetPage(content)
def onPageNext(self, event):
'''Change to the next ePub section / chapter. This still needs to be completed.'''
if self.currentSection < len(self.epubText)-1:
self.currentSection += 1
content = self.epubChapters[self.currentSection]
self.htmlWin.SetPage(content)
else:
dlg = wx.MessageBox('We cannot go further than the end of the book!',
'ERROR: End of the line!', wx.OK|wx.ICON_HAND)
event.Skip()
def onPageBack(self, event):
'''Change to the previous ePub section / chapter. This still needs to be completed.'''
if self.currentSection > 0:
self.currentSection -= 1
content = self.epubChapters[self.currentSection]
self.htmlWin.SetPage(content)
else:
dlg = wx.MessageBox('We cannot go further than the start of the book!',
'ERROR: Front Page Reached!', wx.OK|wx.ICON_HAND)
event.Skip()
def onQuit(self, event):
'''Close the application'''
self.Close()
def onLoadCSSToMem(self, epub_file):
'''Load CSS pages into memory.'''
# Needs to be implemented
pass
def RunApp():
'''Initialize wxApp, set primary frame and run MainLoop'''
app = wx.App(False)
frame = MainFrame(title='ePub Reader')
frame.Show()
app.MainLoop()
if __name__ == '__main__':
''''''
RunApp()
| null |
eReader.py
|
eReader.py
|
py
| 7,332 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "wx.html",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "wx.html.HtmlWindow.__init__",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "wx.html",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "wx.PlatformInfo",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "wx.GetTopLevelParent",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "wx.LaunchDefaultBrowser",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "wx.Frame",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "wx.Panel",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "wx.FileSystem.AddHandler",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "wx.FileSystem",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "wx.MemoryFSHandler",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "wx.BoxSizer",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "wx.VERTICAL",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "wx.EXPAND",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "wx.ALL",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "wx.TB_NODIVIDER",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "wx.ID_ANY",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "wx.ArtProvider.GetBitmap",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "wx.ArtProvider",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "wx.ART_FILE_OPEN",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "wx.ID_ANY",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "wx.ArtProvider.GetBitmap",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "wx.ArtProvider",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "wx.ART_GO_HOME",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "wx.ID_ANY",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "wx.ArtProvider.GetBitmap",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "wx.ArtProvider",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "wx.ART_GO_BACK",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "wx.ID_ANY",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "wx.ArtProvider.GetBitmap",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "wx.ArtProvider",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "wx.ART_GO_FORWARD",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_TOOL",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_TOOL",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_TOOL",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_TOOL",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "wx.MemoryFSHandler.RemoveFile",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "wx.MemoryFSHandler",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "wx.FileDialog",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "wx.FD_OPEN",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "wx.ID_OK",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "_worker.open_epub",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "_worker.get_epub_content_soup",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "_worker.get_epub_content_lists",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "_worker.get_epub_general_info",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "_worker.get_epub_section",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "_worker.clean_convert_links",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "_worker.preprocess_image",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "wx.EmptyImage",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "wx.BitmapFromImage",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "wx.MemoryFSHandler.AddFile",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "wx.MemoryFSHandler",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "wx.BITMAP_TYPE_BMP",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "wx.MessageBox",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "wx.OK",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "wx.ICON_HAND",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "wx.MessageBox",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "wx.OK",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "wx.ICON_HAND",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "wx.App",
"line_number": 202,
"usage_type": "call"
}
] |
166665161
|
import rubrik_oracle_module as rbk
import click
import datetime
import pytz
import json
@click.command()
@click.argument('host_cluster_db')
@click.argument('target_host')
@click.option('--time_restore', '-t', type=str, help='Point in time to mount the DB, ISO format is YYYY-MM-DDTHH:MM:SS, example 2019-01-01T20:30:15')
def cli(host_cluster_db, target_host, time_restore):
"""Live mount a Rubrik Oracle Backup.
\b
Gets the backup for the Oracle database on the Oracle database host and will live mount it on the host provided.
\b
Args:
host_cluster_db (str): The hostname the database is running on : The database name
target_host (str): The host to live mount the database. (Must be a compatible Oracle host on Rubrik)
time_restore: The point in time for the live mount iso 8601 format (2019-04-30T18:23:21)
\b
Returns:
live_mount_info (json); JSON text file with the Rubrik cluster response to the live mount request
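\b
Example (hypothetical host and database names):
rubrik_oracle_db_mount.py ora-prod-01:ORCL ora-mount-01 -t 2019-04-30T18:23:21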
"""
rubrik = rbk.connect_rubrik()
cluster_info = rbk.get_cluster_info(rubrik)
timezone = cluster_info['timezone']['timezone']
print("Connected to cluster: {}, version: {}, Timezone: {}.".format(cluster_info['name'], cluster_info['version'], timezone))
host_cluster_db = host_cluster_db.split(":")
oracle_db_id = rbk.get_oracle_db_id(rubrik, host_cluster_db[1], host_cluster_db[0])
oracle_db_info = rbk.get_oracle_db_info(rubrik, oracle_db_id)
# If source DB is RAC then the target for the live mount must be a RAC cluster
if 'racName' in oracle_db_info.keys():
if oracle_db_info['racName']:
host_id = rbk.get_rac_id(rubrik, cluster_info['id'], target_host)
else:
host_id = rbk.get_host_id(rubrik, cluster_info['id'], target_host)
if time_restore:
time_ms = rbk.epoch_time(time_restore, timezone)
print("Using {} for mount.". format(time_restore))
else:
print("Using most recent recovery point for mount.")
oracle_db_info = rbk.get_oracle_db_info(rubrik, oracle_db_id)
time_ms = rbk.epoch_time(oracle_db_info['latestRecoveryPoint'], timezone)
print("Starting Live Mount of {} on {}.".format(host_cluster_db[1], target_host))
live_mount_info = rbk.live_mount(rubrik, oracle_db_id, host_id, time_ms)
# Set the time format for the printed result
cluster_timezone = pytz.timezone(timezone)
utc = pytz.utc
start_time = utc.localize(datetime.datetime.fromisoformat(live_mount_info['startTime'][:-1])).astimezone(cluster_timezone)
fmt = '%Y-%m-%d %H:%M:%S %Z'
print("Live mount status: {}, Started at {}.".format(live_mount_info['status'], start_time.strftime(fmt)))
return json.dumps(live_mount_info)
if __name__ == "__main__":
cli()
| null |
rubrik_oracle_db_mount.py
|
rubrik_oracle_db_mount.py
|
py
| 2,757 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "rubrik_oracle_module.connect_rubrik",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "rubrik_oracle_module.get_cluster_info",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "rubrik_oracle_module.get_oracle_db_id",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "rubrik_oracle_module.get_oracle_db_info",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "rubrik_oracle_module.get_rac_id",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "rubrik_oracle_module.get_host_id",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "rubrik_oracle_module.epoch_time",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "rubrik_oracle_module.get_oracle_db_info",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "rubrik_oracle_module.epoch_time",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "rubrik_oracle_module.live_mount",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pytz.timezone",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pytz.utc",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.fromisoformat",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "click.command",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 11,
"usage_type": "call"
}
] |
645904433
|
# -*- coding: utf-8 -*-
from django.contrib.auth import login,logout,authenticate,get_user_model
from django.views.decorators.http import require_POST
from django.http import HttpResponse,JsonResponse
from .forms import LoginForm,RegisterForm,ChangeUserForm
from utils import restful
from django.shortcuts import render,reverse,redirect
from utils.captcha.xfzcaptcha import Captcha
from io import BytesIO
from django.core.cache import cache
from utils import smssender
import random
#from .models import User
User = get_user_model()#works the same as the commented-out import above
@require_POST
def login_view(request):
form = LoginForm(request.POST)
if form.is_valid():
telephone = form.cleaned_data.get('telephone')
password = form.cleaned_data.get('password')
remember = form.cleaned_data.get('remember')
user1 = authenticate(request,telephone=telephone,password=password)#check whether this user exists
if user1:
if user1.is_active:
login(request,user1)
if remember:
request.session.set_expiry(None)
else:
request.session.set_expiry(0)
return restful.ok()
else:
return restful.unauth(message="您的账号已经冻结")
else:
return restful.params_error(message="您的账号或者密码错误")
else:
errors = form.get_errors()
return restful.params_error(message=errors)
def logout_view(request):
#log out
logout(request)
#redirect to the index page
return redirect(reverse('index'))
@require_POST
def register(request):
form = RegisterForm(request.POST)
if form.is_valid():
telephone = form.cleaned_data.get('telephone')
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password1')
user = User.objects.create_user(telephone=telephone,username=username,password=password)
login(request,user)
return restful.ok()
else:
print(form.get_errors())
return restful.params_error(message="注册失败")
#image captcha
def img_captcha(request):
text,image = Captcha.gene_code()
#BytesIO: works like a pipe, used to hold the image data
out = BytesIO()
# call image.save() to write this image object into the BytesIO buffer
image.save(out, 'png')
# move the BytesIO file pointer back to the beginning
out.seek(0)
response = HttpResponse(content_type='image/png')
# read the image data out of the BytesIO buffer and write it onto the response object
response.write(out.read())
response['Content-length'] = out.tell()#get the file length
# e.g. '12Df': store '12Df'.lower() in the cache
cache.set(text.lower(), text.lower(), 5 * 60)
return response
def sms_captcha(request):
telephone = request.GET.get('telephone')
#generate a random verification code
# code = Captcha.gene_text()
code1 = random.randint(1000,9999)
code = str(code1)
# print(code)
# code+=''
print(type(code))
#store telephone and code in memcached with a 5-minute expiry
cache.set(telephone,code,5*60)
print("验证码",code)
# return restful.ok()
result = smssender.send(telephone,code)
if result:
return restful.ok()
else:
return restful.params_error(message="短信验证码失败")
def cache_test(request):
cache.set("username",'sunyabo',5*60)
result = cache.get("username")
print(result)
return HttpResponse("success")
def save_person(request):
form = ChangeUserForm(request.POST)
if form.is_valid():
username = form.cleaned_data.get('username')
telephone = form.cleaned_data.get('telephone')
email = form.cleaned_data.get('email')
#uid = form.cleaned_data.get('uid')
user1 = request.user#get the current user
user1.email = email
user1.username = username
user1.telephone = telephone
user1.save()
return restful.ok()
else:
return restful.params_error(message="保存失败")
| null |
apps/xfzauth/views.py
|
views.py
|
py
| 4,091 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "forms.LoginForm",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "utils.restful.ok",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "utils.restful",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "utils.restful.unauth",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "utils.restful",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "utils.restful.params_error",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "utils.restful",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "utils.restful.params_error",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "utils.restful",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "django.views.decorators.http.require_POST",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.logout",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.reverse",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "forms.RegisterForm",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "utils.restful.ok",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "utils.restful",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "utils.restful.params_error",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "utils.restful",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "django.views.decorators.http.require_POST",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "utils.captcha.xfzcaptcha.Captcha.gene_code",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "utils.captcha.xfzcaptcha.Captcha",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "io.BytesIO",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache.set",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache.set",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "utils.smssender.send",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "utils.smssender",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "utils.restful.ok",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "utils.restful",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "utils.restful.params_error",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "utils.restful",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "django.core.cache.cache.set",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "django.core.cache.cache.get",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "forms.ChangeUserForm",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "utils.restful.ok",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "utils.restful",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "utils.restful.params_error",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "utils.restful",
"line_number": 122,
"usage_type": "name"
}
] |
56017380
|
import os
import random
import cv2
import numpy as np
from mobilenetv2 import train_L2
import test_Predict_keras
from test_Predict_tflite import tflite_predition
import argparse
''' This project classifies automotive spare-part images as either "Healthy" or "Defected".
The main logic is in the mobilenetv2 file, where you can customize it further.
'''
def load_training_data(root_dir,classes_dir):
healthy_samples= []
defective_samples= []
for cls in classes_dir:
src = root_dir + cls # Folder to copy images from
train_FileNames = os.listdir(src)
print(cls)
if cls=='/YE358311_defects':
defective_samples=[src+'/'+ name for name in train_FileNames]
print('Images in defective set are: ', len(defective_samples))
else:
healthy_samples = [src+'/'+ name for name in train_FileNames]
print('Images in healthy set are: ', len(healthy_samples))
return defective_samples,healthy_samples
def data_preprocessing(image_path,image_width,image_height):
"""
Pre-process the data: align and resize the original images.
"""
image=cv2.imread(image_path)
image = np.array(image, dtype=np.uint8)
#Rotating if image is vertical
if image.shape[1]<image.shape[0]:
image = np.rot90(image)
image=cv2.resize(image, (image_width,image_height), interpolation=cv2.INTER_CUBIC)
# cv2.imwrite(image_path,image)
return image
def training_data_preparation(defective_samples,healthy_samples,image_width,image_height):
#Label the dataset- Healthy and Defective
y=[]
x=[]
for image in defective_samples:
x.append(data_preprocessing(image,image_width,image_height))
y.append(1)
print("defective_samples is ",len(x),len(defective_samples))
for image in healthy_samples:
x.append(data_preprocessing(image,image_width,image_height))
y.append(0)
print("healthy_samples is ",len(x))
# Shuffle the data samples randomly while keeping samples and labels paired
data = list(zip(x, y))
random.shuffle(data)
x_rand, y_rand = zip(*data)
#list to array
x_train = np.array(x_rand)
y_train = np.array(y_rand)
x_data_size=x_train.shape
y_data_size=y_train.shape
print("Shape of training dataset is",x_data_size,y_data_size)
return x_train,y_train
def main():
###****** This is the main function of Defect-detection-in-automative-parts project
ap = argparse.ArgumentParser()
ap.add_argument("--opMode", "-mode", default='Test',
help="opertation mode test or trained")
ap.add_argument("--Data_Directory", "-datadir", default='dataset/YE358311_Fender_apron',
help="Path to test data")
ap.add_argument("--classes_dir", "-classes_dir", default=['/YE358311_defects', '/YE358311_Healthy'],
help="Different data class")
ap.add_argument("--classes", "-no_classes", default=2,
help="number of classes")
ap.add_argument("--epoch", "-no_iterations", default=5,
help="number of epoch for learning model")
ap.add_argument("--alpha", "-alpha", default=0.5,
help="alpha for learning model")
ap.add_argument("--batch", "-batchsize", default=8,
help="number of images in one batch")
args = vars(ap.parse_args())
trained_models = 'Trained_model'
Keras_model='MobileNet_model_keras.h5'
Tf_model = 'model.pb'
tflite_model='converted_model.tflite'
image_width = 224
image_height = 224
chanel=3
input_shape = (image_width, image_height, chanel)
data_dir = args["Data_Directory"]
classes_dir = args['classes_dir']
no_of_classes= args['classes']
epoch = args['epoch']
alpha = args['alpha']
batch = args['batch']
opMode = args["opMode"]
###***************Train deep learnig model****************************##
if opMode == 'Train':
training_data_dir=data_dir+'/train'
train_defective_samples,train_healthy_samples=load_training_data(training_data_dir,classes_dir)
x_train,y_train=training_data_preparation(train_defective_samples,train_healthy_samples,image_width,image_height)
print("L2-SoftmaxLoss training...")
train_L2(x_train, y_train, no_of_classes,input_shape, epoch, alpha,batch, True, True, trained_models, Keras_model, Tf_model, tflite_model)
###***************Test the model performance on test Dataset (unseen samples)****************************##
elif opMode == 'Test':
test_data_dir=data_dir+'/test'
test_defective_samples,test_healthy_samples=load_training_data(test_data_dir,classes_dir)
x_test,y_test=training_data_preparation(test_defective_samples,test_healthy_samples,image_width,image_height)
###***************Classify images using Keras model****************************##
# test_Predict_keras.predict_images_labels(x_test,y_test)
###***************Classify images using TFLite model****************************##
tflite_predition(x_test,y_test,trained_models,tflite_model)
if __name__== "__main__":
main()
| null |
main.py
|
main.py
|
py
| 5,149 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.listdir",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "numpy.rot90",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_CUBIC",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "random.shuffle",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "mobilenetv2.train_L2",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "test_Predict_tflite.tflite_predition",
"line_number": 117,
"usage_type": "call"
}
] |
78972637
|
from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.models import UserManager as AuthUserManager
from django.contrib.auth.signals import user_logged_in
from django.core.mail import send_mail
from django.db.models.signals import post_save
from django.db import models
class UserManager(AuthUserManager):
def create_superuser(self, username, email, password, **extra_fields):
extra_fields.setdefault('sex', 'm')
return super().create_superuser(username, email, password, **extra_fields)
class User(AbstractUser):
sex = models.CharField(
max_length=1,
choices=(
('f', 'female'),
('m', 'male'),
))
score = models.CharField(default="", max_length=100, null=True, blank=True)
objects = UserManager()
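# One-to-one profile record attached to each user account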
class Profile(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
usr_id = models.TextField(blank=True)
#website_url = models.URLField(blank=True)
#score = models.CharField(default="", max_length=100, null=True, blank=True)
def on_post_save_for_user(sender, **kwargs):
if kwargs['created']:
# sign-up time
user = kwargs['instance']
# send a welcome e-mail
send_mail(
'환영합니다.',
'Here is the message.',
'[email protected]',
[user.email],
fail_silently=False,
)
post_save.connect(on_post_save_for_user, sender=settings.AUTH_USER_MODEL)
class UserSession(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, editable=False)
session_key = models.CharField(max_length=40, editable=False)
created_at = models.DateTimeField(auto_now_add=True)
def on_user_logged_in(sender, request, user, **kwargs):
user.is_user_logged_in = True
user_logged_in.connect(on_user_logged_in)
| null |
models.py
|
models.py
|
py
| 1,966 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.contrib.auth.models.UserManager",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.AbstractUser",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.db.models.OneToOneField",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.AUTH_USER_MODEL",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.TextField",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.core.mail.send_mail",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.post_save.connect",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.post_save",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.AUTH_USER_MODEL",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.AUTH_USER_MODEL",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.CharField",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.signals.user_logged_in.connect",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.signals.user_logged_in",
"line_number": 63,
"usage_type": "name"
}
] |
218548589
|
import sqlite3
# Setting up the database
def connect():
con = sqlite3.connect("movies.db") # reaching for 'movies' database using sqlite3.connection
cur = con.cursor() # using .cursor we can write commands in sql to our database
cur.execute("CREATE TABLE IF NOT EXISTS movies (ID INTEGER PRIMARY KEY, Title TEXT, Year INTEGER, Director TEXT, Length FLOAT, IMN INTEGER)")
con.commit() # submitting changes
con.close()
# Adding a new row to database
def add(t, y, d, l, id):
con = sqlite3.connect("movies.db")
cur = con.cursor()
cur.execute("INSERT INTO movies VALUES (NULL,?,?,?,?,?)", (t, y, d, l, id))
# the NULL value is for the ID column in the movies database,
# we set it to NULL because it is a PRIMARY KEY whose value is generated automatically
con.commit()
con.close()
# Deleting a row from the database by its ID
def delete(id):
con = sqlite3.connect("movies.db")
cur = con.cursor()
cur.execute("DELETE FROM movies WHERE ID=?",(id,))
con.commit()
con.close()
# Updating an existing row in the database and assigning new values to it
def update(ID, Title, Year, Director, Length, IMN):
con = sqlite3.connect("movies.db")
cur = con.cursor()
cur.execute("UPDATE movies SET Title=?, Year=?, Director=?, Length=?, IMN=? WHERE ID=?", (Title,Year,Director,Length,IMN,ID))
con.commit()
con.close()
# Printing out all the rows in the database
# no need to commit because we didn't make any changes
def view_all():
con = sqlite3.connect("movies.db")
cur = con.cursor()
cur.execute("SELECT * FROM movies")
rows = cur.fetchall() # we'll need to store the database rows in a list so we can print it later
con.close()
return rows
# Search for a movie by its title
# search a title in the database by lowercase value - makes it easy to search a title without being case sensitive...
def search(t):
con = sqlite3.connect("movies.db")
cur = con.cursor()
cur.execute("SELECT * FROM movies WHERE LOWER(Title)=lower(?)",(t,))
rows=cur.fetchall()
con.close()
return rows
| null |
Application 5 - Desktop Database App/Movies/app5_backend.py
|
app5_backend.py
|
py
| 2,150 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sqlite3.connect",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 50,
"usage_type": "call"
}
] |
603776291
|
import sys
from time import sleep
import pygame
from bullet import Bullet
from alien import Alien
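# Event handling and game-logic helpers for the alien-shooter game (ship, bullets, aliens, scoreboard).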
def check_events(ai_settings, screen, stats, play_button, ship, aliens,
bullets,sb):
"""Respond to keypresses and mouse events."""
for event in pygame.event.get():
if event.type == pygame.QUIT:
sb.dump_file()
sys.exit()
elif event.type == pygame.KEYDOWN:
check_keydown_events(event, ai_settings, screen, ship, bullets)
elif event.type == pygame.KEYUP:
check_keyup_events(event,ai_settings, screen, stats, ship, aliens, bullets,sb)
elif event.type == pygame.MOUSEBUTTONDOWN:#detect mouse-click events
check_mousebuttondown(ai_settings, screen, stats, play_button,ship,
aliens, bullets,sb)
def check_mousebuttondown(ai_settings, screen, stats, play_button,ship,
aliens, bullets,sb):
mouse_x, mouse_y = pygame.mouse.get_pos()
check_play_button(ai_settings, screen, stats, play_button,
ship, aliens, bullets,sb, mouse_x, mouse_y)
check_history_button(stats, play_button,sb, mouse_x, mouse_y)
check_reset_button(stats, play_button,sb, mouse_x, mouse_y)
check_return_button(stats, play_button, mouse_x, mouse_y)
def check_keydown_events(event, ai_settings, screen, ship, bullets):
"""Respond to keypresses."""
if event.key == pygame.K_RIGHT:
ship.moving_right = True
elif event.key == pygame.K_LEFT:
ship.moving_left = True
elif event.key == pygame.K_DOWN:
ship.moving_down = True
elif event.key == pygame.K_UP:
ship.moving_up = True
elif event.key == pygame.K_SPACE:
fire_bullet(ai_settings, screen, ship, bullets)
def check_keyup_events(event,ai_settings, screen, stats, ship, aliens, bullets,sb):
"""Respond to key releases."""
if event.key == pygame.K_RIGHT:
ship.moving_right = False
elif event.key == pygame.K_LEFT:
ship.moving_left = False
elif event.key == pygame.K_UP:
ship.moving_up = False
elif event.key == pygame.K_DOWN:
ship.moving_down = False
elif event.key == pygame.K_ESCAPE:
sb.dump_file()
sys.exit()
elif event.key == pygame.K_p:
start_game(ai_settings, screen, stats, ship, aliens, bullets,sb)
def start_game(ai_settings, screen, stats, ship, aliens, bullets,sb):
# Reset the game settings
ai_settings.initialize_dynamic_settings()
# Hide the mouse cursor
pygame.mouse.set_visible(False)
# Reset the game statistics
stats.reset_stats()
stats.game_active = True
# Reset the scoreboard images
sb.prep_score()
sb.prep_high_score()
sb.prep_level()
sb.prep_ships()
# Empty the alien and bullet groups
aliens.empty()
bullets.empty()
# Create a new fleet of aliens and center the ship
create_alien(ai_settings,screen,aliens,stats)
ship.center_ship()
def check_play_button(ai_settings, screen, stats, play_button, ship, aliens, bullets,sb, mouse_x, mouse_y):
"""Start a new game when the player clicks the Play button."""
button_clicked = play_button.play_rect.collidepoint(mouse_x, mouse_y)
if button_clicked and not stats.game_active and not stats.history_button:
start_game(ai_settings, screen, stats, ship, aliens, bullets,sb)
def check_history_button(stats, play_button,sb,mouse_x, mouse_y):
button_clicked = play_button.history_rect.collidepoint(mouse_x, mouse_y)
if button_clicked and not stats.game_active:
stats.history_button = True
sb.load_file()
def check_return_button(stats, play_button, mouse_x, mouse_y):
button_clicked = play_button.return_rect.collidepoint(mouse_x, mouse_y)
if button_clicked and not stats.game_active and stats.history_button:
stats.history_button = False
def check_reset_button(stats, play_button,sb, mouse_x, mouse_y):
button_clicked = play_button.reset_rect.collidepoint(mouse_x, mouse_y)
if button_clicked and not stats.game_active and stats.history_button:
sb.clearly_ranking()
#sb.load_file()
def check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets):
"""Check whether any aliens have reached the bottom of the screen."""
screen_rect = screen.get_rect()
for alien in aliens.sprites():
if alien.rect.bottom >= screen_rect.bottom:
aliens.remove(alien)
break
def update_aliens(ai_settings, screen, stats, sb, ship, aliens, bullets):
"""Update the positions of all the aliens in the fleet."""
aliens.update()
# Look for alien-ship collisions
if pygame.sprite.spritecollideany(ship, aliens):
ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)
# Check whether any aliens have reached the bottom of the screen
check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets)
def ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets):
"""Respond to the ship being hit by an alien."""
if stats.ships_left > 0:
# Decrement ships_left
stats.ships_left -= 1
# Update the scoreboard
sb.prep_ships()
else:
stats.game_active = False
pygame.mouse.set_visible(True)#show the mouse cursor
# Empty the alien and bullet groups
aliens.empty()
bullets.empty()
# Create a new fleet and put the ship back at the bottom center of the screen
create_alien(ai_settings,screen,aliens,stats)
ship.center_ship()
# Pause briefly
sleep(0.5)
def update_screen(ai_settings, screen, stats, sb, ship, aliens, bullets,
play_button):
screen.fill(ai_settings.bg_color)#this method takes a single argument: a color
# Redraw all bullets behind the ship and aliens
for bullet in bullets.sprites():
bullet.draw_bullet()
#draw the ship on the screen
ship.blitme()
aliens.draw(screen)
#alien.blitme()
# Show the score
if stats.history_button:#if the history view is active
play_button.draw_score_ranking()
sb.history_score()
sb.show_score()
# Draw the Play button if the game is inactive
if not stats.game_active and not stats.history_button:
play_button.draw_button()
# Make the most recently drawn screen visible
pygame.display.flip() #keep updating the screen to show the new positions of elements
def fire_bullet(ai_settings,screen,ship,bullets):
if len(bullets) < ai_settings.bullets_allowed:
"""Fire a bullet if the limit has not been reached yet."""
# Create a bullet and add it to the bullets group
new_bullet = Bullet(ai_settings,screen, ship)
bullets.add(new_bullet)
def update_bullets(ai_settings, screen, stats, sb, ship, aliens, bullets):
"""Update bullet positions and remove bullets that have disappeared."""
# Update bullet positions
bullets.update()
# Remove bullets that have left the screen
for bullet in bullets.copy():
if bullet.rect.bottom <= 0:
bullets.remove(bullet)
check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship,
aliens, bullets)
def start_new_level(stats,sb,ai_settings, ship,screen, aliens):
if stats.beat_alien*ai_settings.speedup_scale+stats.score_alien<=stats.score:
# If the level-up threshold has been reached, advance one level
#bullets.empty()
stats.beat_alien += stats.beat_alien*ai_settings.speedup_scale#raise the threshold for the next level
ai_settings.increase_speed()
# Level up
stats.level += 1
sb.prep_level()
# create_fleet(ai_settings, ship,screen, aliens)
def check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship,
aliens, bullets):
"""Respond to bullet-alien collisions."""
# Remove bullets and aliens that have collided
collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)
#if collisions:
for aliens in collisions.values():
stats.score += ai_settings.alien_points * len(aliens)
sb.prep_score()
check_high_score(stats, sb)
start_new_level(stats,sb,ai_settings, ship,screen, aliens)
def check_high_score(stats, sb):
"""Check whether a new high score has been set."""
if stats.score > stats.high_score:
stats.high_score = stats.score
sb.prep_high_score()
def create_alien(ai_settings,screen,aliens,stats):
"""Create an alien and place it in the current row."""
alien = Alien(ai_settings,screen)
if len(aliens) < 3+int(stats.level/2):
aliens.add(alien)
| null |
game_functions.py
|
game_functions.py
|
py
| 7,629 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pygame.event.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.KEYDOWN",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pygame.KEYUP",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pygame.MOUSEBUTTONDOWN",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.get_pos",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_RIGHT",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_LEFT",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_DOWN",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_UP",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_SPACE",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_RIGHT",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_LEFT",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_UP",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_DOWN",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_ESCAPE",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "pygame.K_p",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.set_visible",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "alien.rect",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.spritecollideany",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.set_visible",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "bullet.draw_bullet",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "pygame.display.flip",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "bullet.Bullet",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "bullet.rect",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.groupcollide",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "alien.Alien",
"line_number": 200,
"usage_type": "call"
}
] |
298704684
|
#!/usr/bin/env python
'''
@author Luke Campbell <[email protected]>
@file ion/processes/data/replay/replay_process.py
@date 06/14/12 13:31
@description Implementation for a replay process.
'''
from pyon.core.exception import BadRequest, NotFound
from pyon.core.object import IonObjectDeserializer
from pyon.core.bootstrap import get_obj_registry
from pyon.datastore.datastore import DataStore
from pyon.util.arg_check import validate_is_instance
from pyon.util.log import log
from ion.services.dm.inventory.dataset_management_service import DatasetManagementService
from ion.services.dm.utility.granule import RecordDictionaryTool
from interface.services.dm.idataset_management_service import DatasetManagementServiceProcessClient, DatasetManagementServiceClient
from interface.services.dm.ipubsub_management_service import PubsubManagementServiceProcessClient
from interface.services.dm.ireplay_process import BaseReplayProcess
from gevent.event import Event
from numbers import Number
import datetime
import dateutil.parser
import gevent
import netCDF4
import numpy as np
import time
class ReplayProcess(BaseReplayProcess):
'''
ReplayProcess - A process spawned for the purpose of replaying data
--------------------------------------------------------------------------------
Configurations
==============
process:
dataset_id: "" # Dataset to be replayed
delivery_format: {} # Delivery format to be replayed back (unused for now)
query:
start_time: 0 # Start time (index value) to be replayed
end_time: 0 # End time (index value) to be replayed
parameters: [] # List of parameters to form in the granule
'''
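# A hypothetical spawn configuration matching the layout documented above:
# CFG = {'process': {'dataset_id': '<dataset_id>',
#                    'query': {'start_time': 0, 'end_time': 100, 'parameters': ['time', 'temp']}}}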
process_type = 'standalone'
publish_limit = 10
dataset_id = None
delivery_format = {}
start_time = None
end_time = None
stride_time = None
parameters = None
stream_id = ''
stream_def_id = ''
def __init__(self, *args, **kwargs):
super(ReplayProcess,self).__init__(*args,**kwargs)
self.deserializer = IonObjectDeserializer(obj_registry=get_obj_registry())
self.publishing = Event()
self.play = Event()
self.end = Event()
def on_start(self):
'''
Starts the process
'''
log.info('Replay Process Started')
super(ReplayProcess,self).on_start()
dsm_cli = DatasetManagementServiceProcessClient(process=self)
pubsub = PubsubManagementServiceProcessClient(process=self)
self.dataset_id = self.CFG.get_safe('process.dataset_id', None)
self.delivery_format = self.CFG.get_safe('process.delivery_format',{})
self.start_time = self.CFG.get_safe('process.query.start_time', None)
self.end_time = self.CFG.get_safe('process.query.end_time', None)
self.stride_time = self.CFG.get_safe('process.query.stride_time', None)
self.parameters = self.CFG.get_safe('process.query.parameters',None)
self.publish_limit = self.CFG.get_safe('process.query.publish_limit', 10)
self.tdoa = self.CFG.get_safe('process.query.tdoa',None)
self.stream_id = self.CFG.get_safe('process.publish_streams.output', '')
self.stream_def = pubsub.read_stream_definition(stream_id=self.stream_id)
self.stream_def_id = self.stream_def._id
self.publishing.clear()
self.play.set()
self.end.clear()
if self.dataset_id is None:
raise BadRequest('dataset_id not specified')
self.dataset = dsm_cli.read_dataset(self.dataset_id)
self.pubsub = PubsubManagementServiceProcessClient(process=self)
@classmethod
def _coverage_to_granule(cls, coverage, start_time=None, end_time=None, stride_time=None, parameters=None, stream_def_id=None, tdoa=None):
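# Build a RecordDictionaryTool from the coverage, honoring an explicit tdoa slice, a stride over a numeric time range, or start/end time bounds, and filtering to the requested parameters.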
slice_ = slice(None) # Defaults to all values
if tdoa is not None and isinstance(tdoa,slice):
slice_ = tdoa
elif stride_time is not None:
validate_is_instance(start_time, Number, 'start_time must be a number for striding.')
validate_is_instance(end_time, Number, 'end_time must be a number for striding.')
validate_is_instance(stride_time, Number, 'stride_time must be a number for striding.')
ugly_range = np.arange(start_time, end_time, stride_time)
idx_values = [cls.get_relative_time(coverage,i) for i in ugly_range]
idx_values = list(set(idx_values)) # Removing duplicates
slice_ = [idx_values]
elif not (start_time is None and end_time is None):
time_var = coverage.temporal_parameter_name
uom = coverage.get_parameter_context(time_var).uom
if start_time is not None:
start_units = cls.ts_to_units(uom,start_time)
log.info('Units: %s', start_units)
start_idx = cls.get_relative_time(coverage,start_units)
log.info('Start Index: %s', start_idx)
start_time = start_idx
if end_time is not None:
end_units = cls.ts_to_units(uom,end_time)
log.info('End units: %s', end_units)
end_idx = cls.get_relative_time(coverage,end_units)
log.info('End index: %s', end_idx)
end_time = end_idx
slice_ = slice(start_time,end_time,stride_time)
log.info('Slice: %s', slice_)
if stream_def_id:
rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)
else:
rdt = RecordDictionaryTool(param_dictionary=coverage.parameter_dictionary)
if parameters is not None:
fields = list(set(parameters).intersection(rdt.fields))
else:
fields = rdt.fields
for field in fields:
log.info( 'Slice is %s' , slice_)
rdt[field] = coverage.get_parameter_values(field, tdoa=slice_)
return rdt
def execute_retrieve(self):
'''
execute_retrieve Executes a retrieval and returns the result
as a value in lieu of publishing it on a stream
'''
try:
coverage = DatasetManagementService._get_coverage(self.dataset_id)
rdt = self._coverage_to_granule(coverage,self.start_time, self.end_time, self.stride_time, self.parameters,tdoa=self.tdoa)
coverage.close(timeout=5)
except Exception as e:
import traceback
traceback.print_exc()
raise BadRequest('Problems reading from the coverage')
return rdt.to_granule()
def execute_replay(self):
'''
execute_replay Performs a replay and publishes the results on a stream.
'''
if self.publishing.is_set():
return False
gevent.spawn(self.replay)
return True
def replay(self):
self.publishing.set() # Minimal state, supposed to prevent two instances of the same process from replaying on the same stream
for rdt in self._replay():
if self.end.is_set():
return
self.play.wait()
self.output.publish(rdt.to_granule())
self.publishing.clear()
return
def pause(self):
self.play.clear()
def resume(self):
self.play.set()
def stop(self):
self.end.set()
@classmethod
def get_last_granule(cls, container, dataset_id):
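# Builds a granule of everything recorded since the newest granule for this dataset:
# queries the dataset's datastore view (descending, limit 1) for the latest ts_create,
# then reads the coverage from that timestamp onward.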
dsm_cli = DatasetManagementServiceClient()
dataset = dsm_cli.read_dataset(dataset_id)
cc = container
datastore_name = dataset.datastore_name
view_name = dataset.view_name
datastore = cc.datastore_manager.get_datastore(datastore_name, DataStore.DS_PROFILE.SCIDATA)
opts = dict(
start_key = [dataset_id, {}],
end_key = [dataset_id, 0],
descending = True,
limit = 1,
include_docs = True
)
results = datastore.query_view(view_name,opts=opts)
if not results:
raise NotFound('A granule could not be located.')
if results[0] is None:
raise NotFound('A granule could not be located.')
doc = results[0].get('doc')
if doc is None:
return None
ts = float(doc.get('ts_create',0))
coverage = DatasetManagementService._get_coverage(dataset_id)
rdt = cls._coverage_to_granule(coverage,start_time=ts, end_time=None)
coverage.close(timeout=5)
return rdt.to_granule()
@classmethod
def get_last_values(cls, dataset_id, number_of_points):
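# Returns a granule holding the last `number_of_points` timesteps of the dataset's
# coverage (or all available points if the coverage holds fewer).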
coverage = DatasetManagementService._get_coverage(dataset_id)
if coverage.num_timesteps < number_of_points:
if coverage.num_timesteps == 0:
rdt = RecordDictionaryTool(param_dictionary=coverage.parameter_dictionary)
return rdt.to_granule()
number_of_points = coverage.num_timesteps
rdt = cls._coverage_to_granule(coverage,tdoa=slice(-number_of_points,None))
coverage.close(timeout=5)
return rdt.to_granule()
def _replay(self):
coverage = DatasetManagementService._get_coverage(self.dataset_id)
rdt = self._coverage_to_granule(coverage, self.start_time, self.end_time, self.stride_time, self.parameters, self.stream_def_id)
elements = len(rdt)
for i in xrange(elements / self.publish_limit):
outgoing = RecordDictionaryTool(stream_definition_id=self.stream_def_id)
fields = self.parameters or outgoing.fields
for field in fields:
outgoing[field] = rdt[field][(i*self.publish_limit) : ((i+1)*self.publish_limit)]
yield outgoing
coverage.close(timeout=5)
return
@classmethod
def get_relative_time(cls, coverage, time):
'''
Determines the relative time in the coverage model based on a given time
The time must match the coverage's time units
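Example (hypothetical coverage): if the coverage's time axis holds [0, 10, 20] seconds,
get_relative_time(coverage, 12) returns 1, the index of the nearest sample.
Returns None when the time axis uses ISO-formatted strings.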
'''
time_name = coverage.temporal_parameter_name
pc = coverage.get_parameter_context(time_name)
units = pc.uom
if 'iso' in units:
return None # Not sure how to implement this.... How do you compare iso strings effectively?
values = coverage.get_parameter_values(time_name)
return cls.find_nearest(values,time)
@classmethod
def ts_to_units(cls,units, val):
'''
Converts a unix timestamp into various formats
Example:
ts = time.time()
CoverageCraft.ts_to_units('days since 2000-01-01', ts)
'''
if 'iso' in units:
return time.strftime('%Y-%d-%mT%H:%M:%S', time.gmtime(val))
elif 'since' in units:
t = netCDF4.netcdftime.utime(units)
return t.date2num(datetime.datetime.utcfromtimestamp(val))
else:
return val
@classmethod
def units_to_ts(cls, units, val):
'''
Converts known time formats into a unix timestamp
Example:
ts = CoverageCraft.units_to_ts('days since 2000-01-01', 1200)
'''
if 'since' in units:
t = netCDF4.netcdftime.utime(units)
dtg = t.num2date(val)
return time.mktime(dtg.timetuple())
elif 'iso' in units:
t = dateutil.parser.parse(val)
return time.mktime(t.timetuple())
else:
return val
@classmethod
def find_nearest(cls, arr, val):
'''
Returns the index of the element of the numpy array arr nearest to val.
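Example: find_nearest(np.array([0., 10., 20.]), 12.) returns 1.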
'''
idx = np.abs(arr-val).argmin()
return idx
| null |
ion/processes/data/replay/replay_process.py
|
replay_process.py
|
py
| 11,838 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "interface.services.dm.ireplay_process.BaseReplayProcess",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "pyon.core.object.IonObjectDeserializer",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pyon.core.bootstrap.get_obj_registry",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "gevent.event.Event",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "gevent.event.Event",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "gevent.event.Event",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pyon.util.log.log.info",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "pyon.util.log.log",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "interface.services.dm.idataset_management_service.DatasetManagementServiceProcessClient",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "interface.services.dm.ipubsub_management_service.PubsubManagementServiceProcessClient",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pyon.core.exception.BadRequest",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "interface.services.dm.ipubsub_management_service.PubsubManagementServiceProcessClient",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "pyon.util.arg_check.validate_is_instance",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "numbers.Number",
"line_number": 107,
"usage_type": "argument"
},
{
"api_name": "pyon.util.arg_check.validate_is_instance",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "numbers.Number",
"line_number": 108,
"usage_type": "argument"
},
{
"api_name": "pyon.util.arg_check.validate_is_instance",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "numbers.Number",
"line_number": 109,
"usage_type": "argument"
},
{
"api_name": "numpy.arange",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "pyon.util.log.log.info",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "pyon.util.log.log",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "pyon.util.log.log.info",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "pyon.util.log.log",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "pyon.util.log.log.info",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "pyon.util.log.log",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "pyon.util.log.log.info",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "pyon.util.log.log",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "pyon.util.log.log.info",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "pyon.util.log.log",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "ion.services.dm.utility.granule.RecordDictionaryTool",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "ion.services.dm.utility.granule.RecordDictionaryTool",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "pyon.util.log.log.info",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "pyon.util.log.log",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "ion.services.dm.inventory.dataset_management_service.DatasetManagementService._get_coverage",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "ion.services.dm.inventory.dataset_management_service.DatasetManagementService",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "traceback.print_exc",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "pyon.core.exception.BadRequest",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "gevent.spawn",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "interface.services.dm.idataset_management_service.DatasetManagementServiceClient",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "pyon.datastore.datastore.DataStore.DS_PROFILE",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "pyon.datastore.datastore.DataStore",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "pyon.core.exception.NotFound",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "pyon.core.exception.NotFound",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "ion.services.dm.inventory.dataset_management_service.DatasetManagementService._get_coverage",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "ion.services.dm.inventory.dataset_management_service.DatasetManagementService",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "ion.services.dm.inventory.dataset_management_service.DatasetManagementService._get_coverage",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "ion.services.dm.inventory.dataset_management_service.DatasetManagementService",
"line_number": 232,
"usage_type": "name"
},
{
"api_name": "ion.services.dm.utility.granule.RecordDictionaryTool",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "ion.services.dm.inventory.dataset_management_service.DatasetManagementService._get_coverage",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "ion.services.dm.inventory.dataset_management_service.DatasetManagementService",
"line_number": 244,
"usage_type": "name"
},
{
"api_name": "ion.services.dm.utility.granule.RecordDictionaryTool",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "time.gmtime",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "netCDF4.netcdftime.utime",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "netCDF4.netcdftime",
"line_number": 283,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.utcfromtimestamp",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 284,
"usage_type": "attribute"
},
{
"api_name": "netCDF4.netcdftime.utime",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "netCDF4.netcdftime",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "time.mktime",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "dateutil.parser.parser.parse",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "dateutil.parser.parser",
"line_number": 301,
"usage_type": "attribute"
},
{
"api_name": "dateutil.parser",
"line_number": 301,
"usage_type": "name"
},
{
"api_name": "time.mktime",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 311,
"usage_type": "call"
}
] |
173229494
|
# -*- coding: utf-8 -*-
import os
import json
import chardet
import urllib2
import common_operations as co
DEFAULT_HOSTS_FN = u"DEFAULT.hosts"
class Hosts(object):
CONFIG_FLAG = "#@SwitchHost!"
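# A managed hosts file carries its metadata as JSON after this flag on the first line,
# e.g. (illustrative): #@SwitchHost! {"title": "Office", "icon_idx": 2}
# getConfig() parses that line and save() rewrites it.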
def __init__(self, index=0, path=None, icon_idx=0):
self.index = index
self.path = path
self.dc_path = co.decode(path)
self.folder, self.fn = os.path.split(path)
# self.fn = co.decode(self.fn)
# if os.name == "nt":
# self.fn = self.fn.decode("GB18030")
self.title = None
self.icon_idx = icon_idx
self.content = ""
self.is_selected = False
self.url = None # for an online hosts scheme this is not None
self.last_fetch_dt = None # for an online hosts scheme this is the last update time
self.read()
@property
def is_read_only(self):
return self.url is not None
def read(self):
if not self.url:
c = open(self.path, "rb").read().strip() if os.path.isfile(self.path) else ""
else:
c = urllib2.urlopen(self.url).read().strip() if co.httpExists(self.url) else ""
# c = co.decode(c)
self.setContent(c, save=False)
def getConfig(self, ln):
u"""从一行内容中取得配置信息"""
cfg = None
v = ln.partition(self.CONFIG_FLAG)[2].strip()
if v:
try:
cfg = json.loads(v)
except Exception:
pass
if cfg:
self.title = cfg.get("title", self.title)
self.icon_idx = cfg.get("icon_idx", self.icon_idx)
def save(self):
if not self.path:
return
cfg = {
"title": self.title,
"icon_idx": self.icon_idx,
}
if self.url:
cfg.update({
"url": self.url,
})
cfg_ln = u"%s %s" % (self.CONFIG_FLAG, json.dumps(cfg).replace("\n", "").replace("\r", ""))
c = self.content
if not repr(c).startswith("u"):
c = c.decode("utf-8")
c = u"%s\n%s" % (cfg_ln, c)
open(self.path, "wb").write(c.encode("utf-8"))
def getTitle(self):
return self.title or self.fn if self.fn != DEFAULT_HOSTS_FN else self.fn
def setTitle(self, title):
self.title = title
self.save()
def setIcon(self, icon_idx):
self.icon_idx = icon_idx
self.save()
def setContent(self, c, save=True):
self.content = c #co.encode(c)
# check whether the first line holds configuration content
# a first line starting with the #@SwitchHost! flag is treated as configuration info
a = [i.strip() for i in c.split("\n")]
if a[0].startswith(self.CONFIG_FLAG):
self.getConfig(a[0])
self.content = "\n".join(a[1:])
if save:
self.save()
def getContent(self):
c = self.content
if not repr(c).startswith("u"):
try:
cd = chardet.detect(c)
c = c.decode(cd.get("encoding", "utf-8"))
except Exception:
c = c.decode("utf-8")
return c
| null |
legacy/v1/libs/cls_Hosts.py
|
cls_Hosts.py
|
py
| 3,162 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "common_operations.decode",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "common_operations.httpExists",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "urllib2.urlopen",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "chardet.detect",
"line_number": 132,
"usage_type": "call"
}
] |
24096253
|
import os
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import config
from utils import figure_utils
from utils.typical_pair_utils import get_joint_plot_x_y
fontsize = 6
mpl.rcParams['font.size'] = fontsize
mpl.rcParams['lines.linewidth'] = 1.0
mpl.rcParams['legend.frameon'] = False
mpl.rcParams['legend.fontsize'] = 'small'
fig = plt.figure(figsize=(6, 6.))
outer_grid = gridspec.GridSpec(3,2, width_ratios=[1,1],wspace=0.3,figure=fig)
plt.subplots_adjust(wspace=0.25, hspace=0.6)
ex1 = 'Prevotella_copri_61740' # lack of close pairs
ex2 = 'Lachnospiraceae_bacterium_51870' # lack of typical pairs
# ex2 = 'Roseburia_inulinivorans_61943' # lack of close pairs
ex3 = 'Barnesiella_intestinihominis_62208' # minimal spread in joint plot
ex4 = 'Bacteroides_caccae_53434' # uneven distribution along cf
ex5 = 'Bacteroides_vulgatus_57955' # population structure
ex6 = 'Eubacterium_rectale_56927' # population structure
species = [ex1, ex2, ex3, ex4, ex5, ex6]
for i in range(3):
for j in range(2):
inner_grid = gridspec.GridSpecFromSubplotSpec(1,2, width_ratios=[4,1],wspace=0.2,subplot_spec=outer_grid[i, j])
scatter_ax = fig.add_subplot(inner_grid[0])
marg_ax = fig.add_subplot(inner_grid[1], sharey=scatter_ax)
plt.setp(marg_ax.get_yticklabels(), visible=False)
plt.setp(marg_ax.get_yticklabels(minor=True), visible=False)
species_name = species[i*2+j]
x,y = get_joint_plot_x_y(species_name)
y = y * 100
xs = np.linspace(0.01, 1, 100)
ys = -np.log(xs) / config.first_pass_block_size * 100
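# reference curve: inverting x = exp(-d * L) for blocks of length L (= first_pass_block_size)
# gives d = -ln(x) / L, the divergence expected if identical blocks arose from random
# mutations alone (scaled to percent here)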
scatter_ax.plot(xs, ys, '--r', zorder=1, label='random mutations')
scatter_ax.scatter(x, y, s=0.6, linewidth=0, zorder=2, rasterized=True)
marg_ax.hist(y, orientation='horizontal', bins=100, alpha=0.6)
marg_ax.set_xscale('log')
if ('copri' in species_name) or ('Roseburia' in species_name) or ('Lachnospiraceae' in species_name):
marg_ax.set_xticks([1, 10, 100])
else:
marg_ax.set_xticks([1, 100])
scatter_ax.set_xlabel('Fraction of identical blocks')
scatter_ax.set_ylabel('Pairwise syn divergence (%)')
scatter_ax.legend()
scatter_ax.set_title(figure_utils.get_pretty_species_name(species_name))
fig.savefig(os.path.join(config.figure_directory, 'supp', 'supp_joint_examples.pdf'), bbox_inches='tight', dpi=600)
| null |
plotting_for_publication/supp_plot_example_cf_pd_joint.py
|
supp_plot_example_cf_pd_joint.py
|
py
| 2,496 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.rcParams",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.gridspec.GridSpec",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.gridspec",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots_adjust",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.gridspec.GridSpecFromSubplotSpec",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.gridspec",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.setp",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.setp",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "utils.typical_pair_utils.get_joint_plot_x_y",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "config.first_pass_block_size",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "utils.figure_utils.get_pretty_species_name",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "utils.figure_utils",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "config.figure_directory",
"line_number": 63,
"usage_type": "attribute"
}
] |
207704944
|
import os
import numpy as np
import sys
import urllib
import math
from pyspark import SparkConf
from pyspark.context import SparkContext
from pyspark.sql import SparkSession
from pyspark import SparkContext, SparkConf
sc = SparkContext.getOrCreate()
import re
from collections import Counter
from string import punctuation
#To run this code type python3 naive_bayes.py <train_url_location> <test_url_location> <dataset_size> <1>
#The following code preprocesses the text and label sets for the large/small/vsmall sets.
#It stores them under X_train_clean.txt, y_train_clean.txt, X_test_clean.txt and y_test_clean.txt (optional)
#in the same folder where it is called. It will automatically collect the data and create these four files
########################################################################################################################
def fix_doc(x):
#basic preprocessing - removing the same stuff from project 0 but later we will add more
#example we can import punctuations from string and use it but later :>
#https://docs.python.org/2/library/re.html read more here
x=re.sub('\.|,|:|;|\'|\!|\?|"','',x)
#x=re.sub('!|"|#|$|%|&|\'|(|)|*|+|,|-|.|/|:|;|<|=|>|?|@|[|\|]|^|_|`|{|||}|~','',x)
#x=x.strip(".,:;'!?")
# next we will do maulik's stuff before making it lower
# uncomment to use also maybe verify my logic
#x=re.sub("[ ([A-Z][a-zA-Z]*\s*)+]","",x)
#next we change to lower case
#and remove quote and amp
x=x.lower()
x=re.sub('"|&','',x)
#last step: remove everything which is not a letter and collapse repeated whitespace
x=re.sub('[^a-z]',' ',x)
x=re.sub("\s\s+|\+"," ", x)
a=""
x=x.split(" ")
for i in range(0,len(x)):
if(x[i] not in stopWords.value and (len(x[i])>2)):
a=a+x[i]+" "
a=a.strip()
return a
#######################################################################################################################
def getcats(x):
#This function scans the preprocessed files and returns data containing all the four classes or all the data containing CAT
#which is the suffix of the four classes.
x=str(x, "utf-8")
x=x.split(",")
fitered_data=[]
for i in x:
if ("CAT" or "cat") in i:
fitered_data.append(i.lower())
return fitered_data
######################################################################################################################
def split_data(x):
#splits the key value pair into multiple rows if the value has multiple values eg,
#input [0,(first,second)] --> [[0,first],[0,second]]
#Some documents belong to more than one class. In such a case, the data is saved twice, as belonging to a different class
#in each case.
combined=[]
for j in range(0,len(x)):
key=x[j][0]
value=x[j][1]
for i in range(0,len(value)):
single_row=[]
single_row.append(key)
single_row.append(value[i])
combined.append(single_row)
return (combined)
######################################################################################################################
######################################################################################################################
#def create_array(x):
# ccat=
######################################################################################################################
def create_array1(x,a,b,c,d):
#calculates the log of the Laplace-smoothed per-class word probabilities
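# i.e. for each class: log((count_in_class + 1) / (class_total + vocab_size)),
# the add-one (Laplace) smoothed log probability of the word given the class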
ccat=1
ecat=1
gcat=1
mcat=1
for k in range(0,len(x)):
if((x[k])=='c'):
ccat=ccat+(float)(x[k+1])
elif((x[k])=='e'):
ecat=ecat+(float)(x[k+1])
elif((x[k])=='m'):
mcat=mcat+(float)(x[k+1])
elif((x[k])=='g'):
gcat=gcat+(float)(x[k+1])
ccat=math.log(ccat/(a+vocab.value))
ecat=math.log(ecat/(c+vocab.value))
mcat=math.log(mcat/(b+vocab.value))
gcat=math.log(gcat/(d+vocab.value))
return [ccat,mcat,ecat,gcat]
def predict_class(x,a,b,c,d):
#returns a class prediction for a bag of X_test tokens using the naive Bayes model
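# score(class) = log prior(class) + sum_w count(w) * log P(w | class);
# the class with the highest score is returned (multinomial naive Bayes decision rule)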
class_values=group_by_class.value
product=[0,0,0,0]
for cc,v in x.items():
word=cc
ccat=0
ecat=0
gcat=0
mcat=0
for j in range(0,1):
if(word in class_values):
ccat=(float)(class_values[word][0])
mcat=(float)(class_values[word][1])
ecat=(float)(class_values[word][2])
gcat=(float)(class_values[word][3])
product[0]=product[0]+(ccat*((float)(v)))
product[1]=product[1]+(mcat*((float)(v)))
product[2]=product[2]+(ecat*((float)(v)))
product[3]=product[3]+(gcat*((float)(v)))
product[0]=product[0]+a
product[1]=product[1]+b
product[2]=product[2]+c
product[3]=product[3]+d
best=np.argmax(product)
if(best==0):
return "CCAT"
#return str(product[0])
elif(best==1):
return "MCAT"
#return str(product[1])
elif(best==2):
return "ECAT"
#return str(product[2])
else:
return "GCAT"
#return str(product[3])
#####################################################################################
#This function reads the stop words from the file containing stopwords, "stopWordList.txt"
def read_stop_words():
with open("stopWordList.txt") as f:
readStopWords = [x.strip('\n').encode("utf-8") for x in f.readlines()]
for i in range(0,len(readStopWords)):
readStopWords[i]=str(readStopWords[i], "utf-8")
return readStopWords
if __name__ == "__main__":
if len(sys.argv) != 5:
print("Usage: project_1.py <url_train> <url_test> <vsmall/small/large> <1>", file=sys.stderr)
exit(-1)
spark = SparkSession\
.builder\
.appName("naive_bayes")\
.getOrCreate()
#create stopwords later
stopWords = sc.broadcast(read_stop_words())
#print(stopWords.value)
base_url_train=(sys.argv[1])
base_url_test=(sys.argv[2])
dataset_size=(sys.argv[3])
flag=((int)(sys.argv[4]))
#make a local copy; these are the raw files
x=(str("wget "+base_url_train+"X_train_"+dataset_size+" -O X_to_train.txt"))
os.system(x)
x=(str("wget "+base_url_train+"y_train_"+dataset_size+" -O y_to_train.txt"))
os.system(x)
x=(str("wget "+base_url_test+"X_test_"+dataset_size+" -O X_to_test.txt"))
os.system(x)
#we convert the text to utf-8 "the common unicode scheme" according to python docs
X_train=(sc.textFile("X_to_train.txt").map(lambda x:x.encode("utf-8")))
y_train=(sc.textFile("y_to_train.txt").map(lambda x:x.encode("utf-8")))
X_test=(sc.textFile("X_to_test.txt").map(lambda x:x.encode("utf-8")))
X_train=X_train.zipWithIndex().map(lambda x:(x[1],fix_doc(str(x[0], "utf-8"))))
X_test=X_test.zipWithIndex().map(lambda x:(x[1],fix_doc(str(x[0], "utf-8"))))
y_train=(y_train.map(getcats)\
.zipWithIndex().map(lambda x:(x[1],x[0]))).collect()
#split the X_train and y_train entries that carry multiple labels into multiple copies
y_train=sc.parallelize(split_data(y_train))
train_complete=X_train.join(y_train).map(lambda x:(x[1][0],x[1][1]))
#separates the X_train and y_train
X_train=train_complete.map(lambda x:x[0])
y_train=train_complete.map(lambda x:x[1])
#calculates the total number of documents in each document class
temp=y_train.map(lambda x:(x,1)).reduceByKey(lambda a,b:a+b).collect()
print (temp)
tot_ccat=temp[0][1]
tot_mcat=temp[1][1]
tot_ecat=temp[2][1]
tot_gcat=temp[3][1]
#calculates the log prior probability of a document belonging to each class
prior_ccat=(math.log(((float)(tot_ccat))/((float)(tot_ccat+tot_gcat+tot_mcat+tot_ecat))))
prior_gcat=(math.log(((float)(tot_gcat))/((float)(tot_ccat+tot_gcat+tot_mcat+tot_ecat))))
prior_ecat=(math.log(((float)(tot_ecat))/((float)(tot_ccat+tot_gcat+tot_mcat+tot_ecat))))
prior_mcat=(math.log(((float)(tot_mcat))/((float)(tot_ccat+tot_gcat+tot_mcat+tot_ecat))))
#del(temp)
#generates the vocab by collecting all the unique words from all the documents of the training set
vocab=(X_train.flatMap(lambda x:x.split(" ")).map(lambda x:re.sub(",","", x)).distinct().map(lambda x:(1,1)).reduceByKey(lambda a,b:a+b).collect())[0][1]
#print(vocab)
vocab=sc.broadcast(vocab)
#splits the words from the X_train dataset into tuples like (hello ccat,1) and (hello gcat,1) and then reduces and finds the individual word count per class
group_by_class=train_complete.map(lambda x:(x[1],x[0])).reduceByKey(lambda a,b:a+b).map(lambda x:([(x[0],z) for z in x[1].split(" ")]))\
.flatMap(lambda x:(x)).map(lambda x:(x,1)).reduceByKey(lambda a,b:a+b)
temp=group_by_class.map(lambda x:(x[0][0],1)).reduceByKey(lambda a,b:a+b).collect()
print (temp)
tot_ccat=temp[0][1]
tot_mcat=temp[1][1]
tot_ecat=temp[2][1]
tot_gcat=temp[3][1]
#converts the per-word string of class counts, e.g. (hello, "ccat 1 gcat 1"), into a per-class array of log probabilities
group_by_class=group_by_class.map(lambda x:(x[0][1],(str(x[0][0])+" "+str(x[1])+" "))).reduceByKey(lambda a,b:str(a)+str(b))\
.map(lambda x:(x[0],str(x[1]).replace("cat","").split(" "))).map(lambda x:(x[0],create_array1(x[1],tot_ccat,tot_mcat,tot_ecat,tot_gcat))).collect()
group_by_class=sc.broadcast(dict(group_by_class))
#predicts on the testing set
y_test=X_test.map(lambda x:Counter(x[1].split(" "))).map(lambda x:predict_class(x,prior_ccat,prior_mcat,prior_ecat,prior_gcat))
#saving to file
submit=open('y_test.txt',"w")
y_test=y_test.collect()
counter=0
for i in y_test:
counter=counter+1
#print(counter)
submit.write("%s\n" % i)
submit.close()
| null |
gru/p1/naive_bayes.py
|
naive_bayes.py
|
py
| 10,266 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pyspark.SparkContext.getOrCreate",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pyspark.SparkContext",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "re.sub",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession.builder.appName",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 200,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 201,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 202,
"usage_type": "attribute"
},
{
"api_name": "os.system",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 299,
"usage_type": "call"
}
] |
531398356
|
"""
* ProjectBio v1.0
* Copyright 2014-2015 Web2all.
* Licensed under Share2Create
* Author: Senthilkumar M <allaboutsenthil.appspot.com>
*/
"""
import jinja2
import os
import webapp2
from datetime import datetime
TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), 'templates')
jinja_environment = \
jinja2.Environment(loader=jinja2.FileSystemLoader(TEMPLATE_DIR))
class BaseHandler(webapp2.RequestHandler):
@webapp2.cached_property
def jinja2(self):
return jinja2.get_jinja2(app=self.app)
def render_template(
self,
filename,
template_values,
**template_args
):
template = jinja_environment.get_template(filename)
self.response.out.write(template.render(template_values))
class MainPage(BaseHandler):
def get(self):
self.render_template('index.html', {})
app = webapp2.WSGIApplication([
('/', MainPage)
],
debug=True)
| null |
main.py
|
main.py
|
py
| 957 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "jinja2.Environment",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "jinja2.FileSystemLoader",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "webapp2.RequestHandler",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "jinja2.get_jinja2",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "webapp2.cached_property",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "webapp2.WSGIApplication",
"line_number": 42,
"usage_type": "call"
}
] |
286486033
|
##################################################################
# Experiments for event-enhanced knowledge graph embeddings
#
# How to run:
#
# To train the embeddings for a given knowledge graph and event dataset
# python ekl_experiment.py --dir 'path/to/dir' --...
# Up to now there is no flag to switch to GPU support, but this should be
# easy to change when needed
#
# Requirements:
#
# - Python 2.7
# - Tensorflow 0.12.1
# - numpy 1.12
# - rdflib 4.1.2
# - pandas
import csv
import numpy as np
import tensorflow as tf
from rdflib import URIRef, RDFS
from models.RESCAL import RESCAL
from models.TEKE import TEKE
from models.TransE import TransE
from models.TransH import TransH
from models.model import ranking_error_triples, l2_similarity, bernoulli_probs
from models.pre_training import EmbeddingPreTrainer, TEKEPreparation
from event_models.LinearEventModel import Skipgram, ConcatenationFull, ConcatenationCause, Average
from event_models.Autoencoder import ConvolutionalAutoEncoder, LSTMAutoencoder
from prep.batch_generators import SkipgramBatchGenerator, TripleBatchGenerator, PredictiveEventBatchGenerator, FuturePredictiveBatchGenerator, AutoEncoderBatchGenerator
from prep.etl import prepare_sequences, message_index
from prep.preprocessing import PreProcessor
from experiments.experiment_helper import slice_ontology, get_kg_statistics, get_low_dim_embs, get_zero_shot_scenario, \
cross_parameter_eval, TranslationModels, embs_to_df, Parameters, evaluate_on_test
if __name__ == '__main__':
####### PATH PARAMETERS ########
base_path = "../traffic_data/"
path_to_store_model = base_path + "Embeddings/"
path_to_events = base_path + "Sequences/"
path_to_kg = base_path + "Ontology/traffic_individuals.xml"
path_to_store_sequences = base_path + "Sequences/"
path_to_store_embeddings = base_path + "Embeddings/"
traffic_data = True
path_to_sequence = base_path + 'Sequences/sequence.txt'
preprocessor = PreProcessor(path_to_kg)
tk = None
bern_probs = None
num_sequences = None
if traffic_data:
max_events = 4000
exclude_rels = [RDFS.comment]
preprocessor = PreProcessor(path_to_kg)
excluded_events = preprocessor.load_unique_msgs_from_txt(base_path + 'unique_msgs.txt', max_events=max_events)
excluded_events = [URIRef(('http://www.siemens.com/citypulse#' + e)) for e in excluded_events]
amberg_params = None
else:
exclude_rels = ['http://www.siemens.com/ontology/demonstrator#tagAlias']
max_events = None
max_seq = None
# sequence window size in minutes
window_size = 1.0
amberg_params = (path_to_events, max_events)
for event_layer in [ConcatenationCause]: #[ConcatenationFull, ConcatenationCause, ConvolutionalAutoEncoder]:
for shared in [True]:
# set fixed random seed
rnd = np.random.RandomState(43)
preprocessor.load_knowledge_graph(format='xml', exclude_rels=exclude_rels, amberg_params=amberg_params,
excluded_entities=excluded_events, clean_schema=False)
vocab_size = preprocessor.get_vocab_size()
unique_msgs = preprocessor.get_unique_msgs()
ent_dict = preprocessor.get_ent_dict()
rel_dict = preprocessor.get_rel_dict()
g = preprocessor.get_kg()
print("Read {0} number of triples".format(len(g)))
get_kg_statistics(g)
# zero_shot_prop = 0.25
# zero_shot_entity = URIRef('http://www.siemens.com/ontology/demonstrator#Event') #URIRef('http://purl.oclc.org/NET/ssnx/ssn#Device')
# zero_shot_relation = URIRef(RDF.type) # URIRef('http://www.loa-cnr.it/ontologies/DUL.owl#follows') # URIRef('http://www.siemens.com/ontology/demonstrator#involvedEquipment') URIRef('http://www.loa-cnr.it/ontologies/DUL.owl#hasPart')
# zero_shot_triples, kg_prop = get_zero_shot_scenario(rnd, g, zero_shot_entity, zero_shot_relation, zero_shot_prop)
zero_shot_triples = []
######### Model selection ##########
model_type = TranslationModels.TEKE
# "Skipgram", "Concat", "RNN"
# event_layer = ConvolutionalAutoEncoder
store_embeddings = False
######### Hyper-Parameters #########
param_dict = {}
param_dict['embedding_size'] = [80]
param_dict['seq_data_size'] = [1.0]
param_dict['batch_size'] = [32] # [32, 64, 128]
param_dict['learning_rate'] = [0.01] # [0.5, 0.8, 1.0]
param_dict['lambd'] = [0.001] # regularizer (RESCAL)
param_dict['alpha'] = [1.0] # event embedding weighting
eval_step_size = 2000
num_epochs = 80
num_negative_triples = 2
test_proportion = 0.08
validation_proportion = 0.05
bernoulli = True
fnsim = l2_similarity
# Train dev test splitting
g_train, g_valid, g_test = slice_ontology(rnd, g, validation_proportion, test_proportion, zero_shot_triples)
train_size = len(g_train)
valid_size = len(g_valid)
test_size = len(g_test)
print("Train size: ", train_size)
print("Valid size: ", valid_size)
print("Test size: ", test_size)
# Event layer parameters
if event_layer is not None:
param_dict['num_skips'] = [4] # [2, 4]
param_dict['num_sampled'] = [10] # [5, 9]
# shared = True
# param_dict['batch_size_sg'] = [2] # [128, 512]
pre_train = False
# also used for TEKE
pre_train_steps = 30000
pre_train_embeddings = base_path + "Embeddings/supplied_embeddings"
if traffic_data:
sequences = preprocessor.prepare_sequences(path_to_sequence, use_dict=True)
else:
merged = preprocessor.get_merged()
sequences = prepare_sequences(merged, message_index,
unique_msgs, window_size, max_seq, g_train)
num_sequences = len(sequences)
num_entities = len(ent_dict)
num_relations = len(rel_dict)
print("Num entities:", num_entities)
print("Num relations:", num_relations)
print("Event entity percentage: {0} prct".format(100.0 * vocab_size / num_entities))
if bernoulli:
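# presumably the Bernoulli negative-sampling trick from TransH (Wang et al. 2014):
# per relation, corrupt head vs. tail with probabilities derived from tph/hpt statistics
# so that sampled negatives are less likely to be accidental positives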
bern_probs = bernoulli_probs(g, rel_dict)
# free some memory
g = None
model_name = TranslationModels.get_model_name(event_layer, model_type)
overall_best_performance = np.inf
best_param_list = []
train_tg = TripleBatchGenerator(g_train, ent_dict, rel_dict, num_negative_triples, rnd, bern_probs=bern_probs)
valid_tg = TripleBatchGenerator(g_valid, ent_dict, rel_dict, 1, rnd, sample_negative=False)
test_tg = TripleBatchGenerator(g_test, ent_dict, rel_dict, 1, rnd, sample_negative=False)
print(test_tg.next(5))
# Loop through all hyper-parameter combinations
param_combs = cross_parameter_eval(param_dict)
for comb_num, tmp_param_dict in enumerate(param_combs):
params = Parameters(**tmp_param_dict)
num_steps = (train_size / params.batch_size) * num_epochs
print("Progress: {0} prct".format(int((100.0 * comb_num) / len(param_combs))))
print("Embedding size: ", params.embedding_size)
print("Batch size: ", params.batch_size)
filter_triples = valid_tg.all_triples
if event_layer is not None:
if traffic_data:
batch_size_sg = 32 # (len(sequences[0]) * num_epochs) / num_steps
else:
batch_size_sg = 11 # (sum([len(sequences[i]) for i in range(num_sequences)]) * num_epochs) / num_steps
print("Batch size sg:", batch_size_sg)
num_skips = params.num_skips
num_sampled = params.num_sampled
if event_layer == Skipgram:
sg = SkipgramBatchGenerator(sequences, num_skips, rnd)
elif event_layer == ConvolutionalAutoEncoder:
sg = AutoEncoderBatchGenerator(sequences, num_skips, rnd)
elif event_layer == ConcatenationFull:
sg = FuturePredictiveBatchGenerator(sequences, num_skips, rnd)
num_skips = 2 * num_skips
elif event_layer == ConcatenationCause:
sg = PredictiveEventBatchGenerator(sequences, num_skips, rnd)
event_model = event_layer(num_entities, vocab_size, params.embedding_size, num_skips, shared=shared,
alpha=params.alpha)
else:
batch_size_sg = 0
num_sampled = 0
event_model = None
pre_train = False
# Model Selection
if model_type == TranslationModels.Trans_E:
param_list = [num_entities, num_relations, params.embedding_size, params.batch_size,
batch_size_sg, num_sampled, vocab_size, fnsim, params.learning_rate,
event_model]
model = TransE(*param_list)
elif model_type == TranslationModels.Trans_H:
param_list = [num_entities, num_relations, params.embedding_size, params.batch_size,
batch_size_sg, num_sampled, vocab_size, params.learning_rate, event_model,
params.lambd]
model = TransH(*param_list)
elif model_type == TranslationModels.RESCAL:
param_list = [num_entities, num_relations, params.embedding_size, params.batch_size,
batch_size_sg, num_sampled, vocab_size, params.learning_rate, event_model,
params.lambd]
model = RESCAL(*param_list)
elif model_type == TranslationModels.TEKE:
pre_trainer = EmbeddingPreTrainer(unique_msgs, SkipgramBatchGenerator(sequences, num_skips, rnd),
pre_train_embeddings)
initE = pre_trainer.get(pre_train_steps, params.embedding_size, batch_size_sg, num_sampled, vocab_size,
num_entities)
tk = TEKEPreparation(sequences, initE, num_entities)
param_list = [num_entities, num_relations, params.embedding_size, params.batch_size, fnsim, tk]
model = TEKE(*param_list)
# Build tensorflow computation graph
tf.reset_default_graph()
# tf.set_random_seed(23)
with tf.Session() as session:
model.create_graph()
saver = tf.train.Saver(model.variables())
tf.global_variables_initializer().run()
if event_model is not None and not event_model.shared and model_type != TranslationModels.TEKE:
# set the non-event entities in the non-shared event embeddings to zero
session.run([event_model.update])
print('Initialized graph')
average_loss = 0
mean_rank_list = []
hits_10_list = []
loss_list = []
# Initialize the event entities with supplied (pre-trained) embeddings
if pre_train and model_type != TranslationModels.TEKE:
# TODO: adapt to selected event_model for pre-training
pre_trainer = EmbeddingPreTrainer(unique_msgs, SkipgramBatchGenerator(sequences, num_skips, rnd),
pre_train_embeddings)
initE = pre_trainer.get(pre_train_steps, params.embedding_size, batch_size_sg, num_sampled, vocab_size,
num_entities)
session.run(model.assign_initial(initE))
if store_embeddings:
entity_embs = []
relation_embs = []
# Steps loop
for b in range(1, num_steps + 1):
# triple batches
batch_pos, batch_neg = train_tg.next(params.batch_size)
valid_batch_pos, _ = valid_tg.next(valid_size)
feed_dict = {
model.inpl: batch_pos[1, :], model.inpr: batch_pos[0, :], model.inpo: batch_pos[2, :],
model.inpln: batch_neg[1, :], model.inprn: batch_neg[0, :], model.inpon: batch_neg[2, :],
model.global_step: b
}
if event_model is not None and not model_type == TranslationModels.TEKE:
# Event batches
batch_x, batch_y = sg.next(batch_size_sg)
batch_y = np.array(batch_y).reshape((batch_size_sg, 1))
feed_dict[model.train_inputs] = batch_x
feed_dict[model.train_labels] = batch_y
# One train step in mini-batch
_, l = session.run(model.train(), feed_dict=feed_dict)
average_loss += l
# Run post-ops: regularization etc.
session.run(model.post_ops())
# Evaluate on validation set
if b % eval_step_size == 0:
# get valid batches for scoring
valid_inpl = valid_batch_pos[1, :]
valid_inpr = valid_batch_pos[0, :]
valid_inpo = valid_batch_pos[2, :]
scores_l, scores_r = model.scores(session, valid_inpl, valid_inpr, valid_inpo)
errl, errr = ranking_error_triples(filter_triples, scores_l, scores_r, valid_inpl,
valid_inpo, valid_inpr)
hits_10 = np.mean(np.asarray(errl + errr) <= 10) * 100
mean_rank = np.mean(np.asarray(errl + errr))
mean_rank_list.append(mean_rank)
hits_10_list.append(hits_10)
if b > 0:
average_loss = average_loss / eval_step_size
loss_list.append(average_loss)
if store_embeddings:
entity_embs.append(session.run(model.E))
relation_embs.append(session.run(model.R))
# The average loss is an estimate of the loss over the last eval_step_size batches.
print('Average loss at step {0}: {1}'.format(b, average_loss))
print('\t Validation Hits10: ', hits_10)
print('\t Validation MeanRank: ', mean_rank)
average_loss = 0
if overall_best_performance > mean_rank:
overall_best_performance = mean_rank
print("Saving overall best model with MeanRank: {0} and hits {1}".format(mean_rank, hits_10))
save_path_global = saver.save(session, path_to_store_model + 'tf_model')
best_param_list = param_list
reverse_entity_dictionary = dict(zip(ent_dict.values(), ent_dict.keys()))
reverse_relation_dictionary = dict(zip(rel_dict.values(), rel_dict.keys()))
# save embeddings to disk
if store_embeddings:
for i in range(len(entity_embs)):
if i % 50 == 0:
df_embs = get_low_dim_embs(entity_embs[i], reverse_entity_dictionary)
df_embs.to_csv(path_to_store_embeddings + "entity_embeddings_low" + str(i) + ".csv", sep=',',
encoding='utf-8')
df_r_embs = get_low_dim_embs(relation_embs[i], reverse_relation_dictionary)
df_r_embs.to_csv(path_to_store_embeddings + "relation_embeddings" + str(i) + ".csv", sep=',',
encoding='utf-8')
# TODO: only of best model (not last)
df_embs = embs_to_df(entity_embs[len(entity_embs)-1], reverse_entity_dictionary)
df_embs.to_csv(path_to_store_embeddings + "entity_embeddings" + '_last_cleaned' + ".csv", sep=',',
encoding='utf-8')
# Reset graph, load best model and apply to test data set
with open(base_path + 'evaluation_parameters_' + model_name + str(shared) +
'_best.csv', "wb") as eval_file:
writer = csv.writer(eval_file)
results, relation_results = evaluate_on_test(model_type, best_param_list, test_tg, save_path_global, test_size,
reverse_relation_dictionary)
writer.writerow (
["relation", "embedding_size", "batch_size", "learning_rate", "num_skips", "num_sampled",
"batch_size_sg", "mean_rank", "mrr", "hits_top_10", "hits_top_3", "hits_top_1"]
)
writer.writerow(
['all', params.embedding_size, params.batch_size, params.learning_rate, num_skips, num_sampled,
batch_size_sg, results[0], results[1], results[2], results[3], results[4]]
)
for rel in relation_results:
writer.writerow (
[rel, params.embedding_size, params.batch_size, params.learning_rate, num_skips, num_sampled,
batch_size_sg, relation_results[rel]['MeanRank'], relation_results[rel]['MRR'],
relation_results[rel]['Hits@10'], relation_results[rel]['Hits@3'], relation_results[rel]['Hits@1']]
)
| null |
experiments/experiment_loop.py
|
experiment_loop.py
|
py
| 19,069 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "prep.preprocessing.PreProcessor",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "rdflib.RDFS.comment",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "rdflib.RDFS",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "prep.preprocessing.PreProcessor",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "rdflib.URIRef",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "event_models.LinearEventModel.ConcatenationCause",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "numpy.random.RandomState",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "experiments.experiment_helper.get_kg_statistics",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "experiments.experiment_helper.TranslationModels.TEKE",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "experiments.experiment_helper.TranslationModels",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "models.model.l2_similarity",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "experiments.experiment_helper.slice_ontology",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "prep.etl.prepare_sequences",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "prep.etl.message_index",
"line_number": 141,
"usage_type": "argument"
},
{
"api_name": "models.model.bernoulli_probs",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "experiments.experiment_helper.TranslationModels.get_model_name",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "experiments.experiment_helper.TranslationModels",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "numpy.inf",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "prep.batch_generators.TripleBatchGenerator",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "prep.batch_generators.TripleBatchGenerator",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "prep.batch_generators.TripleBatchGenerator",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "experiments.experiment_helper.cross_parameter_eval",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "experiments.experiment_helper.Parameters",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "event_models.LinearEventModel.Skipgram",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "prep.batch_generators.SkipgramBatchGenerator",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "event_models.Autoencoder.ConvolutionalAutoEncoder",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "prep.batch_generators.AutoEncoderBatchGenerator",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "event_models.LinearEventModel.ConcatenationFull",
"line_number": 190,
"usage_type": "name"
},
{
"api_name": "prep.batch_generators.FuturePredictiveBatchGenerator",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "event_models.LinearEventModel.ConcatenationCause",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "prep.batch_generators.PredictiveEventBatchGenerator",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "experiments.experiment_helper.TranslationModels.Trans_E",
"line_number": 204,
"usage_type": "attribute"
},
{
"api_name": "experiments.experiment_helper.TranslationModels",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "models.TransE.TransE",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "experiments.experiment_helper.TranslationModels.Trans_H",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "experiments.experiment_helper.TranslationModels",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "models.TransH.TransH",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "experiments.experiment_helper.TranslationModels.RESCAL",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "experiments.experiment_helper.TranslationModels",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "models.RESCAL.RESCAL",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "experiments.experiment_helper.TranslationModels.TEKE",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "experiments.experiment_helper.TranslationModels",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "models.pre_training.EmbeddingPreTrainer",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "prep.batch_generators.SkipgramBatchGenerator",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "models.pre_training.TEKEPreparation",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "models.TEKE.TEKE",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "tensorflow.reset_default_graph",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.Saver",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 233,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.global_variables_initializer",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "experiments.experiment_helper.TranslationModels.TEKE",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "experiments.experiment_helper.TranslationModels",
"line_number": 235,
"usage_type": "name"
},
{
"api_name": "experiments.experiment_helper.TranslationModels.TEKE",
"line_number": 246,
"usage_type": "attribute"
},
{
"api_name": "experiments.experiment_helper.TranslationModels",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "models.pre_training.EmbeddingPreTrainer",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "prep.batch_generators.SkipgramBatchGenerator",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "experiments.experiment_helper.TranslationModels.TEKE",
"line_number": 270,
"usage_type": "attribute"
},
{
"api_name": "experiments.experiment_helper.TranslationModels",
"line_number": 270,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "models.model.ranking_error_triples",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "experiments.experiment_helper.get_low_dim_embs",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "experiments.experiment_helper.get_low_dim_embs",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "experiments.experiment_helper.embs_to_df",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "experiments.experiment_helper.evaluate_on_test",
"line_number": 342,
"usage_type": "call"
}
] |
554186201
|
import pymongo
from pymongo import MongoClient
import datetime
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from string import Template
MY_ADDRESS = "[email protected]"
## Current template email could remove username or replace it with identikey
emailText = "Hey $name!! Come back and finish your quiz on $subject. Enter your email: $email into the box on the Day 2 return page to resume your quiz!!! quiz-me.co/#/returnPage"
emailText2 = "Hey $name!! Come back and finish the final section of the tutorial. Enter your email: $email into the box on the day 3 return page to resume your quiz!!! quiz-me.co/#/day3"
## Connect to the email server
server=smtplib.SMTP()
server.connect('localhost')
## function to send email
def sendEmail(fromaddr, toaddr, body):
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = "Come Back to QuizMe!!!"
msg.attach(MIMEText(body, 'plain'))
server.send_message(msg)
del msg
## Get the time range after which users should be sent an email (currently 23 hours for the day-2 reminder and 8 days for the final one)
lastDay = datetime.datetime.utcnow() - datetime.timedelta(hours = 23)
lastWeek = datetime.datetime.utcnow() - datetime.timedelta(days = 8)
client = MongoClient()
db = client.quizme
## Old code being kept here for reference; will delete before prod
##for dic in db.test.find({ 'date' : {"$lte" : lastHour }}):
## print(dic['user'])
## print("Fin")
## db.test.findAndModify({query: {user: "testUser2"}, update: {$set : {emailSent : 1}}})
## Find all users who took a quiz over the allotted time ago and send those users an email if they haven't been sent one yet. Might want to check if the user has already returned
for user in db.tutorial.find({ "$and" : [ {'date' : {"$lte" : lastDay }}, {'emailSent' : 0}] }):
print(user['name'])
email = Template(emailText)
sendEmail(MY_ADDRESS, user['identikey'], email.substitute(name =user['name'], subject=user['subject'], email=user['identikey']))
db.tutorial.update({"_id" : user['_id']},{"$set" : {'emailSent' : 1}})
for user in db.tutorial.find({ "$and" : [ {'date' : {"$lte" : lastWeek }}, {'oneWeekEmail' : 0}] }):
print(user['name'])
email2 = Template(emailText2)
sendEmail(MY_ADDRESS, user['identikey'], email2.substitute(name =user['name'], email=user['identikey']))
db.tutorial.update({"_id" : user['_id']},{"$set" : {'oneWeekEmail' : 1}})
print("Fin")
server.quit()
| null |
Email.py
|
Email.py
|
py
| 2,493 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "smtplib.SMTP",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "email.mime.multipart.MIMEMultipart",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "email.mime.text.MIMEText",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "email.mime.multipart",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "string.Template",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "email.mime.multipart.substitute",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "email.mime.multipart",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "string.Template",
"line_number": 52,
"usage_type": "call"
}
] |
342626607
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
"""
Entrypoint utility for launching distributed torch jobs.
NOTE: All command line arguments are forwarded to the child script,
including those relevant to only this script. If the dependent script
uses argparse, then you can parse command line arguments using
`args = parser.parse_known_args()[0]` instead of `args = parser.parse_args()`.
Usage: dist_run --nproc_per_node=<num allocated GPUs> <path to script> <script args>...
"""
import argparse
import importlib
import logging
import logging.config
import os
import psutil
import signal
import sys
import torch
import torch.multiprocessing as mp
from utils import configure_logging
class SignalHandler:
def __init__(self, child_procs):
self.child_procs = child_procs
def __call__(self, incoming_signal, frame):
print("Signal %d detected in process %d " % ( incoming_signal, os.getpid() ))
print("Forwarding to children " )
for child in self.child_procs:
print("Will now pass the signal %d to child process %d" % ( incoming_signal, child.pid ) )
os.kill( child.pid, incoming_signal)
if incoming_signal in [ signal.SIGUSR1,signal.SIGUSR2 ]:
# This is the most important part - we return silently and will be allowed to keep running.
return
else:
sys.exit(1)
def _set_signal_handlers(child_procs):
signal_handler = SignalHandler(child_procs)
print("Setting signal handlers in process %d" % os.getpid())
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGUSR1, signal_handler)
signal.signal(signal.SIGUSR2, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
def _run_function(rank, world_size, script_path):
"""
Entrypoint for the GPU worker process.
Args:
rank (int): The rank of the current worker process.
world_size (int): The total number of worker processes.
script_path (str): Path to the python script to execute.
"""
os.environ['RANK'] = str(rank)
os.environ['LOCAL_RANK'] = str(rank)
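# Setting LOCAL_RANK equal to RANK assumes a single-node launch; a multi-node
# launcher would need to offset the global rank by the node index.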
configure_logging(rank)
logger = logging.getLogger(__name__)
# Get the name of the script without the extension
script_name = os.path.splitext(os.path.basename(script_path))[0]
script_dir = os.path.dirname(script_path).replace('/', '.')
logger.info('Loading script "{}" as module "{}" and package "{}".'
.format(script_path, script_name, script_dir))
module = importlib.import_module('{}'.format(script_name),
package=script_dir)
module.main()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Distributed launcher for pytorch.")
parser.add_argument('-n', '--nproc_per_node', type=int, default=-1,
help="The number of processes to launch.")
# positional
parser.add_argument("launch_script", type=str,
help="The full path to the single GPU module to run.")
# rest of the arguments for the dest script
#parser.add_argument('training_script_args', nargs=argparse.REMAINDER)
args, rest = parser.parse_known_args()
world_size = args.nproc_per_node
if world_size == -1:
world_size = torch.cuda.device_count()
os.environ['WORLD_SIZE'] = str(world_size)
# Low level parallel constructs actually hurt the performance fairly significantly
# because we already employ a high level of parallelism at higher API layers. So,
# this disables most forms of bad parallelism. FPS improvement for default experiments
# is somewhere around 100-300 fps just with this.
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
script_path = args.launch_script
sys.argv = [script_path] + rest
spawn_context = mp.spawn(_run_function, nprocs=world_size, args=(world_size, script_path), join=False)
_set_signal_handlers(spawn_context.processes)
# Wait for the child processes to exit
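# spawn_context.join() returns False while any child is still alive, so keep polling until it reports completion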
while not spawn_context.join():
pass
sys.exit(0)
| null |
dist_run.py
|
dist_run.py
|
py
| 4,163 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.getpid",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.kill",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "signal.SIGUSR1",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "signal.SIGUSR2",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.getpid",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "signal.signal",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "signal.SIGINT",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "signal.signal",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "signal.SIGUSR1",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "signal.signal",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "signal.SIGUSR2",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "signal.signal",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "signal.SIGTERM",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "utils.configure_logging",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "importlib.import_module",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "torch.cuda.device_count",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "torch.multiprocessing.spawn",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "torch.multiprocessing",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 123,
"usage_type": "call"
}
] |
299064716
|
import nibabel as nib
import glob
import pandas as pd
import numpy as np
from nilearn import datasets as ds
from nilearn.image import resample_img
from nilearn.input_data import NiftiLabelsMasker
import psutil
from matplotlib import pylab as plt
import time
from nilearn.signal import clean
from nilearn import plotting, image
from sklearn.covariance import LedoitWolf
import seaborn as sns
from statsmodels.formula.api import ols
from sklearn.preprocessing import StandardScaler
import community
import networkx as nx
np.random.seed(0)
def dataframize_nii_paths(files):
''' Sort and store nifti paths of gm, wm, and lcr into a dataframe
Parameters
----------
files : list of path to nifti images
'''
# Sort cortex, white matter and LCR files
files_per_sub = {}
for file in files:
split_name = file.split('_')
sub_nb = split_name[3] + '_' + split_name[4] + '_' + split_name[5]
assert '_S_' in sub_nb, "Problem with splitting path"
for file in files:
if sub_nb in file:
if 'smwc1' in file:
smwc1 = file
elif 'smwc2' in file:
smwc2 = file
elif 'smwc3' in file:
smwc3 = file
else:
print('pb with ', file)
raise ValueError('unexpected file: {}'.format(file))
files_per_sub[sub_nb] = [smwc1, smwc2, smwc3]
# store it in a dataframe
df = pd.DataFrame.from_dict(files_per_sub, orient='index')
df.rename(columns={
0: 'greyMatter_path',
1: 'whiteMatter_path',
2: 'LCR_path'}, inplace=True)
# delete duplicates
nb_duplicates = (df.index.duplicated() == True).sum()
df = df[df.index.duplicated() == False]
print('{} duplicates were removed. Df shape = {}'.format(nb_duplicates, df.shape))
return df
def merge_df_by_index(df, df_2, method='outer'):
''' merge 2 dataframe together on index, while making sure that there are no duplicates.
Parameters
----------
df : pandas dataframe
df_2 : pandas dataframe
'''
df_merged = df.join(df_2, how=method)
# delete nan if exists
len1 = df_merged.__len__()
df_merged = df_merged.dropna(axis=0, how='any')
len2 = df_merged.__len__()
print('{} nan were removed'.format(len1 - len2))
# delete duplicates
nb_duplicates = (df_merged.index.duplicated() == True).sum()
df_merged = df_merged[df_merged.index.duplicated() == False]
print('{} duplicates were removed. Df shape = {}'.format(nb_duplicates, df_merged.shape))
return df_merged
def compute_store_tiv(df):
''' compute TIV and store it in the df
Parameters
----------
df : dataframe including the path of gm, wm and lcr per subject
'''
# create new columns to store the tiv
df['TIV'] = '*'
df['TIV_gm'] = '*'
df['TIV_wm'] = '*'
df['TIV_lcr'] = '*'
# store the voxel size for future checking
dim_expected = nib.load(df['greyMatter_path'].iloc[0]).header.values()[15]
# iterate across subject to extract, compute and store the TIV
for ind, row in enumerate(df.index):
# load nifti images
smwc1 = nib.load(df['greyMatter_path'].loc[row])
smwc2 = nib.load(df['whiteMatter_path'].loc[row])
smwc3 = nib.load(df['LCR_path'].loc[row])
# check voxel dimension
assert smwc1.header.values()[15].sum() == dim_expected.sum()
assert smwc2.header.values()[15].sum() == dim_expected.sum()
assert smwc3.header.values()[15].sum() == dim_expected.sum()
# compute TIV
tiv1 = smwc1.get_data().sum()/1000 # grey matter
tiv2 = smwc2.get_data().sum()/1000 # white matter
tiv3 = smwc3.get_data().sum()/1000 # LCR
TIV = tiv1 + tiv2 + tiv3 # total
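# NOTE: the /1000 above converts the voxel-wise sums to cm^3 (ml), assuming ~1 mm^3 voxels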
assert TIV != 0, "Problem with participant {}".format(row)
assert tiv1 != 0, "Problem with participant {}".format(row)
assert tiv2 != 0, "Problem with participant {}".format(row)
assert tiv3 != 0, "Problem with participant {}".format(row)
# online checking
print(ind, ' / ', len(df.index))
print("Sub ", row, " TIV = ", TIV)
# store TIV in df
df['TIV_gm'].loc[row] = tiv1
df['TIV_wm'].loc[row] = tiv2
df['TIV_lcr'].loc[row] = tiv3
df['TIV'].loc[row] = TIV
assert '*' not in df.values, "A tiv value seems to be missing"
return df
def list_outliers(df, percentile=5):
''' list participants with the lowest and highest tiv total, tiv gm, tiv wm and tiv lcr for
manual checking of the volumes.
Parameters
----------
df : dataframe including the TIV + the TIV of gm, wm and lcr per subject.
percentile : int, optional
percentile to which extract the most extreme TIV values
Default=5.
'''
outliers = {}
limit_above = 100 - percentile
limit_below = percentile
outliers['below_TIV'] = df['TIV'][df['TIV'] < np.percentile(df['TIV'], limit_below, interpolation = 'midpoint')]
outliers['above_TIV'] = df['TIV'][df['TIV'] > np.percentile(df['TIV'], limit_above, interpolation = 'midpoint')]
outliers['below_TIV_lcr'] = df['TIV_lcr'][df['TIV_lcr'] < np.percentile(df['TIV_lcr'], limit_below, interpolation = 'midpoint')]
outliers['above_TIV_lcr'] = df['TIV_lcr'][df['TIV_lcr'] > np.percentile(df['TIV_lcr'], limit_above, interpolation = 'midpoint')]
outliers['below_TIV_gm'] = df['TIV_gm'][df['TIV_gm'] < np.percentile(df['TIV_gm'], limit_below, interpolation = 'midpoint')]
outliers['above_TIV_gm'] = df['TIV_gm'][df['TIV_gm'] > np.percentile(df['TIV_gm'], limit_above, interpolation = 'midpoint')]
outliers['below_TIV_wm'] = df['TIV_wm'][df['TIV_wm'] < np.percentile(df['TIV_wm'], limit_below, interpolation = 'midpoint')]
outliers['above_TIV_wm'] = df['TIV_wm'][df['TIV_wm'] > np.percentile(df['TIV_wm'], limit_above, interpolation = 'midpoint')]
print(outliers)
return outliers
################################ CLASSICAL STATISTICS TIV AGE AND SEX #########################
def plot_tif_info(df):
''' Check and plot age/sex/tiv relationships.
Parameters
----------
df : dataframe including TIV, TIV_gm, TIV_wm, TIV_lcr, Sex, and Age.
'''
age = df['Age']
sex = df['Sex']
sex[sex == 'M'] = 1
sex[sex == 'F'] = 0
tiv = df['TIV']
tiv_gm = df['TIV_gm']
tiv_wm = df['TIV_wm']
tiv_lcr = df['TIV_lcr']
# check age // tiv
X = age.values.astype('float64')
Y = tiv.values.astype('float64')
# Create a data frame containing all the relevant variables
data = pd.DataFrame({'x': X, 'y': Y})
model = ols("y ~ x", data).fit()
# print(model.summary())
pvalAgeTIV = model.f_pvalue
# check age // tiv_gm
X = age.values.astype('float64')
Y = tiv_gm.values.astype('float64')
# Create a data frame containing all the relevant variables
data = pd.DataFrame({'x': X, 'y': Y})
model = ols("y ~ x", data).fit()
# print(model.summary())
pvalAgeTIVgm = model.f_pvalue
# check age // tiv_wm
X = age.values.astype('float64')
Y = tiv_wm.values.astype('float64')
# Create a data frame containing all the relevant variables
data = pd.DataFrame({'x': X, 'y': Y})
model = ols("y ~ x", data).fit()
# print(model.summary())
pvalAgeTIVwm = model.f_pvalue
# check age // tiv_lcr
X = age.values.astype('float64')
Y = tiv_lcr.values.astype('float64')
# Create a data frame containing all the relevant variables
data = pd.DataFrame({'x': X, 'y': Y})
model = ols("y ~ x", data).fit()
# print(model.summary())
pvalAgeTIVlcr = model.f_pvalue
# check sex // tiv
X = sex.values.astype(str)
Y = tiv.values.astype('float64')
# Create a data frame containing all the relevant variables
data = pd.DataFrame({'x': X, 'y': Y})
model = ols("y ~ C(x)", data).fit()
# print(model.summary())
pvalSexTIV = model.f_pvalue
# check sex // tiv_gm
X = sex.values.astype(str)
Y = tiv_gm.values.astype('float64')
# Create a data frame containing all the relevant variables
data = pd.DataFrame({'x': X, 'y': Y})
model = ols("y ~ C(x)", data).fit()
# print(model.summary())
pvalSexTIVgm = model.f_pvalue
# check sex // tiv_wm
X = sex.values.astype(str)
Y = tiv_wm.values.astype('float64')
# Create a data frame containing all the relevant variables
data = pd.DataFrame({'x': X, 'y': Y})
model = ols("y ~ C(x)", data).fit()
# print(model.summary())
pvalSexTIVwm = model.f_pvalue
# check sex // tiv_lcr
X = sex.values.astype(str)
Y = tiv_lcr.values.astype('float64')
# Create a data frame containing all the relevant variables
data = pd.DataFrame({'x': X, 'y': Y})
model = ols("y ~ C(x)", data).fit()
# print(model.summary())
pvalSexTIVlcr = model.f_pvalue
# only age and TIV
df['Sex'][df['Sex'] == 1] = 'M'
df['Sex'][df['Sex'] == 0] = 'F'
data = pd.DataFrame({'sex': df['Sex'].values.astype(str),
'Age':df['Age'].values.astype('float64'),
'TIV': df['TIV'].values.astype('float64')})
fig, ax = plt.subplots()
ax = sns.pairplot(data, hue='sex', kind='reg')
ax.fig.text(0.65, 0.85,"Age-TIV p={}".format(pvalAgeTIV), fontsize=9)
ax.fig.text(0.65, 0.77,"Sex-TIV p={}".format(pvalSexTIV), fontsize=9)
plt.savefig("lMCI/lMCI_agesextiv.png")
plt.show()
ax = sns.violinplot(x="sex", y="TIV",
data=data, palette="Set2", split=True,
scale="count", inner="stick", scale_hue=False)
plt.savefig("lMCI/lMCI_sextiv.png")
plt.show()
# all in
df['Sex'][df['Sex'] == 1] = 'M'
df['Sex'][df['Sex'] == 0] = 'F'
data = pd.DataFrame({'sex': df['Sex'].values.astype(str),
'Age':df['Age'].values.astype('float64'),
'TIV_gm': df['TIV_gm'].values.astype('float64'),
'TIV_wm': df['TIV_wm'].values.astype('float64'),
'TIV_lcr': df['TIV_lcr'].values.astype('float64')})
fig, ax = plt.subplots()
ax = sns.pairplot(data, hue='sex', kind='reg')
ax.fig.text(0.65, 0.83,"Age-TIV_gm p={}".format(pvalAgeTIVgm), fontsize=9)
ax.fig.text(0.65, 0.81,"Age-TIV_wm p={}".format(pvalAgeTIVwm), fontsize=9)
ax.fig.text(0.65, 0.79,"Age-TIV_lcr p={}".format(pvalAgeTIVlcr), fontsize=9)
ax.fig.text(0.65, 0.75,"Sex-TIV_gm p={}".format(pvalSexTIVgm), fontsize=9)
ax.fig.text(0.65, 0.73,"Sex-TIV_wm p={}".format(pvalSexTIVwm), fontsize=9)
ax.fig.text(0.65, 0.71,"Sex-TIV_lcr p={}".format(pvalSexTIVlcr), fontsize=9)
plt.savefig("lMCI/lMCI_agesextiv_details.png")
plt.show()
def fit_atlas(atlas, df, saving_path=None, strategy='sum', show=0, labels=None):
''' masking of the nifti of the participant cortex to extract ROI from atlas.
Parameters
----------
atlas : mask used to extract ROIs
must be file from nilearn.datasets.fetch_atlas_[...].
df : dataframe including a 'greyMatter_path' column per subject (path to nifti cortex file).
saving_path : str, optional
if not None, save the dataframe with grey matter quantity per ROI per subject.
strategy : define how the quantity of grey matter per voxel is summarized for each ROI
Must be one of: sum, mean, median, minimum, maximum, variance, standard_deviation
Default='sum'.
show : 0 or 1
plot the atlas onto the grey matter nifti file of the first subject to check the fit
Default=0
'''
# extract grey matter volume for atlas ROI
atlas_filename = atlas.maps
# first subject nifti load to get affine and check mask fit
tmp_nii = nib.load(df["greyMatter_path"].values[0])
# show the mask fit onto a participant brain
if show == 1:
plotting.plot_img(tmp_nii).add_overlay(atlas_filename, cmap=plotting.cm.black_blue)
plt.show()
# shared affine for mask and cortex scan
ratlas_nii = resample_img(
atlas.maps, target_affine=tmp_nii.affine, interpolation='nearest')
# the grey matter volume will be stored in the FS list
FS = []
# iterate through each subject
for i_nii, nii_path in enumerate(df["greyMatter_path"].values):
# check CPU usage
print('The CPU usage is: ', psutil.cpu_percent(4))
print(i_nii, ' / ', df["greyMatter_path"].__len__())
# summarize grey matter quantity into each ROI
nii = nib.load(nii_path)
masker = NiftiLabelsMasker(labels_img=ratlas_nii, standardize=False, strategy=strategy)
# extract each roi grey matter quantity as list (of length the number of ROIs)
cur_FS = masker.fit_transform(nii)
FS.append(cur_FS)
print(cur_FS)
FS = np.array(FS).squeeze()
df_FS = pd.DataFrame(index=df.index, columns=labels, data=FS)
# check quality of dataframization
assert df_FS.iloc[-1].values.sum() == FS[-1].sum()
# remove duplicates
assert (df_FS.index.duplicated() == True).sum() == 0
# save if asked
if saving_path != None:
df_FS.to_excel(saving_path)
FS = None # empty memory
return df_FS
def clean_signal(df, nb_rois, labels):
''' clean signal using nilearn.signal.clean function.
Regresses out variance that can be explained by the confounds "Age" and "TIV".
Parameters
----------
df : pandas dataframe including the signal with ROIs numbered from 0 to len(nb_rois)
plus a column for 'TIV' and for 'Age'.
It should also include a column for 'TIV_gm', 'TIV_wm', 'TIV_lcr', 'Group', 'Sex'.
nb_rois : the number of ROIs from the used atlas.
'''
# extract signal
FS = df[labels].values
# extract confound
confounds = df[['TIV', 'Age']].values
# clean signal from confound explained variance
FS_cleaned = clean(FS, confounds=confounds, detrend=False)
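# clean() regresses the confound columns (TIV, Age) out of each ROI column; detrend is
# disabled, presumably because these are cross-sectional volumes rather than time series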
# restore into a dataframe
df_cleaned = pd.DataFrame(columns=labels, index=df.index, data=FS_cleaned)
info = df[['TIV', 'TIV_gm', 'TIV_wm', 'TIV_lcr', 'Group', 'Sex', 'Age', 'ADNI_MEM', 'ADNI_EF']]
df_cleaned = merge_df_by_index(info, df_cleaned)
# check quality of dataframization
assert FS_cleaned[0][10] == df_cleaned[labels[10]].iloc[0]
return df_cleaned
def plot_matrice(df, labels, title, saving_path=None, show=1):
''' Plot the matrices
Parameters
----------
df : pandas dataframe including the signal with ROIs numbered from 0 to len(nb_rois)
labels : lst
atlas ROIs name
title : str
Title for the plot
saving_path : str
Path to save the covariance matrix plot
show : 0 or 1
plot the matrices
Default=1
'''
sns.set_theme(style="white")
mask = np.triu(np.ones_like(df, dtype=bool))
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(230, 20, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(df, mask=mask, cmap=cmap, center=0,
square=True, linewidths=.5, xticklabels=True, yticklabels=True, cbar_kws={"shrink": .5})
plt.title(title)
plt.tight_layout()
if saving_path != None:
plt.savefig(saving_path)
if show == 1:
plt.show()
def permutation_testing(df1, df2, model, nb_rois, prec=False, n_perm = 1000):
''' Permutation testing to check for significance between two covariation matrices
Parameters
----------
df1 : pandas dataframe including the signal with ROIs numbered from 0 to len(nb_rois)
df2 : pandas dataframe including the signal with ROIs numbered from 0 to len(nb_rois)
model: function to apply to get the covariance, must be a .fit() method
nb_rois : the number of ROIs from the used atlas.
labels : lst
atlas ROIs name
prec : Boolean
Compute the significance test for the precision matrix
if set to True, output contains 2 matrices, the cov and the prec significance matrix
Default=False
n_perm : int
Permutation iteration number
Default=1000
'''
np.random.seed(0)
h_precs, h_covs = [], []
FS_1 = df1[range(nb_rois+1)].values # h0 distribution
FS_2 = df2[range(nb_rois+1)].values
n_low_samples = len(FS_1)
# NOTE: the original script never defined cov_high/prec_high; fitting the second group
# here is the presumed intent (FS_2 was otherwise unused)
gsc_high = model.fit(FS_2)
cov_high = gsc_high.covariance_
if prec==True:
prec_high = gsc_high.precision_
for p in range(n_perm):
print('Bootstrapping iteration: {}/{}'.format(p + 1, n_perm))
new_inds = np.random.randint(0, n_low_samples, n_low_samples)
bs_sample = FS_1[new_inds]
gsc_1 = model.fit(bs_sample)
h_covs.append(gsc_1.covariance_)
if prec==True:
h_precs.append(gsc_1.precision_)
tril_inds = np.tril_indices_from(h_covs[0])
margin = (1. / n_perm) * 100 / 2
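# two-sided test: covariances falling outside the central [margin, 100 - margin] percentile
# band of the bootstrap distribution are flagged as significant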
covs_ravel = np.array([cov[tril_inds] for cov in h_covs])
cov_test_high = cov_high[tril_inds]
cov_sign1 = cov_test_high < np.percentile(covs_ravel, margin, axis=0)
cov_sign2 = cov_test_high > np.percentile(covs_ravel, 100 - margin, axis=0)
cov_sign = np.zeros_like(cov_high)
cov_sign[tril_inds] = np.logical_or(cov_sign1, cov_sign2)
if prec==True:
precs_ravel = np.array([prec[tril_inds] for prec in h_precs])
prec_test_high = prec_high[tril_inds]
prec_sign1 = prec_test_high < np.percentile(precs_ravel, margin, axis=0)
prec_sign2 = prec_test_high > np.percentile(precs_ravel, 100 - margin, axis=0)
prec_sign = np.zeros_like(prec_high)
prec_sign[tril_inds] = np.logical_or(prec_sign1, prec_sign2)
return cov_sign, prec_sign
return cov_sign
def louvainize(df_cov, title,saving_path=None):
''' Compute network graph, then louvain community
and reorganized it into a matrix, and plot it
Parameters
----------
df_cov : pandas dataframe of the covariance matrix (n_roi*n_roi)
saving_path : str
if not None, saved at the given path
title : str
title for the final matrix
'''
# compute the best partition
G = nx.from_numpy_matrix(df_cov.values)
nx.draw(G, with_labels=True)
partition = community.best_partition(G, random_state=0)
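# best_partition maps each node index to a community id; random_state pins the Louvain
# heuristic so the reordering below is reproducible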
louvain = np.zeros(df_cov.values.shape).astype(df_cov.values.dtype)
labels = df_cov.columns
labels_new_order = []
i = 0
# iterate through all created community
for values in np.unique(list(partition.values())):
# iterate through each ROI
for key in partition:
if partition[key] == values:
louvain[i] = df_cov.values[key]
labels_new_order.append(labels[key])
i += 1
# check positioning from original matrix to louvain matrix
# get index of first roi linked to community 0
index_roi_com0_louvain = list(partition.values()).index(0)
# get nb of roi in community 0
nb_com0 = np.unique(list(partition.values()), return_counts=True)[1][0]
# get index of first roi linked to community 1
index_roi_com1_louvain = list(partition.values()).index(1)
assert louvain[0].sum() == df_cov.values[index_roi_com0_louvain].sum()
assert louvain[nb_com0].sum() == df_cov.values[index_roi_com1_louvain].sum()
df_louvain = pd.DataFrame(index=labels_new_order, columns=labels_new_order, data=louvain)
df_louvain.to_excel("lMCI/df_{}.xlsx".format(title))
plot_matrice(df_louvain, labels_new_order, title, saving_path=saving_path, show=0)
def return_all_plot(df_FS):
''' Compute covariance matrices and plot them
Parameters
----------
df_FS : dataframe of grey matter values per ROI per subject.
'''
# # clean for age and tiv
# df_FS_cleaned = clean_signal(df_FS, nb_rois, labels)
# ledoiwolf covariance
labels = df_FS.columns
matrix = LedoitWolf().fit(df_FS)
cov = matrix.covariance_
df_ledoit_cov = pd.DataFrame(index=labels, columns=labels, data=cov)
df_ledoit_cov.to_excel("lMCI/ledoiwolf_cov.xlsx")
plot_matrice(df_ledoit_cov, labels, "ledoiwolf_cov", saving_path="lMCI/ledoiwolf_cov.png", show=0)
louvainize(df_ledoit_cov, "Louvain_LedoitWolf", "lMCI/Louvain_LedoitWolf.png")
prec = matrix.precision_
df_prec = pd.DataFrame(index=labels, columns=labels, data=prec)
df_prec.to_excel("lMCI/ledoiwolf_prec.xlsx")
plot_matrice(df_prec, labels, "ledoiwolf_prec", saving_path="lMCI/ledoiwolf_prec.png", show=0)
louvainize(df_prec, "Louvain_LedoitWolf_prec", "lMCI/Louvain_LedoitWolf_prec.png")
# pearson
pearson = np.corrcoef(df_FS.values.T)
df_pearson = pd.DataFrame(index=labels, columns=labels, data=pearson)
df_pearson.to_excel("lMCI/pearson.xlsx")
plot_matrice(df_pearson, labels, "pearson", saving_path="lMCI/pearson.png", show=0)
louvainize(df_pearson, "Louvain_Pearson", "lMCI/Louvain_Pearson.png")
# covariance
cov = np.cov(df_FS.values.T)
df_cov = pd.DataFrame(index=labels, columns=labels, data=cov)
df_cov.to_excel("lMCI/cov.xlsx")
plot_matrice(df_cov, labels, "cov", saving_path="lMCI/cov.png", show=0)
louvainize(df_cov, "Louvain_cov", "lMCI/Louvain_cov.png")
# extract all file from folder
files = glob.glob('C:\\Users\\lefortb211\\Downloads\\ADNI_lMCI_smwc\\*')
# Sort and store nifti paths of gm, wm, and lcr into a dataframe
df = dataframize_nii_paths(files)
# compute TIV and store it in the df
df = compute_store_tiv(df)
# list most extreme values for TIV, tiv gm, tiv wm, tiv lcr
outliers = list_outliers(df)
# add age group and sex information
df_demog = pd.read_csv("lMCI/lMCI_3T_6_23_2021.csv")
df_demog = df_demog[['Subject', 'Group', 'Sex', 'Age']] # subject, group, sex, age
df_demog = df_demog.set_index('Subject')
df = merge_df_by_index(df, df_demog)
# add executive scores (file -containing each diag info- in CN folder to avoid duplicates)
df_executive = pd.read_excel("CN/score_ex.xlsx")
df_executive = df_executive.set_index('PTID')
df = merge_df_by_index(df, df_executive)
df_high = df[df['ADNI_EF'] >= df['ADNI_EF'].median()]
df_low = df[df['ADNI_EF'] < df['ADNI_EF'].median()]
dic = {}  # NOTE: `dic` was never defined earlier in this script; initialised here to avoid a NameError
dic['lMCI'] = [df_high['ADNI_EF'].values, df_low['ADNI_EF'].values]
# plot relationships between tiv age and sex
plot_tif_info(df)
atlas = ds.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm', symmetric_split=True)
labels = atlas.labels[1:]
nb_rois = len(labels)
df_FS = fit_atlas(atlas, df, saving_path='lMCI/df_FS.xlsx', labels=labels)
# standardize values
df_FS_ss = pd.DataFrame(columns=df_FS.columns, index=df_FS.index, data=StandardScaler().fit_transform(df_FS.values))
info = df[['TIV', 'TIV_gm', 'TIV_wm', 'TIV_lcr', 'Group', 'Sex', 'Age', 'ADNI_MEM', 'ADNI_EF']]
df = merge_df_by_index(info, df_FS_ss)
return_all_plot(df_FS_ss)
plt.close('all')
###############################
### CHECKING LOUVAIN COMMUs ###
###############################
def niftiise_louvain_community(df_cov, saving_path=None):
''' Compute network graph, then louvain community
and save as nifti image
Parameters
----------
df_cov : pandas dataframe of the covariance matrix (n_roi*n_roi)
saving_path : str
if not None, saved at the given path as .nii
'''
# reload atlas
atlas = ds.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm', symmetric_split=True)
labels = atlas.labels[1:]
# compute the best partition
G = nx.from_numpy_matrix(df_cov.values)
nx.draw(G, with_labels=True)
partition = community.best_partition(G, random_state=0)
atlas_nii = atlas.maps
voxelData = atlas_nii.get_data()
for ind, label in enumerate(labels):
voxelData[voxelData == ind + 1] = partition[ind] + 100 # +100 to avoid mixing ROI values with partition numbers
nb_partition = np.unique(list(partition.values())).__len__()
for nb in range(nb_partition):
voxelData[voxelData == 100 + nb] = nb + 1 # get back to partition number from 1 to nb of partition
# save a nifiti image
partition_nifti = nib.Nifti1Image(
voxelData, affine=atlas_nii.affine)
partition_nifti.to_filename(saving_path) # transform as nii and save
print("nb of partition = ", nb_partition)
print("unique partition in final nifti file = ", np.unique(voxelData))
df_ledoiwolf_cov = pd.read_excel("lMCI/ledoiwolf_cov.xlsx")
df_ledoiwolf_cov = df_ledoiwolf_cov.set_index('Unnamed: 0')
niftiise_louvain_community(df_ledoiwolf_cov, saving_path="lMCI/ledoiwolf_cov_partition_nifti.nii")
df_ledoiwolf_prec = pd.read_excel("lMCI/ledoiwolf_prec.xlsx")
df_ledoiwolf_prec = df_ledoiwolf_prec.set_index('Unnamed: 0')
niftiise_louvain_community(df_ledoiwolf_prec, saving_path="lMCI/ledoiwolf_prec_partition_nifti.nii")
df_pearson = pd.read_excel("lMCI/pearson.xlsx")
df_pearson = df_pearson.set_index('Unnamed: 0')
niftiise_louvain_community(df_pearson, saving_path="lMCI/pearson_partition_nifti.nii")
df_cov = pd.read_excel("lMCI/cov.xlsx")
df_cov = df_cov.set_index('Unnamed: 0')
niftiise_louvain_community(df_cov, saving_path="lMCI/cov_partition_nifti.nii")
| null |
v0.1/lMCI/ADNI_lMCI_matrix_refactorized.py
|
ADNI_lMCI_matrix_refactorized.py
|
py
| 23,183 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.random.seed",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame.from_dict",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "nibabel.load",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "nibabel.load",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "nibabel.load",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "nibabel.load",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "statsmodels.formula.api.ols",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "statsmodels.formula.api.ols",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "statsmodels.formula.api.ols",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "statsmodels.formula.api.ols",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "statsmodels.formula.api.ols",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "statsmodels.formula.api.ols",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "statsmodels.formula.api.ols",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "statsmodels.formula.api.ols",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab.subplots",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 262,
"usage_type": "name"
},
{
"api_name": "seaborn.pairplot",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab.savefig",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 266,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.show",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 267,
"usage_type": "name"
},
{
"api_name": "seaborn.violinplot",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab.savefig",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 272,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.show",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 273,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab.subplots",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 283,
"usage_type": "name"
},
{
"api_name": "seaborn.pairplot",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab.savefig",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 291,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.show",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 292,
"usage_type": "name"
},
{
"api_name": "nibabel.load",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "nilearn.plotting.plot_img",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "nilearn.plotting",
"line_number": 325,
"usage_type": "name"
},
{
"api_name": "nilearn.plotting.cm",
"line_number": 325,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pylab.show",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 326,
"usage_type": "name"
},
{
"api_name": "nilearn.image.resample_img",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "psutil.cpu_percent",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "nibabel.load",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "nilearn.input_data.NiftiLabelsMasker",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "nilearn.signal.clean",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "seaborn.set_theme",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "numpy.triu",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab.subplots",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 412,
"usage_type": "name"
},
{
"api_name": "seaborn.diverging_palette",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "seaborn.heatmap",
"line_number": 416,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab.title",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 418,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.tight_layout",
"line_number": 419,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 419,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.savefig",
"line_number": 421,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 421,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.show",
"line_number": 423,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 423,
"usage_type": "name"
},
{
"api_name": "numpy.random.seed",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 451,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 458,
"usage_type": "attribute"
},
{
"api_name": "numpy.tril_indices_from",
"line_number": 465,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 467,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "numpy.logical_or",
"line_number": 472,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 475,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 478,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 479,
"usage_type": "call"
},
{
"api_name": "numpy.logical_or",
"line_number": 480,
"usage_type": "call"
},
{
"api_name": "networkx.from_numpy_matrix",
"line_number": 501,
"usage_type": "call"
},
{
"api_name": "networkx.draw",
"line_number": 502,
"usage_type": "call"
},
{
"api_name": "community.best_partition",
"line_number": 503,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 504,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 509,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 520,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 526,
"usage_type": "call"
},
{
"api_name": "sklearn.covariance.LedoitWolf",
"line_number": 545,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 547,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 555,
"usage_type": "call"
},
{
"api_name": "numpy.corrcoef",
"line_number": 561,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 562,
"usage_type": "call"
},
{
"api_name": "numpy.cov",
"line_number": 568,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 569,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 576,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 584,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 589,
"usage_type": "call"
},
{
"api_name": "nilearn.datasets.fetch_atlas_harvard_oxford",
"line_number": 602,
"usage_type": "call"
},
{
"api_name": "nilearn.datasets",
"line_number": 602,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 607,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 607,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab.close",
"line_number": 611,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 611,
"usage_type": "name"
},
{
"api_name": "nilearn.datasets.fetch_atlas_harvard_oxford",
"line_number": 632,
"usage_type": "call"
},
{
"api_name": "nilearn.datasets",
"line_number": 632,
"usage_type": "name"
},
{
"api_name": "networkx.from_numpy_matrix",
"line_number": 635,
"usage_type": "call"
},
{
"api_name": "networkx.draw",
"line_number": 636,
"usage_type": "call"
},
{
"api_name": "community.best_partition",
"line_number": 637,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 642,
"usage_type": "call"
},
{
"api_name": "nibabel.Nifti1Image",
"line_number": 646,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 651,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 653,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 657,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 661,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 665,
"usage_type": "call"
}
] |
419445144
|
from typing import List, Union, Tuple
import numpy as np
import random
from tqdm import tqdm
class Variable:
def __init__(self, value=None, grad=None):
self.value = value
self.grad = grad
class Polynomial:
def __init__(self, a: List = None):
self.coef = np.array(a)
def __call__(self, x: Union[float, int]) -> Variable:
x = np.array(x)
base = np.repeat(x, len(self.coef)).reshape(x.size, len(self.coef))
exponent = np.arange(0, len(self.coef))
# get the power from base and exponent, power=base^exponent
power = np.power(base, exponent)
# get the result of the polynomial
y = np.sum(np.multiply(self.coef, power), axis=1)
# get the coefficients of the derivative polynomial
coef_diff = np.multiply(self.coef[1:], np.arange(1, len(self.coef)))
base_diff = np.repeat(x, len(coef_diff)).reshape(
x.size, len(coef_diff))
exponent_diff = np.arange(0, len(coef_diff))
# get power
power_diff = np.power(base_diff, exponent_diff)
# evaluate the derivative polynomial at x
y_diff = np.sum(np.multiply(coef_diff, power_diff), axis=1)
return Variable(y, y_diff)
def regression_sgd(x, y, num_samples, num_iterations, batch_size, learning_rate) -> Tuple[np.ndarray, np.ndarray]:
# declare m and b
m = np.empty(num_iterations + 1, dtype=np.float64)
b = np.empty(num_iterations + 1, dtype=np.float64)
# init m and b
m[0] = random.random()
b[0] = random.random()
for iteration in tqdm(range(num_iterations)):
# randomly select batch_size data
batch_index = random.sample(range(0, num_samples), batch_size)
batch_x, batch_y = x[batch_index], y[batch_index]
# get gradient of m and b
m_gradient = 1/num_samples * \
np.sum(2*batch_x*(m[iteration]*batch_x+b[iteration]-batch_y))
b_gradient = 1/num_samples * \
np.sum(2*(m[iteration]*batch_x+b[iteration]-batch_y))
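# NOTE: the gradients are scaled by 1/num_samples although only batch_size points are
# summed; dividing by batch_size would give the usual unbiased mini-batch average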
# update parameters by SGD
m[iteration+1] = m[iteration] - learning_rate * m_gradient
b[iteration+1] = b[iteration] - learning_rate * b_gradient
print(f"the final parameters: (m, b)=({m[-1]}, {b[-1]})")
return m, b
| null |
libs/functional.py
|
functional.py
|
py
| 2,287 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "typing.List",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.repeat",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.multiply",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.multiply",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.repeat",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.multiply",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "numpy.empty",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "random.random",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "typing.Tuple",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 48,
"usage_type": "attribute"
}
] |
168255906
|
from time import time, sleep
from enum import Enum
from sqlalchemy import update
from flask_sqlalchemy import SQLAlchemy
from celery import Celery
import matplotlib.pyplot as plt
import io
import http
import numpy as np
import redis
from helper import *
from forms import *
from models import *
# How to start workers
# celery -A app.celery worker --loglevel=info
import sys
# sys.path.append('/mnt/c/peter_abaqus/Summer-Research-Project/')
# from my_meep.main import wsl_main
# from my_meep.configs import config
celery = Celery(app.name, broker=app.config['CELERY_BROKER_URL'])
celery.conf.update(app.config)
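# NOTE: assumes CELERY_BROKER_URL in the Flask config points at a reachable broker
# (e.g. Redis or RabbitMQ) before any worker is started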
class ReusableForm(Form):
name = TextField('Name:', validators=[validators.DataRequired()])
@app.route("/", methods=['GET', 'POST'])
def index():
form = ReusableForm(request.form)
print(form.errors)
if request.method == 'POST':
name = request.form['name']
print(name)
if form.validate():
# Save the comment here.
#load the model and create script
message = 'long_task peter'
flash(message)
else:
flash('Error: All the form fields are required. ')
return render_template('index.jinja', form=form)
@celery.task(bind=True)
def long_task(self, sections):
_config = create_configs(sections, config=config)
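    # NOTE: `config` is expected to come from one of the star imports above; the explicit
    # `from my_meep.configs import config` import is commented out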
message = ''
print('hello')
# for i, total, res in wsl_main(web_config=_config):
# i = int(i+1)
# total = int(total)
# if not message:
# message = str(res)
# self.update_state(state='PROGRESS',
# meta={'current': i, 'total': total,
# 'status': message})
# total = np.random.randint(10, 50)
# for i in range(total):
# if not message or np.random.random() < 0.25:
# message = 'hi'
# self.update_state(state='PROGRESS',
# meta={'current': i, 'total': total,
# 'status': message})
# sleep(0.2)
# the demo progress loops above are commented out, so report the task as complete directly
return {'current': 1, 'total': 1, 'status': 'Task completed!',
'result': 42}
@app.route('/longtask', methods=['POST'])
def longtask():
sections = ['Visualization', 'General',
'Geometry', 'Simulation', 'Source']
task = long_task.apply_async(kwargs={'sections': sections})
return jsonify({}), 202, {'Location': url_for('taskstatus',
task_id=task.id)}
@app.route('/status/<task_id>')
def taskstatus(task_id):
task = long_task.AsyncResult(task_id)
if task.state == 'PENDING':
response = {
'state': task.state,
'current': 0,
'total': 1,
'status': 'Pending...'
}
elif task.state != 'FAILURE':
response = {
'state': task.state,
'current': task.info.get('current', 0),
'total': task.info.get('total', 1),
'status': task.info.get('status', '')
}
if 'result' in task.info:
response['result'] = task.info['result']
else:
# something went wrong in the background job
response = {
'state': task.state,
'current': 1,
'total': 1,
'status': str(task.info), # this is the exception raised
}
return jsonify(response)
@app.route('/home')
def home():
return render_template('home.html')
def create_configs(sections, config):
for s in sections:
_input_data_list = input_data_list.query.filter_by(section=s).first()
_radio_check_data_list = radio_check_data_list.query.filter_by(
section=s).first()
sections_code_web = ['Visualization', 'General',
'Geometry', 'Simulation', 'Source']
sections_code_config = ['visualization',
'general', 'geo', 'sim', 'source']
index = sections_code_web.index(s)
s = sections_code_config[index]
for v in _input_data_list.values:
config.set(s, v.spec, ', '.join(
[str(v.start), str(v.stop), str(v.steps)]))
for v in _input_data_list.values1:
config.set(s, v.spec, str(v.val))
for v in _radio_check_data_list.radio:
config.set(s, v.spec, v.default)
for v in _radio_check_data_list.check:
config.set(s, v.spec, v.default)
return config
def process(section, _fields):
_input_data_list = input_data_list.query.filter_by(section=section).first()
_radio_check_data_list = radio_check_data_list.query.filter_by(
section=section).first()
if len(_input_data_list.values) == 0:
for key, val in _fields['input_range'].items():
_input_data_list.values.append(input_data(
spec=key, start=val[0], stop=val[1], steps=val[2]))
for key, val in _fields['input'].items():
_input_data_list.values1.append(input_data1(spec=key, val=val))
for key, val in _fields['radio'].items():
_radio_check_data_list.radio.append(
radio_data(spec=key, default=val[0]))
for key, val in _fields['check'].items():
_radio_check_data_list.check.append(
check_data(spec=key, default=val[0]))
form = input_form_list(request.form, obj=_input_data_list)
form.active = section
radio_list = []
if request.method != 'POST':
for i, iter in enumerate(_fields['radio'].items()):
key, val = iter
class f(Form):
name = key
setattr(f, key, fields.RadioField(
label=key, default=_radio_check_data_list.radio[i].default, choices=val[1], _name=key))
f_obj = f()
# f_obj.a.default = val[0]
# f_obj.__init__()
radio_list.append(f_obj)
elif form.validate():
for i, iter in enumerate(_fields['radio'].items()):
key, val = iter
class f(Form):
name = key
setattr(f, key, fields.RadioField(
label=key, default=request.form[key], choices=val[1], _name=key))
f_obj = f()
# f_obj.a.default = val[0]
# f_obj.__init__()
radio_list.append(f_obj)
if request.method == 'POST' and form.validate():
for i, d in enumerate(_radio_check_data_list.radio):
_radio_check_data_list.radio[i].default = request.form[_radio_check_data_list.radio[i].spec]
form.populate_obj(_input_data_list)
db.session.commit()
# print('form is: ')
# for key in form:
# print(key)
# print('Val form is:')
# for key in request.form:
# print(key, request.form[key])
# print(_input_data_list.values[0].start)
# flash("Saved Changes")
elif request.method == 'POST':
print('error is', form.errors)
# print('Val form is:')
# for key in request.form:
# print(key, request.form[key])
# for k in request.form['radio-0']:
# print('error', k)
if request.method == 'POST' and form.validate() and request.form['action'] == 'Simulate':
return ('', http.HTTPStatus.NO_CONTENT)
return render_template('features.html', title=section, form=form, radio_list=radio_list)
@app.route('/visualization', methods=['GET', 'POST'])
def visualization():
info = {
'check': {
},
'radio': {
'structure': ('False', ('True', 'False')),
'transiant': ('False', ('True', 'False')),
'rms': ('True', ('True', 'False')),
'view_only_particles': ('True', ('True', 'False')),
'log_res': ('False', ('True', 'False')),
},
'input_range': {
'viz_offset': [0, 0, 0]
},
'input': {
'3d_plotting_axis': 1,
'frame_speed': 200,
}
}
return process('Visualization', info)
@app.route('/geometry', methods=['GET', 'POST'])
def geometry():
info = {
'check': {
},
'radio': {
# 'shape_types_dummy': ('cube', ('sphere', 'triangle', 'hexagon', 'cube'))
},
'input_range': {
'particle_size': [0.05, 0.12, 1],
'x_loc': [0, -3.4, 1],
'distance': [1, 3, 1],
'fill_factor': [0.5, 0.7, 1],
'std': [0.1, 0.3, 1],
'solid_center': [-2, 0, 0],
'cell_size': [10, 10, 10],
'rotation': [0, 60, 1]
},
'input': {
'pml_thick': 0.5,
'num_particles': 2,
}
}
return process('Geometry', info)
@app.route('/simulation', methods=['GET', 'POST'])
def simulation():
info = {
'check': {
},
'radio': {
'sim': ('checker', ('checker', 'simple shape', 'voronoi'))
},
'input_range': {
},
'input': {
'dimension': 2,
'resolution': 60,
'change_res': 1,
'time': 1000,
'out_every': 0.2,
'save_every': 30
}
}
return process('Simulation', info)
@app.route('/general', methods=['GET', 'POST'])
def general():
info = {
'check': {
},
'radio': {
'verbals': ('True', ('True', 'False')),
'gen vor': ('False', ('True', 'False')),
'meep sim': ('True', ('True', 'False')),
'gen gmsh': ('False', ('True', 'False')),
'process inp': ('False', ('True', 'False')),
'clean array': ('False', ('True', 'False')),
'sim abq': ('True', ('True', 'False')),
},
'input_range': {
},
'input': {
}
}
return process('General', info)
@app.route('/source', methods=['GET', 'POST'])
def source():
info = {
'check': {
},
'radio': {
'mode': ('normal', ('normal', 'gaussian', 'far_field_transform', 'waveguide')),
},
"input_range": {
'size': [0, 10, 0],
'center': [4, 0, 0],
'near_flux_loc': [3.5, 0, 0],
'far_flux_loc': [-4.5, 0, 0],
'flux_size': [0, 9, 0],
},
'input': {
'fcen': 0.8167,
'tilt_angle': 0,
'sigma': 2,
'amp': 100,
'flux_nfreq': 100,
'fwidth': 2,
'flux_width': 0.8,
}
}
return process('Source', info)
@app.route('/pricing', methods=['GET', 'POST'])
def pricing():
return render_template('pricing.html')
@app.route('/sim_image_data', methods=['GET'])
def sim_image_data():
# a = [1,2,3]
# b = [4,5,6]
# plt.plot(a, b)
# bytes_image = io.BytesIO()
# plt.savefig(bytes_image, format='png')
# bytes_image.seek(0)
# return bytes_image
# bytes_obj = get_correlation_matrix_as_bytes()
r = redis.Redis(host = 'localhost', port = 6379, db=0)
bytes_obj = io.BytesIO(r.get('RMS image'))
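    # the simulation worker is assumed to have cached the rendered PNG bytes in Redis under the 'RMS image' key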
bytes_obj.seek(0)
return send_file(bytes_obj,
attachment_filename='plot.png',
mimetype='image/png')
@app.route('/result', methods=['GET'])
def result():
r = redis.Redis(host = 'localhost', port = 6379, db=0)
bytes_obj = io.BytesIO(r.get('Current result'))
bytes_obj.seek(0)
return send_file(bytes_obj,
attachment_filename='result.xlsx', as_attachment=True)
# - - - Execute - - -
def prep_db():
db.drop_all()
db.create_all()
sections = ['Visualization', 'General', 'Geometry', 'Simulation', 'Source']
for s in sections:
_input_data_list = input_data_list(section=s)
_radio_check_data_list = radio_check_data_list(section=s)
db.session.add(_input_data_list)
db.session.add(_radio_check_data_list)
db.session.commit()
if __name__ == '__main__':
prep_db()
app.run(debug=True, host='0.0.0.0')
| null |
webapp/app.py
|
app.py
|
py
| 12,512 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "celery.Celery",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "celery.conf.update",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "celery.conf",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "celery.task",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "http.HTTPStatus",
"line_number": 233,
"usage_type": "attribute"
},
{
"api_name": "redis.Redis",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "redis.Redis",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 403,
"usage_type": "call"
}
] |
95173720
|
import requests
from PIL import Image
import numpy as np
ENDPOINT_URL = "http://0.0.0.0:80/infer"
def infer():
image = np.asarray(Image.open('resources/yorkshire_terrier.jpg')).astype(np.float32)
data ={'image': image.tolist()}
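    # the float32 image is serialised as a nested list in the JSON body; the server is expected to rebuild the array on its side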
response = requests.post(ENDPOINT_URL, json = data)
response.raise_for_status()
print(response.json())
if __name__ =="__main__":
infer()
| null |
9. How to use uWSGI and Nginx to serve a Deep Learning model/app/client.py
|
client.py
|
py
| 393 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.asarray",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "numpy.float32",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "requests.post",
"line_number": 10,
"usage_type": "call"
}
] |
457015893
|
#!/usr/bin/env python
# Copyright (c) 2014, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Author: Brian Torres-Gil <[email protected]>
"""Panorama object
For functions specific to Panorama
"""
# import modules
import logging
# import other parts of this pandevice package
import pandevice.base
class Panorama(pandevice.base.PanDevice):
def __init__(self,
hostname,
api_username=None,
api_password=None,
api_key=None,
port=443,
serial=None,
classify_exceptions=False):
super(Panorama, self).__init__(hostname, api_username, api_password, api_key, classify_exceptions=classify_exceptions)
# create a class logger
self._logger = logging.getLogger(__name__ + "." + self.__class__.__name__)
def commit_all(self, sync=False, sync_all=True, exception=False, cmd=None):
self._logger.debug("Commit-all initiated on device: %s" % (self.hostname,))
return self._commit(sync=sync,
sync_all=sync_all,
commit_all=True,
exception=exception,
cmd=cmd)
# XXX: I don't think this method is even needed
def create_device_group(self, devicegroup, devices=None):
""" Create a device-group and optionally add devices to it
:param devicegroup: String, The device-group name
:param devices: PanDevice or List of PanDevices to add to the device-group
:return: None
"""
self._logger.debug("Create device-group: %s" % (devicegroup,))
if devices is not None:
self.set_device_group(devicegroup, devices, exclusive=True)
else:
self.xapi.set(pandevice.XPATH_DEVICE_GROUPS + "/entry[@name='%s']" % (devicegroup,))
def set_device_group(self, devicegroup, devices, exclusive=False):
""" For Panorama, set the device group for a device
:param devicegroup: String, Device-group to set devices to
:param devices: PanDevice or List of PanDevices
:param exclusive: Device-group should contain ONLY these devices
:return: None
"""
# TODO: Implement 'exclusive'
self._logger.debug("Set device-group to '%s'" % (devicegroup))
if issubclass(devices.__class__, pandevice.base.PanDevice):
devices = [devices]
device_refresh_needed = False
for device in devices:
if device.serial is None or device.devicegroup is None:
device_refresh_needed = True
break
if device_refresh_needed:
self.refresh_devices_from_panorama(devices)
# All devices have serial numbers now, so start setting devicegroup
for device in devices:
# If the device was in a group, and that group changed, pull it out of the current group
if device.devicegroup != devicegroup and \
device.devicegroup is not None:
self._logger.debug("Moving device %s out of device-group %s" % (device.hostname, device.devicegroup))
self.set_config_changed()
self.xapi.delete(
pandevice.XPATH_DEVICE_GROUPS +
"/entry[@name='%s']/devices"
"/entry[@name='%s']"
% (device.devicegroup, device.serial)
)
device.devicegroup = None
# If assigning device to a new group
if devicegroup is not None:
self.set_config_changed()
self._logger.debug("Moving device %s into device-group %s" % (device.hostname, devicegroup))
self.xapi.set(
pandevice.XPATH_DEVICE_GROUPS +
"/entry[@name='%s']/devices" % (devicegroup,),
"<entry name='%s'/>" % (device.serial,)
)
device.devicegroup = devicegroup
| null |
bin/lib/pandevice/pandevice/panorama.py
|
panorama.py
|
py
| 4,741 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandevice.base.base",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "pandevice.base",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pandevice.base.XPATH_DEVICE_GROUPS",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "pandevice.base",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "pandevice.base.base",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "pandevice.base",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "pandevice.base.XPATH_DEVICE_GROUPS",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "pandevice.base",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "pandevice.base.XPATH_DEVICE_GROUPS",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "pandevice.base",
"line_number": 107,
"usage_type": "name"
}
] |
21859955
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import postgres.fields
import decimal
import django.db.models.deletion
import django.core.serializers.json
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Crawl',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('notes', models.TextField(blank=True)),
],
options={
'verbose_name': 'Crawl',
'verbose_name_plural': 'Crawls',
},
),
migrations.CreateModel(
name='RelatedObject',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(to='contenttypes.ContentType')),
],
options={
'verbose_name': 'Related Object',
'verbose_name_plural': 'Related Objects',
},
),
migrations.CreateModel(
name='URLInspection',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('url', models.URLField(max_length=500)),
('response_meta', postgres.fields.JSONField(default={}, decode_kwargs={'parse_float': decimal.Decimal}, encode_kwargs={'cls': django.core.serializers.json.DjangoJSONEncoder})),
('exists', models.NullBooleanField(help_text='URL resource exists')),
('last_visited', models.DateTimeField(null=True, blank=True, help_text='Datetime when the URL was last visited')),
('crawl', models.ForeignKey(null=True, blank=True, on_delete=django.db.models.deletion.SET_NULL, to='crawler.Crawl')),
],
options={
'verbose_name': 'URL Inspection',
'verbose_name_plural': 'URL Inspections',
},
),
migrations.AddField(
model_name='relatedobject',
name='inspection',
field=models.OneToOneField(related_name='related', to='crawler.URLInspection'),
),
]
| null |
crawler/migrations/0001_initial.py
|
0001_initial.py
|
py
| 2,753 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.db.migrations.Migration",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "django.db.models.PositiveIntegerField",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "django.db.models.URLField",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "postgres.fields.fields.JSONField",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "postgres.fields.fields",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "postgres.fields",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "decimal.Decimal",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "django.db.core",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "django.db",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "django.db.models.NullBooleanField",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "django.db.db",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "django.db",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AddField",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "django.db.models.OneToOneField",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 63,
"usage_type": "name"
}
] |
121250594
|
import logging
import re
from datetime import date
from marcottimls.etl.base import BaseCSV
from marcottimls.models import (Countries, Clubs, Competitions, DomesticCompetitions, InternationalCompetitions,
Seasons, CompetitionSeasons, Persons, Players, NameOrderType, PositionType,
ConfederationType)
logger = logging.getLogger(__name__)
class CountryIngest(BaseCSV):
BATCH_SIZE = 50
def parse_file(self, rows):
insertion_list = []
inserts = 0
logger.info("Ingesting Countries...")
for keys in rows:
country_name = "{Name}".format(**keys).strip().decode('utf-8')
confederation = "{Confederation}".format(**keys).strip()
if not self.record_exists(Countries, name=country_name):
country_dict = dict(name=country_name, confederation=ConfederationType.from_string(confederation))
insertion_list.append(Countries(**country_dict))
inserted, insertion_list = self.bulk_insert(insertion_list, CountryIngest.BATCH_SIZE)
inserts += inserted
self.session.add_all(insertion_list)
inserts += len(insertion_list)
logger.info("Total of {0} Country records inserted and committed to database".format(inserts))
logger.info("Country Ingestion complete.")
class CompetitionIngest(BaseCSV):
BATCH_SIZE = 20
def parse_file(self, rows):
inserts = 0
insertion_list = []
logger.info("Ingesting Competitions...")
for keys in rows:
competition_name = self.column_unicode("Name", **keys)
level = self.column_int("Level", **keys)
country_name = self.column_unicode("Country", **keys)
confederation_name = self.column_unicode("Confederation", **keys)
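# A competition must be either domestic (tied to a country) or international (tied to a confederation), never both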
if all(var is not None for var in [country_name, confederation_name]):
logger.error(u"Cannot insert Competition record for {}: "
u"Country and Confederation defined".format(competition_name))
continue
else:
comp_record = None
if country_name is not None:
country_id = self.get_id(Countries, name=country_name)
comp_dict = dict(name=competition_name, level=level, country_id=country_id)
if country_id is None:
logger.error(u"Cannot insert Domestic Competition record for {}: "
u"Country {} not found".format(competition_name, country_name))
continue
elif not self.record_exists(DomesticCompetitions, **comp_dict):
comp_record = DomesticCompetitions(**comp_dict)
elif confederation_name is not None:
try:
confederation = ConfederationType.from_string(confederation_name)
except ValueError:
logger.error(u"Cannot insert International Competition record for {}: "
u"Confederation {} not found".format(competition_name, confederation_name))
continue
comp_dict = dict(name=competition_name, level=level, confederation=confederation)
if not self.record_exists(InternationalCompetitions, **comp_dict):
comp_record = InternationalCompetitions(**comp_dict)
else:
logger.error(u"Cannot insert Competition record: Neither Country nor Confederation defined")
continue
if comp_record is not None:
insertion_list.append(comp_record)
logger.debug(u"Adding Competition record: {}".format(comp_dict))
inserted, insertion_list = self.bulk_insert(insertion_list, CompetitionIngest.BATCH_SIZE)
inserts += inserted
self.session.add_all(insertion_list)
self.session.commit()
inserts += len(insertion_list)
logger.info("Total {} Competition records inserted and committed to database".format(inserts))
logger.info("Competition Ingestion complete.")
class CompetitionSeasonIngest(BaseCSV):
BATCH_SIZE = 10
def parse_file(self, rows):
inserts = 0
insertion_list = []
for keys in rows:
competition_name = self.column_unicode("Competition", **keys)
season_name = self.column("Season", **keys)
start_date_iso = self.column("Start", **keys)
end_date_iso = self.column("End", **keys)
matchdays = self.column_int("Matchdays", **keys)
start_date = date(*tuple(int(x) for x in start_date_iso.split('-'))) if start_date_iso else None
end_date = date(*tuple(int(x) for x in end_date_iso.split('-'))) if end_date_iso else None
competition_id = self.get_id(Competitions, name=competition_name)
if competition_id is None:
logger.error(u"Cannot insert Competition Season record: "
u"Competition {} not in database".format(competition_name))
continue
season_id = self.get_id(Seasons, name=season_name)
if season_id is None:
logger.error(u"Cannot insert Competition Season record: "
u"Season {} not in database".format(season_name))
continue
compseason_dict = dict(competition_id=competition_id, season_id=season_id, start_date=start_date,
end_date=end_date, matchdays=matchdays)
if not self.record_exists(CompetitionSeasons, **compseason_dict):
insertion_list.append(CompetitionSeasons(**compseason_dict))
inserted, insertion_list = self.bulk_insert(insertion_list, CompetitionSeasonIngest.BATCH_SIZE)
inserts += inserted
if inserted and not inserts % CompetitionSeasonIngest.BATCH_SIZE:
logger.info("{} records inserted".format(inserts))
self.session.add_all(insertion_list)
self.session.commit()
inserts += len(insertion_list)
logger.info("Total {} Competition Season records inserted and committed to database".format(inserts))
logger.info("Competition Season Ingestion complete.")
class ClubIngest(BaseCSV):
BATCH_SIZE = 50
def parse_file(self, rows):
inserts = 0
insertion_list = []
logger.info("Ingesting Clubs...")
for keys in rows:
club_name = self.column_unicode("Name", **keys)
club_symbol = self.column("Symbol", **keys)
country_name = self.column_unicode("Country", **keys)
if country_name is None:
logger.error(u"Cannot insert Club record for {}: Country required".format(club_name))
continue
else:
country_id = self.get_id(Countries, name=country_name)
club_dict = dict(name=club_name, symbol=club_symbol, country_id=country_id)
if country_id is None:
logger.error(u"Cannot insert Club record {}: "
u"Country {} not in database".format(club_dict, country_name))
elif not self.record_exists(Clubs, **club_dict):
insertion_list.append(Clubs(**club_dict))
inserted, insertion_list = self.bulk_insert(insertion_list, ClubIngest.BATCH_SIZE)
inserts += inserted
if inserted and not inserts % ClubIngest.BATCH_SIZE:
logger.info("{} records inserted".format(inserts))
self.session.add_all(insertion_list)
self.session.commit()
inserts += len(insertion_list)
logger.info("Total {} Club records inserted and committed to database".format(inserts))
logger.info("Club Ingestion complete.")
class PersonIngest(BaseCSV):
def parse_file(self, rows):
raise NotImplementedError
def get_person_data(self, **keys):
first_name = self.column_unicode("First Name", **keys)
known_first_name = self.column_unicode("Known First Name", **keys)
middle_name = self.column_unicode("Middle Name", **keys)
last_name = self.column_unicode("Last Name", **keys)
second_last_name = self.column_unicode("Second Last Name", **keys)
nickname = self.column_unicode("Nickname", **keys)
date_of_birth = self.column("Birthdate", **keys)
order = self.column("Name Order", **keys) or "Western"
name_order = NameOrderType.from_string(order)
birth_date = date(*tuple(int(x) for x in date_of_birth.split('-'))) if date_of_birth else None
person_dict = {field: value for (field, value) in zip(
['first_name', 'known_first_name', 'middle_name', 'last_name',
'second_last_name', 'nick_name', 'birth_date', 'order'],
[first_name, known_first_name, middle_name, last_name,
second_last_name, nickname, birth_date, name_order]) if value is not None}
return person_dict
class PlayerIngest(PersonIngest):
BATCH_SIZE = 200
def parse_file(self, rows):
inserts = 0
logger.info("Ingesting Players...")
for keys in rows:
person_dict = self.get_person_data(**keys)
position_chars = self.column("Position", **keys)
country_name = self.column_unicode("Country", **keys)
country_id = self.get_id(Countries, name=country_name)
if country_id is None:
logger.error(u"Cannot insert Player record {}: "
u"Country {} not in database".format(person_dict, country_name))
continue
position = [PositionType.unknown, None]
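# Position strings may contain one or two codes separated by ;,:-/ (e.g. "D/M"); map them to primary/secondary positions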
if position_chars is not None:
position_codes = re.split(r'[;,:\-/]', position_chars)
for i, code in enumerate(position_codes):
position[i] = PositionType.from_string(code)
person_dict = dict(country_id=country_id, **person_dict)
if not self.record_exists(Players, **person_dict):
if not self.record_exists(Persons, **person_dict):
player_record = Players(primary_position=position[0], secondary_position=position[1],
**person_dict)
else:
person_id = self.get_id(Persons, **person_dict)
player_record = Players(person_id=person_id, primary_position=position[0],
secondary_position=position[1])
self.session.add(player_record)
self.session.commit()
inserts += 1
if inserts % PlayerIngest.BATCH_SIZE == 0:
logger.info("{} records inserted".format(inserts))
logger.info("Total {} Player records inserted and committed to database".format(inserts))
logger.info("Player Ingestion complete.")
| null |
marcottimls/etl/overview.py
|
overview.py
|
py
| 11,271 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "marcottimls.etl.base.BaseCSV",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "marcottimls.models.Countries",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "marcottimls.models.ConfederationType.from_string",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "marcottimls.models.ConfederationType",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "marcottimls.models.Countries",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "marcottimls.etl.base.BaseCSV",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "marcottimls.models.Countries",
"line_number": 56,
"usage_type": "argument"
},
{
"api_name": "marcottimls.models.DomesticCompetitions",
"line_number": 62,
"usage_type": "argument"
},
{
"api_name": "marcottimls.models.DomesticCompetitions",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "marcottimls.models.ConfederationType.from_string",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "marcottimls.models.ConfederationType",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "marcottimls.models.InternationalCompetitions",
"line_number": 72,
"usage_type": "argument"
},
{
"api_name": "marcottimls.models.InternationalCompetitions",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "marcottimls.etl.base.BaseCSV",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "datetime.date",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "marcottimls.models.Competitions",
"line_number": 106,
"usage_type": "argument"
},
{
"api_name": "marcottimls.models.Seasons",
"line_number": 111,
"usage_type": "argument"
},
{
"api_name": "marcottimls.models.CompetitionSeasons",
"line_number": 118,
"usage_type": "argument"
},
{
"api_name": "marcottimls.models.CompetitionSeasons",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "marcottimls.etl.base.BaseCSV",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "marcottimls.models.Countries",
"line_number": 148,
"usage_type": "argument"
},
{
"api_name": "marcottimls.models.Clubs",
"line_number": 153,
"usage_type": "argument"
},
{
"api_name": "marcottimls.models.Clubs",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "marcottimls.etl.base.BaseCSV",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "marcottimls.models.NameOrderType.from_string",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "marcottimls.models.NameOrderType",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "datetime.date",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "marcottimls.models.Countries",
"line_number": 204,
"usage_type": "argument"
},
{
"api_name": "marcottimls.models.PositionType.unknown",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "marcottimls.models.PositionType",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "re.split",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "marcottimls.models.PositionType.from_string",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "marcottimls.models.PositionType",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "marcottimls.models.Players",
"line_number": 217,
"usage_type": "argument"
},
{
"api_name": "marcottimls.models.Persons",
"line_number": 218,
"usage_type": "argument"
},
{
"api_name": "marcottimls.models.Players",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "marcottimls.models.Persons",
"line_number": 222,
"usage_type": "argument"
},
{
"api_name": "marcottimls.models.Players",
"line_number": 223,
"usage_type": "call"
}
] |
124396611
|
"""
:py:mod:`pymco.listeners`
-------------------------
stomp.py listeners for python-mcollective.
"""
import functools
import threading
import time
from stomp import listener
class CurrentHostPortListener(listener.ConnectionListener):
"""Listener tracking current host and port.
Some connectors, like the ActiveMQ connector, may provide a different user and
password for each host, so we need to track the current host and port in order
to be able to pick the right user and password when logging in.
"""
def __init__(self, *args, **kwargs):
self.current_host = None
self.current_port = None
def on_connecting(self, host_and_port):
"""Track current host and port.
:arg host_and_port: A two-tuple with host as first element and port
as the second.
"""
self.current_host, self.current_port = host_and_port
def get_host(self):
"""Return current host.
:return: current host.
"""
return self.current_host
def get_port(self):
"""Return current host.
:return: current port.
"""
return self.current_port
class ResponseListener(listener.ConnectionListener):
"""Listener that waits for a message response.
:arg config: :py:class:`pymco.config.Config` instance.
:arg count: number of expected messages.
:arg timeout: seconds we should wait for messages.
:arg condition: by default a :py:class:`threading.Condition` object
for synchronization purposes, but you can use any object
implementing the :py:meth:`wait` method and accepting a ``timeout``
argument.
"""
def __init__(self, config, count, timeout=30, condition=None):
self.config = config
self._security = None
self.timeout = timeout
if not condition:
condition = threading.Condition()
self.condition = condition
self.received = 0
self.responses = []
self.count = count
@property
def security(self):
"""Security provider property"""
if not self._security:
self._security = self.config.get_security()
return self._security
def on_message(self, headers, body):
"""Received messages hook.
:arg headers: message headers.
:arg body: message body.
"""
self.condition.acquire()
self.responses.append(self.security.decode(body))
self.received += 1
self.condition.notify()
self.condition.release()
def wait_on_message(self):
"""Wait until we get a message.
:return: ``self``.
"""
self.condition.acquire()
self._wait_loop(self.timeout)
self.condition.release()
self.received = 0
return self
def _wait_loop(self, timeout):
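# Block until the expected number of responses has arrived, shrinking the remaining timeout by the time spent in each wait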
while self.received < self.count:
init_time = time.time()
self.condition.wait(timeout)
current_time = time.time()
timeout -= (current_time - init_time)
if timeout <= 0:
break
SingleResponseListener = functools.partial(ResponseListener, count=1)
| null |
pymco/listener.py
|
listener.py
|
py
| 3,180 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "stomp.listener.ConnectionListener",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "stomp.listener",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "stomp.listener.ConnectionListener",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "stomp.listener",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "threading.Condition",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 111,
"usage_type": "call"
}
] |
260602147
|
# -*- coding: utf-8 -*-
import dpkt,sys,json,time,socket
from collections import OrderedDict
# packets = {
# port_number: [
# { syn_flag_time: num, syn_flag_ack_time: num, delay: num },
# ...
# port_number: [ ... ],
# }
packets = OrderedDict()
MAIN_IP = "202.229.23.46"
before_time = 0
diff_time = 0
time_minute = 0
count = 1
pcr = dpkt.pcap.Reader(sys.stdin)
ip_src_list = []
ip_dst_list = []
for t, buf in pcr:
eth = dpkt.ethernet.Ethernet(buf)
ip = eth.data
tcp = ip.data
ip_src_addr = socket.inet_ntoa(ip.src)
ip_dst_addr = socket.inet_ntoa(ip.dst)
syn_flag = (tcp.flags & dpkt.tcp.TH_SYN) != 0
ack_flag = (tcp.flags & dpkt.tcp.TH_ACK) != 0
if not syn_flag:
continue
# For debugging
ip_src_list.append(ip_src_addr)
# Set MAIN_IP to the address that accounts for the largest share of traffic
if (ip_src_addr != MAIN_IP) and (ip_dst_addr != MAIN_IP) :
continue
port_number = tcp.dport if ack_flag else tcp.sport
if port_number not in packets:
packets[port_number] = []
contents = packets[port_number]
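# A SYN packet opens a new entry for the port; the matching SYN-ACK fills in the response time and handshake delay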
if not ack_flag: # SYN
contents.append({'syn_flag_time': t })
else: # SYN ACK
try:
content = contents[-1]
content['syn_flag_ack_time'] = t
content['ack_ip_src_addr'] = ip_src_addr
content['delay'] = content['syn_flag_ack_time'] - content['syn_flag_time']
except IndexError:
pass
if before_time:
try:
#print ("t: {}".format(t))
#print ("before_time: {}".format(before_time))
content['time_minute'] = time_minute
diff_time = t - before_time
#print ("diff_time: {}".format(diff_time))
if diff_time > 100:
time_minute += 2
except NameError:
pass
before_time = t
# Distribution of IP addresses
# Assign the most frequent IP address to MAIN_IP
"""
ip_and_counts = {}
for word in ip_src_list:
if ip_and_counts.has_key(word):
ip_and_counts[word] += 1
else:
ip_and_counts[word] = 1
for w, c in sorted(ip_and_counts.iteritems(), key=lambda x: x[1], reverse=True):
print ("{}: {}".format(w,c))
"""
print (json.dumps(packets, indent=4))
# Set all delays to the variable `all_delays`
all_time = []
ip_src_dict = OrderedDict()
for contents in packets.values():
for content in contents:
try:
if content['ack_ip_src_addr'] not in ip_src_dict:
ip_src_dict[content['ack_ip_src_addr']] = []
except:
pass
if 'delay' in content:
all_delay = ip_src_dict[content['ack_ip_src_addr']]
all_delay.append({'delay':content['delay']})
all_time = all_delay[-1]
all_time['access_time'] = content['time_minute']
#print json.dumps(ip_src_dict,indent=4)
| null |
calculate.py
|
calculate.py
|
py
| 2,984 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.OrderedDict",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "dpkt.pcap.Reader",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "dpkt.pcap",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "dpkt.ethernet.Ethernet",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "dpkt.ethernet",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "socket.inet_ntoa",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "socket.inet_ntoa",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "dpkt.tcp",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "dpkt.tcp",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 87,
"usage_type": "call"
}
] |
541020941
|
#!/usr/bin/env python3
# coding: utf8
import datetime
from collections import namedtuple
import cardiologs.common.model.wave as wave
import cardiologs.common.math.streamstatistics as cardiomath
def ms_to_secs(ms):
"""
Convert milliseconds to seconds
"""
return ms / 1000
def ms_to_days(ms):
"""
Convert milliseconds to days
"""
return ms_to_secs(ms) / (3600 * 24)
Max = namedtuple('Max', ['value', 'time'])
Min = namedtuple('Min', ['value', 'time'])
Summary = namedtuple('Summary', ['average', 'min', 'max'])
class Measurements(object):
"""
Aggregate various measures together
"""
def __init__(self, timeoffset):
# Wave lists and heartbeat periods are per-instance so that separate
# Measurements objects do not share state.
self._P = []
self._QRS = []
self._INV = []
self._T = []
self._heartbit_periods = []
self._period = cardiomath.PeriodComputer()
self._average = cardiomath.MovingAverage()
self._max = cardiomath.MovingMax()
self._min = cardiomath.MovingMin()
self._timeoffset = timeoffset
self._time_last_sampling = timeoffset
self._time_max = None
self._time_min = None
def update(self, w):
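# QRS onsets drive the heartbeat statistics (period, moving average, min/max with timestamps); every wave is also appended to its per-type list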
if w.type == wave.WaveType.QRS:
last_sampling = datetime.timedelta(days=ms_to_days(w.timing.onset),
seconds=ms_to_secs(w.timing.onset),
milliseconds=w.timing.onset)
self._time_last_sampling = self._timeoffset + last_sampling
period = self._period(w.timing.onset)
if period != 0:
self._average(period)
if self._min(period):
sample_date = datetime.timedelta(days=ms_to_days(w.timing.onset),
seconds=ms_to_secs(w.timing.onset),
milliseconds=w.timing.onset)
self._time_min = self._timeoffset + sample_date
if self._max(period):
sample_date = datetime.timedelta(days=ms_to_days(w.timing.onset),
seconds=ms_to_secs(w.timing.onset),
milliseconds=w.timing.onset)
self._time_max = self._timeoffset + sample_date
self._heartbit_periods.append((period, w.timing.onset))
if w.type == wave.WaveType.P:
self._P.append(w)
elif w.type == wave.WaveType.QRS:
self._QRS.append(w)
elif w.type == wave.WaveType.INV:
self._INV.append(w)
elif w.type == wave.WaveType.T:
self._T.append(w)
@property
def summary(self):
return Summary(average=self._average.value,
min=Min(value=self._min.value, time=self._time_min),
max=Max(value=self._max.value, time=self._time_max))
@property
def P(self):
return self._P
@property
def QRS(self):
return self._QRS
@property
def INV(self):
return self._INV
@property
def T(self):
return self._T
@property
def time_last_sampling(self):
return self._time_last_sampling
@property
def heartbit_periods(self):
return self._heartbit_periods
| null |
cardiologs/measure.py
|
measure.py
|
py
| 3,327 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.namedtuple",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cardiologs.common.math.streamstatistics.PeriodComputer",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "cardiologs.common.math.streamstatistics",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "cardiologs.common.math.streamstatistics.MovingAverage",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "cardiologs.common.math.streamstatistics",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "cardiologs.common.math.streamstatistics.MovingMax",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "cardiologs.common.math.streamstatistics",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "cardiologs.common.math.streamstatistics.MovingMin",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "cardiologs.common.math.streamstatistics",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "cardiologs.common.model.wave.WaveType",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "cardiologs.common.model.wave",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "cardiologs.common.model.wave.WaveType",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "cardiologs.common.model.wave",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "cardiologs.common.model.wave.WaveType",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "cardiologs.common.model.wave",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "cardiologs.common.model.wave.WaveType",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "cardiologs.common.model.wave",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "cardiologs.common.model.wave.WaveType",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "cardiologs.common.model.wave",
"line_number": 79,
"usage_type": "name"
}
] |
342113249
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MatchRecord(Model):
"""MatchRecord.
:param wikipedia_score: (optional) If a well-known item with Wikipedia
link is recognized, a decimal number denoting the confidence level of the
Wikipedia info will be returned.
:type wikipedia_score: float
:param entity_type_score: (optional) If an entity type is recognized, a
decimal number denoting the confidence level of the entity type will be
returned.
:type entity_type_score: float
:param text: Entity text as appears in the request.
:type text: str
:param offset: Start position (in Unicode characters) for the entity match
text.
:type offset: int
:param length: Length (in Unicode characters) for the entity match text.
:type length: int
"""
_attribute_map = {
'wikipedia_score': {'key': 'wikipediaScore', 'type': 'float'},
'entity_type_score': {'key': 'entityTypeScore', 'type': 'float'},
'text': {'key': 'text', 'type': 'str'},
'offset': {'key': 'offset', 'type': 'int'},
'length': {'key': 'length', 'type': 'int'},
}
def __init__(self, *, wikipedia_score: float=None, entity_type_score: float=None, text: str=None, offset: int=None, length: int=None, **kwargs) -> None:
super(MatchRecord, self).__init__(**kwargs)
self.wikipedia_score = wikipedia_score
self.entity_type_score = entity_type_score
self.text = text
self.offset = offset
self.length = length
| null |
sdk/cognitiveservices/azure-cognitiveservices-language-textanalytics/azure/cognitiveservices/language/textanalytics/models/match_record_py3.py
|
match_record_py3.py
|
py
| 2,003 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "msrest.serialization.Model",
"line_number": 15,
"usage_type": "name"
}
] |
651561274
|
def load_data():
import pandas as pd
import os
import ujson
df = pd.read_csv('data/dataset_1/Video_Games_Sales_as_at_22_Dec_2016.csv')
#print df
#genres = df.transpose().groupby('Global_Sales')[['Action','Adventure','Fighting','Misc','Platform','Puzzle','Racing','Role-Playing','Shooter','Simulation','Sports','Strategy']].sum()
regions = df.groupby('Genre')[['NA_Sales','EU_Sales','JP_Sales','Other_Sales']].sum()
systems = df.groupby('Platform')[['NA_Sales','EU_Sales','JP_Sales','Other_Sales']].sum()
publishers = df.groupby('Publisher')[['NA_Sales','EU_Sales','JP_Sales','Other_Sales']].sum()
years = df.groupby('Year_of_Release')[['NA_Sales','EU_Sales','JP_Sales','Other_Sales','Global_Sales']].sum()
names = df.groupby('Name')[['NA_Sales','EU_Sales','JP_Sales','Other_Sales','Global_Sales']].sum()
#print years
output = {}
output['regions'] = regions.to_dict()
output['genres'] = regions.transpose().to_dict()
output['systems'] = systems.transpose().to_dict()
output['publishers'] = publishers.transpose().to_dict()
#output['genres'] = genres.to_dict()
output['years'] = years.to_dict()
output['names'] = names.to_dict()
nameKeys = output['names']['Global_Sales'].keys()
#output['namesearchlist'] = [n + ' | ' + ','.join(df[df['Name'].isin([n])].dropna()['Platform'].unique()) + ' | ' + ','.join(df[df['Name'].isin([n])].dropna()['Publisher'].unique()) + ' | ' + ','.join(df[df['Name'].isin([n])].dropna()['Genre'].unique()) for n in nameKeys]
output['namelist'] = nameKeys
output['genrelist'] = output['genres'].keys()
output['publisherlist'] = output['publishers'].keys()
output['systemlist'] = output['systems'].keys()
#ourput['name_list'] = output['names']['Global_Sales'].keys()
return ujson.dumps(output)
def update_data(req):
import pandas as pd
import os
import ujson
df = pd.read_csv('data/dataset_1/Video_Games_Sales_as_at_22_Dec_2016.csv')
genre = existsOrEmptyList(req.args.get('genre'),req.args.get('splitter'))
system = existsOrEmptyList(req.args.get('system'),req.args.get('splitter'))
publisher = existsOrEmptyList(req.args.get('publisher'),req.args.get('splitter'))
game = existsOrEmptyList(req.args.get('game'),req.args.get('splitter'))
udf = df
tag_total = len(genre) + len(system) + len(publisher) + len(game)
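# When any tags are selected, keep only rows matching a chosen genre, platform, publisher or game; otherwise use the full dataset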
if tag_total > 0:
udf = df[df['Genre'].isin(genre) | df['Platform'].isin(system) | df['Publisher'].isin(publisher) | df['Name'].isin(game)]
sdf = udf.fillna(0)
sales = {n : sdf[sdf['Genre'].isin([n])][['Global_Sales']].sum() for n in [g for g in sdf['Genre'].unique() if str(g) != '0']}
#sales = udf[['NA_Sales','EU_Sales','JP_Sales','Other_Sales']].sum().to_dict()
years = udf.groupby('Year_of_Release')[['NA_Sales','EU_Sales','JP_Sales','Other_Sales']].sum()
output = {}
output['subset'] = {}
output['ratings'] = {}
if tag_total > 0:
for g in genre:
output['subset'][g] = udf[udf['Genre'].isin([g])][['NA_Sales','EU_Sales','JP_Sales','Other_Sales']].sum().to_dict()
output['ratings'][g] = udf[udf['Genre'].isin([g])][['Critic_Score','User_Score']].mean().dropna().to_dict()
for g in system:
output['subset'][g] = udf[udf['Platform'].isin([g])][['NA_Sales','EU_Sales','JP_Sales','Other_Sales']].sum().to_dict()
output['ratings'][g] = udf[udf['Platform'].isin([g])][['Critic_Score','User_Score']].mean().dropna().to_dict()
for g in publisher:
output['subset'][g] = udf[udf['Publisher'].isin([g])][['NA_Sales','EU_Sales','JP_Sales','Other_Sales']].sum().to_dict()
output['ratings'][g] = udf[udf['Publisher'].isin([g])][['Critic_Score','User_Score']].mean().dropna().to_dict()
for g in game:
output['subset'][g] = udf[udf['Name'].isin([g])][['NA_Sales','EU_Sales','JP_Sales','Other_Sales']].sum().to_dict()
output['ratings'][g] = udf[udf['Name'].isin([g])][['Critic_Score','User_Score']].mean().dropna().to_dict()
else:
output['subset']['Total'] = udf[['NA_Sales','EU_Sales','JP_Sales','Other_Sales']].sum().to_dict()
output['sales'] = sales
output['years'] = years.to_dict()
#print udf[['Critic_Score']].dropna().sum()
#print output
return ujson.dumps(output)
def existsOrEmptyList(v,d):
output = []
if v is not None:
output = str(v).split(d)
return output
| null |
gamedata.py
|
gamedata.py
|
py
| 4,518 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_csv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "ujson.dumps",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "ujson.dumps",
"line_number": 93,
"usage_type": "call"
}
] |
206839824
|
from tm1637 import TM1637
import RPi.GPIO as GPIO
import multiprocessing, time
class WorkoutBuddy():
REST_TIME = 90
CLK_PIN = 24
DIO_PIN = 23
INPUT_PIN = 17
MAX_SETS = 5
_time = 0
_set = 1
_thread = None
_tm = None
_gpio = None
# Debouncing
_accept_input = True
def __init__(self):
# Use Broadcom (BCM) pin numbering scheme
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self.INPUT_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(
self.INPUT_PIN,
GPIO.FALLING,
callback=self.set_completed,
bouncetime=200
)
# Write and init display
self._tm = TM1637(clk=self.CLK_PIN, dio=self.DIO_PIN)
self._time = self.REST_TIME
self.display()
def __str__(self):
return f"{self._time}, {self._set}"
def display(self):
self._tm.numbers(self._time, self._set)
def set_completed(self, channel):
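# Push-button callback: stop any running countdown, then either reset after the final set or advance to the next set and start the rest timer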
if self._thread is not None:
self._thread.terminate()
# Press button to reset to 1st set
if self._set == self.MAX_SETS:
self._set = 1
self._time = self.REST_TIME
self.display()
else:
self._set += 1
self.rest_between_sets()
def stop_thread(self):
self._thread.terminate()
def rest_between_sets(self):
self._time = self.REST_TIME
self._thread = multiprocessing.Process(target=self.countdown_thread)
self._thread.start()
def countdown_thread(self):
self.display()
while self._time > 0:
time.sleep(1)
self._time -= 1
self.display()
if __name__ == "__main__":
wb = WorkoutBuddy()
while True:
pass # Spinlock
| null |
main.py
|
main.py
|
py
| 1,870 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "RPi.GPIO.setmode",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.BCM",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.setwarnings",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.setup",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.IN",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.PUD_UP",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.add_event_detect",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.FALLING",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "tm1637.TM1637",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 71,
"usage_type": "call"
}
] |
287768271
|
import sys, os, argparse
from os.path import join, isdir, isfile, split
sys.path.insert(0, 'caffe/python')
import caffe
from caffe import layers as L, params as P
from caffe.coord_map import crop
import numpy as np
from math import ceil
parser = argparse.ArgumentParser(description='Training hed.')
parser.add_argument('--nfeat', type=int, help='number features', default=11)
parser.add_argument('--bias', type=bool, default=True)
args = parser.parse_args()
tmp_dir = 'tmp'
if not isdir(tmp_dir):
os.makedirs(tmp_dir)
def conv_relu(bottom, nout, ks=3, stride=1, pad=1, mult=[1,1,2,0]):
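# Convolution (3x3 by default) with MSRA initialization and an in-place ReLU; mult holds [weight_lr, weight_decay, bias_lr, bias_decay] multipliers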
conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
num_output=nout, pad=pad, weight_filler=dict(type='msra'),
param=[dict(lr_mult=mult[0], decay_mult=mult[1]), dict(lr_mult=mult[2], decay_mult=mult[3])])
return conv, L.ReLU(conv, in_place=True)
def max_pool(bottom, ks=2, stride=2):
return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)
def conv1x1(bottom, nout=1, lr=[0.01, 1, 0.02, 0], wf=dict(type="constant")):
if args.bias:
return L.Convolution(bottom, kernel_size=1, num_output=nout, weight_filler=wf,
param=[dict(lr_mult=lr[0], decay_mult=lr[1]), dict(lr_mult=lr[2], decay_mult=lr[3])])
else:
return L.Convolution(bottom, kernel_size=1, num_output=nout, weight_filler=wf,
bias_term=False, param=[dict(lr_mult=lr[0], decay_mult=lr[1])])
def upsample(bottom, stride, nout=1, name=None):
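# Deconvolution initialized with bilinear weights and frozen (lr_mult=0), i.e. fixed bilinear upsampling by the given stride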
s, k, pad = stride, 2 * stride, int(ceil(stride-1)/2)
if not name:
name = "upsample%d"%s
return L.Deconvolution(bottom, name=name, convolution_param=dict(num_output=nout, bias_term=False,
kernel_size=k, stride=s, pad=pad, weight_filler = dict(type="bilinear"), group=nout),
param=[dict(lr_mult=0, decay_mult=0)])
def net(split):
n = caffe.NetSpec()
# loss_param = dict(normalization=P.Loss.VALID)
loss_param = dict(normalize=False)
if split=='train':
data_params = dict(mean=(104.00699, 116.66877, 122.67892))
data_params['root'] = 'data/HED-BSDS_PASCAL'
data_params['source'] = "bsds_pascal_train_pair.lst"
data_params['shuffle'] = True
data_params['ignore_label'] = -1
n.data, n.label = L.Python(module='pylayer', layer='ImageLabelmapDataLayer', ntop=2, \
param_str=str(data_params))
if 'ignore_label' in data_params:
loss_param['ignore_label'] = int(data_params['ignore_label'])
elif split == 'test':
n.data = L.Input(name = 'data', input_param=dict(shape=dict(dim=[1,3,200,200])))
else:
raise Exception("Invalid phase")
n.conv1_1, n.relu1_1 = conv_relu(n.data, 64, pad=1)
n.conv1_2, n.relu1_2 = conv_relu(n.relu1_1, 64)
n.pool1 = max_pool(n.relu1_2)
n.conv2_1, n.relu2_1 = conv_relu(n.pool1, 128)
n.conv2_2, n.relu2_2 = conv_relu(n.relu2_1, 128)
n.pool2 = max_pool(n.relu2_2)
n.conv3_1, n.relu3_1 = conv_relu(n.pool2, 256)
n.conv3_2, n.relu3_2 = conv_relu(n.relu3_1, 256)
n.conv3_3, n.relu3_3 = conv_relu(n.relu3_2, 256)
n.pool3 = max_pool(n.relu3_3)
n.conv4_1, n.relu4_1 = conv_relu(n.pool3, 512)
n.conv4_2, n.relu4_2 = conv_relu(n.relu4_1, 512)
n.conv4_3, n.relu4_3 = conv_relu(n.relu4_2, 512)
n.pool4 = max_pool(n.relu4_3)
n.conv5_1, n.relu5_1 = conv_relu(n.pool4, 512, mult=[100,1,200,0])
n.conv5_2, n.relu5_2 = conv_relu(n.relu5_1, 512, mult=[100,1,200,0])
n.conv5_3, n.relu5_3 = conv_relu(n.relu5_2, 512, mult=[100,1,200,0])
## w1
n.w1_1top = conv1x1(n.conv1_1, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
n.w1_2top = conv1x1(n.conv1_2, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
## w2
n.w2_1top = conv1x1(n.conv2_1, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
n.w2_2top = conv1x1(n.conv2_2, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
n.w2_1down = conv1x1(n.conv2_1, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
n.w2_2down = conv1x1(n.conv2_2, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
## w3
n.w3_1top = conv1x1(n.conv3_1, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
n.w3_2top = conv1x1(n.conv3_2, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
n.w3_3top = conv1x1(n.conv3_3, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
n.w3_1down = conv1x1(n.conv3_1, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
n.w3_2down = conv1x1(n.conv3_2, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
n.w3_3down = conv1x1(n.conv3_3, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
## w4
n.w4_1top = conv1x1(n.conv4_1, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
n.w4_2top = conv1x1(n.conv4_2, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
n.w4_3top = conv1x1(n.conv4_3, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
n.w4_1down = conv1x1(n.conv4_1, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
n.w4_2down = conv1x1(n.conv4_2, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
n.w4_3down = conv1x1(n.conv4_3, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
## w5
n.w5_1down = conv1x1(n.conv5_1, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
n.w5_2down = conv1x1(n.conv5_2, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
n.w5_3down = conv1x1(n.conv5_3, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
## upsample wx_xdown
n.w2_1down_up = upsample(n.w2_1down, nout=args.nfeat, stride=2, name='upsample2_1')
n.w2_2down_up = upsample(n.w2_2down, nout=args.nfeat, stride=2, name='upsample2_2')
n.w3_1down_up = upsample(n.w3_1down, nout=args.nfeat, stride=2, name='upsample3_1')
n.w3_2down_up = upsample(n.w3_2down, nout=args.nfeat, stride=2, name='upsample3_2')
n.w3_3down_up = upsample(n.w3_3down, nout=args.nfeat, stride=2, name='upsample3_3')
n.w4_1down_up = upsample(n.w4_1down, nout=args.nfeat, stride=2, name='upsample4_1')
n.w4_2down_up = upsample(n.w4_2down, nout=args.nfeat, stride=2, name='upsample4_2')
n.w4_3down_up = upsample(n.w4_3down, nout=args.nfeat, stride=2, name='upsample4_3')
n.w5_1down_up = upsample(n.w5_1down, nout=args.nfeat, stride=2, name='upsample5_1')
n.w5_2down_up = upsample(n.w5_2down, nout=args.nfeat, stride=2, name='upsample5_2')
n.w5_3down_up = upsample(n.w5_3down, nout=args.nfeat, stride=2, name='upsample5_3')
## crop wx_xdown_up
n.w2_1down_up_crop = crop(n.w2_1down_up, n.w1_1top)
n.w2_2down_up_crop = crop(n.w2_2down_up, n.w1_1top)
n.w3_1down_up_crop = crop(n.w3_1down_up, n.w2_1top)
n.w3_2down_up_crop = crop(n.w3_2down_up, n.w2_1top)
n.w3_3down_up_crop = crop(n.w3_3down_up, n.w2_1top)
n.w4_1down_up_crop = crop(n.w4_1down_up, n.w3_1top)
n.w4_2down_up_crop = crop(n.w4_2down_up, n.w3_1top)
n.w4_3down_up_crop = crop(n.w4_3down_up, n.w3_1top)
n.w5_1down_up_crop = crop(n.w5_1down_up, n.w4_1top)
n.w5_2down_up_crop = crop(n.w5_2down_up, n.w4_1top)
n.w5_3down_up_crop = crop(n.w5_3down_up, n.w4_1top)
## fuse
n.h1s1_2 = L.Eltwise(n.w1_1top, n.w1_2top, n.w2_1down_up_crop, n.w2_2down_up_crop)
n.h1s2_3 = L.Eltwise(n.w2_1top, n.w2_2top, n.w3_1down_up_crop, n.w3_2down_up_crop, n.w3_3down_up_crop)
n.h1s3_4 = L.Eltwise(n.w3_1top, n.w3_2top, n.w3_3top, \
n.w4_1down_up_crop, n.w4_2down_up_crop, n.w4_3down_up_crop)
n.h1s4_5 = L.Eltwise(n.w4_1top, n.w4_2top, n.w4_3top, \
n.w5_1down_up_crop, n.w5_2down_up_crop, n.w5_3down_up_crop)
## score h1sx_x
n.score_h1s1_2 = conv1x1(n.h1s1_2, lr=[0.01, 1, 0.02, 0], wf=dict(type='gaussian', std=0.001))
n.score_h1s2_3 = conv1x1(n.h1s2_3, lr=[0.01, 1, 0.02, 0], wf=dict(type='gaussian', std=0.001))
n.score_h1s3_4 = conv1x1(n.h1s3_4, lr=[0.01, 1, 0.02, 0], wf=dict(type='gaussian', std=0.001))
n.score_h1s4_5 = conv1x1(n.h1s4_5, lr=[0.01, 1, 0.02, 0], wf=dict(type='gaussian', std=0.001))
## upsample score
n.upscore_h1s2_3 = upsample(n.score_h1s2_3, stride=2, name='upscore_h1s2_3')
n.upscore_h1s3_4 = upsample(n.score_h1s3_4, stride=4, name='upscore_h1s3_4')
n.upscore_h1s4_5 = upsample(n.score_h1s4_5, stride=8, name='upscore_h1s4_5')
## crop upscore_h1sx_x
n.crop_h1s1_2 = crop(n.score_h1s1_2, n.data)
n.crop_h1s2_3 = crop(n.upscore_h1s2_3, n.data)
n.crop_h1s3_4 = crop(n.upscore_h1s3_4, n.data)
n.crop_h1s4_5 = crop(n.upscore_h1s4_5, n.data)
## fuse
n.h1_concat = L.Concat(n.crop_h1s1_2,
n.crop_h1s2_3,
n.crop_h1s3_4,
n.crop_h1s4_5,
concat_param=dict({'concat_dim':1}))
n.h1_fuse = conv1x1(n.h1_concat, lr=[0.001, 1, 0.002, 0], wf=dict(type='constant', value=float(1)/4))
if split == 'train':
n.loss_h1s1_2 = L.SigmoidCrossEntropyLoss(n.crop_h1s1_2, n.label, loss_param=loss_param)
n.loss_h1s2_3 = L.SigmoidCrossEntropyLoss(n.crop_h1s2_3, n.label, loss_param=loss_param)
n.loss_h1s3_4 = L.SigmoidCrossEntropyLoss(n.crop_h1s3_4, n.label, loss_param=loss_param)
n.loss_h1s4_5 = L.SigmoidCrossEntropyLoss(n.crop_h1s4_5, n.label, loss_param=loss_param)
n.loss_h1_fuse = L.SigmoidCrossEntropyLoss(n.h1_fuse, n.label, loss_param=loss_param)
else:
n.sigmoid_h1s1_2 = L.Sigmoid(n.crop_h1s1_2)
n.sigmoid_h1s2_3 = L.Sigmoid(n.crop_h1s2_3)
n.sigmoid_h1s3_4 = L.Sigmoid(n.crop_h1s3_4)
n.sigmoid_h1s4_5 = L.Sigmoid(n.crop_h1s4_5)
n.sigmoid_h1_fuse = L.Sigmoid(n.h1_fuse)
## H2: conv h1sx_x for H2 fusing
n.h1s1_2top = conv1x1(n.h1s1_2, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
n.h1s2_3top = conv1x1(n.h1s2_3, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
n.h1s2_3down = conv1x1(n.h1s2_3, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
n.h1s3_4top = conv1x1(n.h1s3_4, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
n.h1s3_4down = conv1x1(n.h1s3_4, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
n.h1s4_5down = conv1x1(n.h1s4_5, nout=args.nfeat, lr=[0.1, 1, 0.2, 0], wf=dict(type='gaussian', std=0.001))
## upsample H2
n.h1s2_3upsample = upsample(n.h1s2_3down, nout=args.nfeat, stride=2, name='upsample_h1s2_3')
n.h1s3_4upsample = upsample(n.h1s3_4down, nout=args.nfeat, stride=2, name='upsample_h1s3_4')
n.h1s4_5upsample = upsample(n.h1s4_5down, nout=args.nfeat, stride=2, name='upsample_h1s4_5')
## Crop H2
n.h1s2_3crop = crop(n.h1s2_3upsample, n.h1s1_2top)
n.h1s3_4crop = crop(n.h1s3_4upsample, n.h1s2_3top)
n.h1s4_5crop = crop(n.h1s4_5upsample, n.h1s3_4top)
## fuse H2
n.h2s1_2_3 = L.Eltwise(n.h1s1_2top, n.h1s2_3crop)
n.h2s2_3_4 = L.Eltwise(n.h1s2_3top, n.h1s3_4crop)
n.h2s3_4_5 = L.Eltwise(n.h1s3_4top, n.h1s4_5crop)
## score H2
n.score_h2s1_2_3 = conv1x1(n.h2s1_2_3, lr=[0.01, 1, 0.02, 0], wf=dict(type='gaussian', std=0.001))
n.score_h2s2_3_4 = conv1x1(n.h2s2_3_4, lr=[0.01, 1, 0.02, 0], wf=dict(type='gaussian', std=0.001))
n.score_h2s3_4_5 = conv1x1(n.h2s3_4_5, lr=[0.01, 1, 0.02, 0], wf=dict(type='gaussian', std=0.001))
## upsample H2 score
n.score_h2s2_3_4upsample = upsample(n.score_h2s2_3_4, stride=2, name='upscore_h2s2_3_4')
n.score_h2s3_4_5upsample = upsample(n.score_h2s3_4_5, stride=4, name='upscore_h2s3_4_5')
## Crop H2 score
n.score_h2s1_2_3crop = crop(n.score_h2s1_2_3, n.data)
n.score_h2s2_3_4crop = crop(n.score_h2s2_3_4upsample, n.data)
n.score_h2s3_4_5crop = crop(n.score_h2s3_4_5upsample, n.data)
# concat H2
n.h2_concat = L.Concat(n.score_h2s1_2_3crop, n.score_h2s2_3_4crop, n.score_h2s3_4_5crop,\
concat_param=dict({'concat_dim':1}))
n.h2_fuse = conv1x1(n.h2_concat, lr=[0.001, 1, 0.002, 0], wf=dict(type='constant', value=0.333))
if split == 'train':
n.loss_h2s1_2_3 = L.SigmoidCrossEntropyLoss(n.score_h2s1_2_3crop, n.label, loss_param=loss_param)
n.loss_h2s2_3_4 = L.SigmoidCrossEntropyLoss(n.score_h2s2_3_4crop, n.label, loss_param=loss_param)
n.loss_h2s3_4_5 = L.SigmoidCrossEntropyLoss(n.score_h2s3_4_5crop, n.label, loss_param=loss_param)
n.loss_h2_fuse = L.SigmoidCrossEntropyLoss(n.h2_fuse, n.label, loss_param=loss_param)
else:
n.sigmoid_h2s1_2_3 = L.Sigmoid(n.score_h2s1_2_3crop)
n.sigmoid_h2s2_3_4 = L.Sigmoid(n.score_h2s2_3_4crop)
n.sigmoid_h2s3_4_5 = L.Sigmoid(n.score_h2s3_4_5crop)
n.sigmoid_h2_fuse = L.Sigmoid(n.h2_fuse)
# Concat H1 and H2
n.h1h2_concat = L.Concat(n.score_h2s1_2_3crop, n.score_h2s2_3_4crop, n.score_h2s3_4_5crop,
n.crop_h1s1_2, n.crop_h1s2_3, n.crop_h1s3_4, n.crop_h1s4_5,
concat_param=dict({'concat_dim': 1}))
n.h1h2_fuse = conv1x1(n.h1h2_concat, lr=[0.001, 1, 0.002, 0], wf=dict(type='constant', value=float(1)/7))
if split == 'train':
n.loss_h1h2_fuse = L.SigmoidCrossEntropyLoss(n.h1h2_fuse, n.label, loss_param=loss_param)
else:
n.sigmoid_h1h2_fuse = L.Sigmoid(n.h1h2_fuse)
return n.to_proto()
def make_net():
fpath = join(tmp_dir, "h2feat%d_train.pt"%args.nfeat)
with open(fpath, 'w') as f:
f.write(str(net('train')))
fpath = join(tmp_dir, "h2feat%d_test.pt"%args.nfeat)
with open(fpath, 'w') as f:
f.write(str(net('test')))
def make_solver():
sp = {}
fpath = join(tmp_dir, "h1feat%d_train.pt"%args.nfeat)
sp['net'] = '"' + fpath + '"'
sp['base_lr'] = '0.000001'
sp['lr_policy'] = '"step"'
sp['momentum'] = '0.9'
sp['weight_decay'] = '0.0002'
sp['iter_size'] = '10'
sp['stepsize'] = '20000'
sp['display'] = '10'
sp['snapshot'] = '2000'
sp['snapshot_prefix'] = '"snapshot/h2feat%d"'%args.nfeat
sp['gamma'] = '0.1'
sp['max_iter'] = '40000'
sp['solver_mode'] = 'GPU'
fpath = join(tmp_dir, "h2feat%d_solver.pt"%args.nfeat)
f = open(fpath, 'w')
for k, v in sorted(sp.items()):
if not(type(v) is str):
raise TypeError('All solver parameters must be strings')
f.write('%s: %s\n'%(k, v))
f.close()
def make_all():
make_net()
make_solver()
if __name__ == '__main__':
make_all()
| null |
zeaky/model/h2.py
|
h2.py
|
py
| 14,338 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path.insert",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "caffe.layers.Convolution",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "caffe.layers.ReLU",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "caffe.layers.Pooling",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "caffe.params.Pooling",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "caffe.params",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "caffe.layers.Convolution",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "caffe.layers.Convolution",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "math.ceil",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "caffe.layers.Deconvolution",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "caffe.NetSpec",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "caffe.layers.Python",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "os.path.split",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "caffe.layers.Input",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "caffe.coord_map.crop",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "caffe.coord_map.crop",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "caffe.coord_map.crop",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "caffe.coord_map.crop",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "caffe.coord_map.crop",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "caffe.coord_map.crop",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "caffe.coord_map.crop",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "caffe.coord_map.crop",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "caffe.coord_map.crop",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "caffe.coord_map.crop",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "caffe.coord_map.crop",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "caffe.layers.Eltwise",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "caffe.layers.Eltwise",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "caffe.layers.Eltwise",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "caffe.layers.Eltwise",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "caffe.coord_map.crop",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "caffe.coord_map.crop",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "caffe.coord_map.crop",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "caffe.coord_map.crop",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "caffe.layers.Concat",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "os.path.split",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "caffe.layers.SigmoidCrossEntropyLoss",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "caffe.layers.SigmoidCrossEntropyLoss",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "caffe.layers.SigmoidCrossEntropyLoss",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "caffe.layers.SigmoidCrossEntropyLoss",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "caffe.layers.SigmoidCrossEntropyLoss",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "caffe.layers.Sigmoid",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "caffe.layers.Sigmoid",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "caffe.layers.Sigmoid",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "caffe.layers.Sigmoid",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "caffe.layers.Sigmoid",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "caffe.coord_map.crop",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "caffe.coord_map.crop",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "caffe.coord_map.crop",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "caffe.layers.Eltwise",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "caffe.layers.Eltwise",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "caffe.layers.Eltwise",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "caffe.coord_map.crop",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "caffe.coord_map.crop",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "caffe.coord_map.crop",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "caffe.layers.Concat",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "os.path.split",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "caffe.layers.SigmoidCrossEntropyLoss",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "caffe.layers.SigmoidCrossEntropyLoss",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "caffe.layers.SigmoidCrossEntropyLoss",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "caffe.layers.SigmoidCrossEntropyLoss",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "caffe.layers.Sigmoid",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "caffe.layers.Sigmoid",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "caffe.layers.Sigmoid",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "caffe.layers.Sigmoid",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "caffe.layers.Concat",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 230,
"usage_type": "name"
},
{
"api_name": "os.path.split",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "caffe.layers.SigmoidCrossEntropyLoss",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 235,
"usage_type": "name"
},
{
"api_name": "caffe.layers.Sigmoid",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "caffe.layers",
"line_number": 237,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 263,
"usage_type": "call"
}
] |
262500749
|
# uncompyle6 version 3.6.7
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]
# Embedded file name: build/bdist.linux-x86_64/egg/pyp_beagle/beagle_pdf.py
# Compiled at: 2019-07-16 04:25:49
import logging, numpy as np, os, json
from matplotlib import rc
from matplotlib.colors import colorConverter
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
from collections import OrderedDict
from astropy.io import fits
from getdist import plots, MCSamples
from beagle_utils import BeagleDirectories, prepare_plot_saving, plot_exists
class PDF(object):
def __init__(self, params_file, **kwargs):
with open(params_file) as (f):
self.adjust_params = json.load(f, object_pairs_hook=OrderedDict)
self.mock_catalogue = kwargs.get('mock_catalogue')
self.single_solutions = None
if kwargs.get('plot_single_solution') is not None:
self.single_solutions = OrderedDict()
with fits.open(kwargs.get('plot_single_solution')) as (f):
self.single_solutions['ID'] = f[1].data['ID']
self.single_solutions['row'] = f[1].data['row_index']
self.triangle_font_size = kwargs.get('fontsize')
return
def plot_triangle(self, ID, params_to_plot=None, suffix=None, replot=False, M_star=False, show=False):
"""
Draw a "triangle plot" with the 1D and 2D posterior probability
Parameters
----------
ID : str or int
Object ID
M_star : bool, optional
If set, the routine will plot the mass currently locked into stars
instead of the mass of stars formed (i.e., the plotted mass will
account for the return fraction)
"""
if suffix is None:
plot_name = str(ID) + '_BEAGLE_triangle.pdf'
else:
plot_name = str(ID) + '_BEAGLE_triangle_' + suffix + '.pdf'
if plot_exists(plot_name) and not replot and not show:
logging.warning('The plot "' + plot_name + '" already exists. \n Exiting the function.')
return
else:
fits_file = os.path.join(BeagleDirectories.results_dir, str(ID) + '_' + BeagleDirectories.suffix + '.fits.gz')
hdulist = fits.open(fits_file)
param_values = OrderedDict()
for key, value in self.adjust_params.iteritems():
extName = 'POSTERIOR PDF'
if 'extName' in value:
extName = value['extName']
colName = key
if 'colName' in value:
colName = value['colName']
param_values[key] = hdulist[extName].data[colName]
probability = hdulist['posterior pdf'].data['probability']
n_rows = probability.size
if params_to_plot is None:
_params_to_plot = list()
for key, value in self.adjust_params.iteritems():
_params_to_plot.append(key)
else:
_params_to_plot = params_to_plot
if M_star and 'mass' in _params_to_plot:
param_values['mass'][:] = np.log10(hdulist['galaxy properties'].data['M_star'][:])
nParamsToPlot = len(_params_to_plot)
names = list()
labels = list()
ranges = dict()
samps = np.zeros((n_rows, nParamsToPlot))
keys = list()
j = 0
for key, par in self.adjust_params.iteritems():
keys.append(key)
for par_name in _params_to_plot:
if key == par_name:
names.append(key)
label = par['label'].replace('$', '')
labels.append(label)
samps[:, j] = param_values[key]
ranges.update({key: par['range']})
if 'log' in par:
if par['log']:
samps[:, j] = np.log10(param_values[key])
ranges.update({key: np.log10(par['range'])})
j += 1
break
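# Density-estimation settings passed to getdist: contour levels plus 1D/2D binning and smoothing scales.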
settings = {'contours': [
0.68, 0.95, 0.99],
'range_ND_contour': 1,
'range_confidence': 0.001,
'fine_bins': 200,
'fine_bins_2d': 80,
'smooth_scale_1D': 0.3,
'smooth_scale_2D': 0.5,
'tight_gap_fraction': 0.15}
samples = MCSamples(samples=samps, names=names, ranges=ranges, weights=probability, labels=labels, settings=settings)
g = plots.getSubplotPlotter()
g.settings.num_plot_contours = 3
g.settings.prob_y_ticks = True
if self.triangle_font_size is None:
g.settings.lab_fontsize = 7 + 4 * g.settings.subplot_size_inch
g.settings.axes_fontsize = 4 + 4 * g.settings.subplot_size_inch
else:
g.settings.lab_fontsize = self.triangle_font_size
g.settings.axes_fontsize = self.triangle_font_size
line_args = {'lw': 2, 'color': colorConverter.to_rgb('#006FED')}
g.triangle_plot(samples, filled=True, line_args=line_args)
g.fig.subplots_adjust(wspace=0.1, hspace=0.1)
prune = 'both'
for i in range(len(names)):
for i2 in range(i, len(names)):
_ax = g._subplot(i, i2)
_ax.xaxis.set_major_locator(plt.MaxNLocator(3, prune=prune))
_ax.yaxis.set_major_locator(plt.MaxNLocator(3, prune=prune))
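# Post-process the diagonal (1D marginal) panels: adjust tick-label placement and overlay credible intervals / reference values.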
for i, ax in enumerate([ g.subplots[(i, i)] for i in range(nParamsToPlot) ]):
par_name = keys[i]
if i < nParamsToPlot - 1:
ax.tick_params(which='both', labelbottom=False, top=True, labeltop=True, left=False, labelleft=False)
else:
ax.tick_params(which='both', labelbottom=True, top=True, labeltop=False, left=False, labelleft=False)
y0, y1 = ax.get_ylim()
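# Shade the 68% interval of the 1D marginal density as a grey band.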
lev = samples.get1DDensity(par_name).getLimits(settings['contours'][0])
ax.add_patch(Rectangle((lev[0], y0), lev[1] - lev[0], y1 - y0, facecolor='grey', alpha=0.5))
if self.mock_catalogue is not None:
name = names[i]
value = self.mock_catalogue.get_param_values(ID, (name,))
if 'log' in self.adjust_params[name]:
if self.adjust_params[name]['log']:
value = np.log10(value)
ax.plot(value, y0 + (y1 - y0) * 0.05, marker='D', ms=8, color='green')
if self.single_solutions is not None:
row = self.single_solutions['row'][(self.single_solutions['ID'] == ID)]
for i in range(nParamsToPlot):
parX = keys[i]
valueX = param_values[parX][row]
if 'log' in self.adjust_params[parX]:
if self.adjust_params[parX]['log']:
valueX = np.log10(valueX)
for j in range(nParamsToPlot):
ax = g.subplots[(i, j)]
if ax is None:
continue
if i == j:
ax.plot(valueX, y0 + (y1 - y0) * 0.05, marker='*', ms=12, color='darkorange')
else:
parY = keys[j]
valueY = param_values[parY][row]
if 'log' in self.adjust_params[parY]:
if self.adjust_params[parY]['log']:
valueY = np.log10(valueY)
ax.plot(valueY, valueX, marker='*', ms=12, color='darkorange')
if show:
plt.show()
else:
name = prepare_plot_saving(plot_name)
g.export(name)
plt.close()
hdulist.close()
return
| null |
pycfiles/pyq-4.2.1.tar/beagle_pdf.py
|
beagle_pdf.py
|
py
| 8,269 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.load",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "collections.OrderedDict",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits.open",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "beagle_utils.plot_exists",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "beagle_utils.BeagleDirectories.results_dir",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "beagle_utils.BeagleDirectories",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "beagle_utils.BeagleDirectories.suffix",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "astropy.io.fits.open",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "collections.OrderedDict",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "getdist.MCSamples",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "getdist.plots.getSubplotPlotter",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "getdist.plots",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "matplotlib.colors.colorConverter.to_rgb",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors.colorConverter",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.MaxNLocator",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.MaxNLocator",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "matplotlib.patches.Rectangle",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "beagle_utils.prepare_plot_saving",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 173,
"usage_type": "name"
}
] |
198475545
|
"""
This program is free software.
Feel free to fix, modify, or remake it.
https://github.com/lk-lkaz/ldr4pov
"""
import bpy
import math
import mathutils
from bpy.props import (
FloatProperty,
IntProperty,
EnumProperty,
)
bl_info = {
"name": "ldr4pov_Tools_AssemblyAnimation",
"description": "Make a assembly animation by Rigid Body simulation",
"author": "lk.lkaz",
"version": (0, 0, 1),
"blender": (2, 67, 0),
"location": "View3D > Tools",
#"warning": "",
#"wiki_url": "http://",
#"tracker_url": "http://",
"category": "3D View"}
class panel_layout(bpy.types.Panel):
bl_label = "Assembly Animation Tool"
#bl_idname = ""
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
#bl_context = ""
def draw(self, context):
scn = bpy.context.scene
layout = self.layout
layout.label(text="Step0:")
col = layout.column()
col.operator("object.initial_setup", text="Set indicators")
layout.label(text="Step1:")
col = layout.column(align=True)
col.operator("object.set_passive", text="Add PASSIVE")
layout.label(text="Step2:")
col = layout.column(align=True)
col.prop(scn, "ldr4pov_gravity_strength", text="Gravity Strength")
#col.prop(scn, "ldr4pov_sort_vector")
col.prop(scn, "ldr4pov_interval", text="Interval")
col.prop(scn, "ldr4pov_bake_quality", text="Accuracy")
if scn.ldr4pov_bake_quality == "0": # Custom
col.prop(scn.rigidbody_world, "steps_per_second", text="steps_per_second")
col.prop(scn, "ldr4pov_simulation_frames", text="Frames")
col.prop(scn, "ldr4pov_bake_step", text="Frame Step")
"""
#lk? Doesn't work. "AttributeError: Writing to ID classes in this context is not allowed:..."
#lk? Moved to fall_simulation()
elif scn.ldr4pov_bake_quality == "1": # Default
scn.rigidbody_world.steps_per_second = 60
scn.ldr4pov_simulation_frames = 48 # 2sec in 24fps
scn.ldr4pov_bake_step = 2
elif scn.ldr4pov_bake_quality == "2": # Low
scn.rigidbody_world.steps_per_second = 30
scn.ldr4pov_simulation_frames = 24 # 1sec in 24fps
scn.ldr4pov_bake_step = 3
"""
layout.label(text="Step3:")
row = layout.row(align=True)
row.scale_y = 3.0
row.operator("object.fall_simulation", text="Start")
"""
layout.label(text="Step4:")
col = layout.column(align=True)
col.operator("object.reverse_frames", text="Reverse Frames")
"""
class initial_setup(bpy.types.Operator):
bl_idname = "object.initial_setup"
bl_label = "initial_setup"
bl_description = "..."
bl_option = ("REGISTER", "UNDO")
def execute(self, context):
# Make indicators of gravity and sort vector.
if "ldr4pov_gravity_indicator" not in bpy.data.objects:
# Avoid conflict #lk?
if "Cone" in bpy.data.objects:
bpy.data.objects["Cone"].name = "ldr4pov_cone"
# Cone as ldr4pov_gravity_indicator
bpy.ops.mesh.primitive_cone_add(vertices=6, radius1=1, radius2=0, depth=2, enter_editmode=True, location=(0,0,0))
bpy.ops.transform.translate(value=(0,0,10)) # Edit mode
bpy.ops.object.editmode_toggle()
o = bpy.data.objects["Cone"]
o.name = "ldr4pov_gravity_indicator"
o.lock_location = (True,True,True)
#lk? material?
# Avoid conflict #lk?
if "ldr4pov_cone" in bpy.data.objects:
bpy.data.objects["ldr4pov_cone"].name = "Cone"
else:
# Reset location and rotation
o = bpy.data.objects["ldr4pov_gravity_indicator"]
o.location = (0,0,0)
o.rotation_euler = (0,0,0)
if "ldr4pov_sort_indicator" not in bpy.data.objects:
# Avoid conflict #lk?
if "Icosphere" in bpy.data.objects:
bpy.data.objects["Icosphere"].name = "ldr4pov_icosphere"
# Ico_Sphere as ldr4pov_sort_indicator
bpy.ops.mesh.primitive_ico_sphere_add(subdivisions=1, size=1, enter_editmode=True, location=(0,0,0))
bpy.ops.transform.translate(value=(0,0,8)) # Edit mode
bpy.ops.object.editmode_toggle()
o = bpy.data.objects["Icosphere"]
o.name = "ldr4pov_sort_indicator"
o.lock_location=(True,True,True)
#lk? material?
# Avoid conflict #lk?
if "ldr4pov_icosphere" in bpy.data.objects:
bpy.data.objects["ldr4pov_icosphere"].name = "Icosphere"
else:
# Reset location and rotation
o = bpy.data.objects["ldr4pov_sort_indicator"]
o.location = (0,0,0)
o.rotation_euler = (0,0,0)
# Select indicators
bpy.ops.object.select_all(action="DESELECT")
bpy.data.objects["ldr4pov_gravity_indicator"].select = True
bpy.data.objects["ldr4pov_sort_indicator"].select = True
return{"FINISHED"}
class set_passive(bpy.types.Operator):
bl_idname = "object.set_passive"
bl_label = "set_passive"
bl_description = "Add selected objects as PASSIVE (collision_shape = 'CONVEX_HULL')"
bl_option = ("REGISTER", "UNDO")
def execute(self, context):
scn = bpy.context.scene
# Deselect indicators
if "ldr4pov_gravity_indicator" in bpy.data.objects:
bpy.data.objects["ldr4pov_gravity_indicator"].select = False
if "ldr4pov_sort_indicator" in bpy.data.objects:
bpy.data.objects["ldr4pov_sort_indicator"].select = False
# Deselect non-mesh object, and no-polygon object
list1 = [o for o in bpy.context.selected_objects if o.type == "MESH" and len(o.data.polygons) != 0]
if len(list1) == 0:
return{"FINISHED"}
bpy.ops.object.select_all(action="DESELECT")
for o in list1:
o.select = True
# Add "PASSIVE"
scn.objects.active = list1[0]
#bpy.ops.object.origin_set(type="ORIGIN_CENTER_OF_MASS")
bpy.ops.rigidbody.objects_add(type="PASSIVE")
bpy.context.object.rigid_body.collision_shape = "CONVEX_HULL"
bpy.ops.rigidbody.object_settings_copy()
return{"FINISHED"}
class fall_simulation(bpy.types.Operator):
bl_idname = "object.fall_simulation"
bl_label = "fall_simulation"
bl_description = "..."
bl_option = ("REGISTER", "UNDO")
def execute(self, context):
scn = bpy.context.scene
# step1--------------------------------------------------------------------------------------------------------------
# Initial settings
# Set accuracy
if scn.ldr4pov_bake_quality == "1": # Default
scn.rigidbody_world.steps_per_second = 60
scn.ldr4pov_simulation_frames = 48 # 2sec in 24fps
scn.ldr4pov_bake_step = 2
elif scn.ldr4pov_bake_quality == "2": # Low
scn.rigidbody_world.steps_per_second = 30
scn.ldr4pov_simulation_frames = 24 # 1sec in 24fps
scn.ldr4pov_bake_step = 3
# Get vector settings from indicators
if "ldr4pov_gravity_indicator" in bpy.data.objects:
o = bpy.data.objects["ldr4pov_gravity_indicator"]
o.location = (0,0,0)
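# The indicator cone's local +Z axis, transformed into world space and divided by its Z scale, gives the gravity direction.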
ldr4pov_gravity_vector = o.matrix_world * mathutils.Vector((0,0,1)) / o.scale[2]
scn.gravity = ldr4pov_gravity_vector * scn.ldr4pov_gravity_strength
# Deselect indicator
o.select = False
if "ldr4pov_sort_indicator" in bpy.data.objects:
o = bpy.data.objects["ldr4pov_sort_indicator"]
o.location = (0,0,0)
ldr4pov_sort_vector = o.matrix_world * mathutils.Vector((0,0,1))
# Deselect indicator
o.select = False
else:
ldr4pov_sort_vector = scn.gravity
# step2--------------------------------------------------------------------------------------------------------------
# Make a sorted list
# Make a list of passive objects
list1 = [o for o in bpy.context.selected_objects if o.rigid_body is not None and o.rigid_body.type == "PASSIVE"]
if len(list1) == 0:
return{"FINISHED"}
# Sort the list by ldr4pov_sort_vector
list1.sort(key=lambda o: o.location * ldr4pov_sort_vector, reverse = True)
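# reverse=True puts the object with the largest projection onto the sort vector first, so it is simulated (and falls) first.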
# step3--------------------------------------------------------------------------------------------------------------
# Rigid body simulation
bpy.ops.object.select_all(action="DESELECT")
frame_start = scn.frame_current +1
frame_end = scn.frame_current + scn.ldr4pov_simulation_frames -1
for o in list1:
o.select = True
scn.objects.active = o
o.rigid_body.type = "ACTIVE"
# Save current location to keyframe
bpy.ops.anim.keyframe_insert_menu(type="LocRotScale")
# Start simulation
scn.rigidbody_world.point_cache.frame_start = frame_start
scn.rigidbody_world.point_cache.frame_end = frame_end
bpy.ops.rigidbody.bake_to_keyframes(frame_start=frame_start, frame_end=frame_end, step=scn.ldr4pov_bake_step)
#bpy.ops.ptcache.free_bake_all()
#lk? add passive?
#scn.objects.active.rigid_body.type = "PASSIVE"
#scn.objects.active.rigid_body.kinematic = True
o.select = False
# Move frames for next
scn.frame_current += scn.ldr4pov_interval
frame_start += scn.ldr4pov_interval
frame_end += scn.ldr4pov_interval
# step4--------------------------------------------------------------------------------------------------------------
# Correct f-curve for eternal falling
# Select objects
#bpy.ops.object.select_all(action="DESELECT")
for o in list1:
o.select = True
# Set f-curve extrapolation_type to "LINEAR"
bpy.context.area.type="GRAPH_EDITOR"
bpy.ops.graph.extrapolation_type(type="LINEAR")
bpy.context.area.type="VIEW_3D"
# step5--------------------------------------------------------------------------------------------------------------
scn.frame_end = frame_end - scn.ldr4pov_interval
scn.frame_current = frame_start -1
return{"FINISHED"}
# Make a reverse movie #lk?
class reverse_frames(bpy.types.Operator):
bl_idname = "object.reverse_frames"
bl_label = "reverse_frames"
bl_description = "..."
bl_option = ("REGISTER", "UNDO")
def execute(self, context):
scn = bpy.context.scene
#scn.frame_start = 1
return{"FINISHED"}
def register():
bpy.utils.register_class(panel_layout)
bpy.utils.register_class(initial_setup)
bpy.utils.register_class(set_passive)
bpy.utils.register_class(fall_simulation)
bpy.utils.register_class(reverse_frames)
# Parameters
bpy.types.Scene.ldr4pov_gravity_strength = FloatProperty(name="gravity_strength", description="...", default = 10.0, min=0.0)
#bpy.types.Scene.ldr4pov_sort_vector = FloatVectorProperty(name="sort_vector", description="...", default = (0.0, 0.0, 1.0))
bpy.types.Scene.ldr4pov_interval = IntProperty(name="interval", description="...", default=5, min=0, step=1)
bpy.types.Scene.ldr4pov_simulation_frames = IntProperty(name="simulation_frames", description="...", default=48, min=3, step=1)
bpy.types.Scene.ldr4pov_bake_step = IntProperty(name="bake_step", description="\"Frame Step\" of Bake To Keyframes", default=2, min=1, step=1)
bpy.types.Scene.ldr4pov_bake_quality = EnumProperty(
name="",
description="...",
items=[("1","Default","..."),
("2","Low","..."),
("0","Custom","...")],
default="1")
def unregister():
bpy.utils.unregister_class(panel_layout)
bpy.utils.unregister_class(initial_setup)
bpy.utils.unregister_class(set_passive)
bpy.utils.unregister_class(fall_simulation)
bpy.utils.unregister_class(reverse_frames)
# Parameters
del bpy.types.Scene.ldr4pov_gravity_strength
#del bpy.types.Scene.ldr4pov_sort_vector
del bpy.types.Scene.ldr4pov_interval
del bpy.types.Scene.ldr4pov_simulation_frames
del bpy.types.Scene.ldr4pov_bake_step
del bpy.types.Scene.ldr4pov_bake_quality
if __name__ == "__main__":
register()
| null |
ldr4pov_Tools_AssemblyAnimation.py
|
ldr4pov_Tools_AssemblyAnimation.py
|
py
| 12,728 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "bpy.types",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "bpy.types",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.mesh.primitive_cone_add",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.transform.translate",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.object.editmode_toggle",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.mesh.primitive_ico_sphere_add",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.transform.translate",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.object.editmode_toggle",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.object.select_all",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "bpy.types",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.object.select_all",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.rigidbody.objects_add",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.rigidbody.object_settings_copy",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "bpy.types",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 212,
"usage_type": "attribute"
},
{
"api_name": "mathutils.Vector",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "bpy.data",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 221,
"usage_type": "attribute"
},
{
"api_name": "mathutils.Vector",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "bpy.context",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.object.select_all",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 245,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.anim.keyframe_insert_menu",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 254,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.rigidbody.bake_to_keyframes",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 259,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 282,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.graph.extrapolation_type",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 283,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 284,
"usage_type": "attribute"
},
{
"api_name": "bpy.types",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 304,
"usage_type": "attribute"
},
{
"api_name": "bpy.utils.register_class",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "bpy.utils",
"line_number": 314,
"usage_type": "attribute"
},
{
"api_name": "bpy.utils.register_class",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "bpy.utils",
"line_number": 315,
"usage_type": "attribute"
},
{
"api_name": "bpy.utils.register_class",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "bpy.utils",
"line_number": 316,
"usage_type": "attribute"
},
{
"api_name": "bpy.utils.register_class",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "bpy.utils",
"line_number": 317,
"usage_type": "attribute"
},
{
"api_name": "bpy.utils.register_class",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "bpy.utils",
"line_number": 318,
"usage_type": "attribute"
},
{
"api_name": "bpy.types",
"line_number": 321,
"usage_type": "attribute"
},
{
"api_name": "bpy.props.FloatProperty",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "bpy.types",
"line_number": 323,
"usage_type": "attribute"
},
{
"api_name": "bpy.props.IntProperty",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "bpy.types",
"line_number": 324,
"usage_type": "attribute"
},
{
"api_name": "bpy.props.IntProperty",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "bpy.types",
"line_number": 325,
"usage_type": "attribute"
},
{
"api_name": "bpy.props.IntProperty",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "bpy.types",
"line_number": 326,
"usage_type": "attribute"
},
{
"api_name": "bpy.props.EnumProperty",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "bpy.utils.unregister_class",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "bpy.utils",
"line_number": 336,
"usage_type": "attribute"
},
{
"api_name": "bpy.utils.unregister_class",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "bpy.utils",
"line_number": 337,
"usage_type": "attribute"
},
{
"api_name": "bpy.utils.unregister_class",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "bpy.utils",
"line_number": 338,
"usage_type": "attribute"
},
{
"api_name": "bpy.utils.unregister_class",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "bpy.utils",
"line_number": 339,
"usage_type": "attribute"
},
{
"api_name": "bpy.utils.unregister_class",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "bpy.utils",
"line_number": 340,
"usage_type": "attribute"
},
{
"api_name": "bpy.types",
"line_number": 343,
"usage_type": "attribute"
},
{
"api_name": "bpy.types",
"line_number": 345,
"usage_type": "attribute"
},
{
"api_name": "bpy.types",
"line_number": 346,
"usage_type": "attribute"
},
{
"api_name": "bpy.types",
"line_number": 347,
"usage_type": "attribute"
},
{
"api_name": "bpy.types",
"line_number": 348,
"usage_type": "attribute"
}
] |
398176661
|
import datetime
import pytz
import julian
from astropy.io import fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import numpy as np
from collections import OrderedDict
# mapping from attributes of the Image class (as named in the FITS file header) to names which will be displayed in HTML
dict_attributes = OrderedDict()
dict_attributes['date_observed'] = 'Date Observed'
dict_attributes['jd'] = 'JD'
dict_attributes['filter_used'] = 'Filter'
dict_attributes['exposure'] = 'Exposure'
dict_attributes['air_mass'] = 'Air Mass'
dict_attributes['ccd_temp'] = 'CCD Temp'
dict_attributes['image_type'] = 'Image Type'
dict_attributes['focus_value'] = 'Focus Value'
dict_attributes['fwhm'] = 'FWHM'
dict_attributes['lim_mag'] = 'Lim Mag'
dict_attributes['psf_mag'] = 'PSF Mag'
dict_attributes['psf_merr'] = 'PSF Error'
dict_attributes['apr_mag'] = 'Apr Mag'
dict_attributes['apr_merr'] = 'Apr Error'
dict_attributes['tel_alt'] = 'Tel Alt'
dict_attributes['tel_az'] = 'Tel Az'
dict_attributes['ref_ra'] = 'Ref RA'
dict_attributes['ref_dec'] = 'Ref DEC'
dict_attributes['tar_ra'] = 'Tar RA'
dict_attributes['tar_dec'] = 'Tar DEC'
dict_attributes['tar_name'] = 'Target'
real_valued_attributes = ['jd', 'exposure', 'air_mass', 'ccd_temp']
string_valued_attributes = ['tar_name','date_observed', 'filter_used']
#attributes in the query form
form_attributes = { 'String Valued' : string_valued_attributes, 'Real Valued' : real_valued_attributes }
# given a date observed, return the min_jd and max_jd corresponding to it
def date_in_ist2min_max_jd(date_observed):
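# An observing night runs noon-to-noon IST: min_jd is the Julian Date of local noon (converted to UTC) on the given date, and max_jd is one day later.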
date_time_object_ist = datetime.datetime.strptime(date_observed,"%Y-%m-%d")
date_time_object_ist = date_time_object_ist.replace(tzinfo=pytz.timezone('Asia/Calcutta'))
date_time_object_ist_noon = date_time_object_ist.replace(hour=12, minute=00)
date_time_object_utc = date_time_object_ist_noon.astimezone(pytz.timezone('UTC'))
min_jd = julian.to_jd(date_time_object_utc, fmt = 'jd')
max_jd = min_jd+1
return min_jd,max_jd
def list_images2list_filepaths(list_images):
list_filepaths = []
for image in list_images:
list_filepaths.append(image.filepath)
return list_filepaths
def rect2polygon(x_end,y_end,n_of_div):
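# Subdivide each edge of the rectangle (0,0)-(x_end,y_end) into n segments and return the 4*n boundary
# points in order, e.g. rect2polygon(10, 5, 1) returns the four corners [[0,0],[10,0],[10,5],[0,5]].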
n = n_of_div
corners = np.array([[0,0],[x_end,0],[x_end,y_end],[0,y_end]])
output = np.zeros((4*n,2))
for i in range(4):
x_points = np.linspace(corners[i][0], corners[(i+1)%4][0], n+1)[:-1]
y_points = np.linspace(corners[i][1], corners[(i+1)%4][1], n+1)[:-1]
output[i*n:(i+1)*n] = np.concatenate((x_points.reshape((n,1)) ,y_points.reshape((n,1))) ,axis=1)
return output
def boundry_points(x_end, y_end, wcs, n_of_div):
polygon_pixel_points = rect2polygon(x_end, y_end, n_of_div)
sky_coords = SkyCoord.from_pixel(polygon_pixel_points[:,0], polygon_pixel_points[:,1], wcs)
ra = np.array(sky_coords.ra.value)
dec = np.array(sky_coords.dec.value)
l = 4*n_of_div
coords = (np.concatenate((ra.reshape((l,1)) ,dec.reshape((l,1))) ,axis=1) ).tolist()
return(str(coords))
| null |
images_query_interface/common_utils.py
|
common_utils.py
|
py
| 3,079 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.OrderedDict",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "pytz.timezone",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pytz.timezone",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "julian.to_jd",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "astropy.coordinates.SkyCoord.from_pixel",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "astropy.coordinates.SkyCoord",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 81,
"usage_type": "call"
}
] |
309401901
|
# Copyright 2013 – present by the SalishSeaCast Project contributors
# and The University of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SPDX-License-Identifier: Apache-2.0
"""SalishSeaCast worker that creates a tarball of a month's run results and
moves it to remote archival storage. Compression is *not* used for the tarball
because the netCDF files that compose most of it are already highly compressed.
A .index text file containing a list of the files in the tarball is also created
and moved to the remote storage.
"""
import argparse
import functools
import logging
import os
import stat
import tarfile
from pathlib import Path
import arrow
import sysrsync
from nemo_nowcast import NowcastWorker
NAME = "archive_tarball"
logger = logging.getLogger(NAME)
def main():
worker = NowcastWorker(NAME, description=__doc__)
worker.init_cli()
worker.cli.add_argument(
"run_type",
choices={
"nowcast",
"nowcast-green",
"nowcast-agrif",
"hindcast",
},
help="Type of run to archive results files from.",
)
worker.cli.add_argument(
"yyyy_mmm",
type=_arrow_yyyy_mmm,
help="Year and month of run results to archive. Use YYYY-MMM format.",
)
worker.cli.add_argument(
"dest_host",
default="graham-dtn",
help="Name of the host to move tarball and index files to. Default is :kbd:`graham-dtn`.",
)
worker.run(archive_tarball, success, failure)
return worker
def _arrow_yyyy_mmm(string):
"""Convert a YYYY-MMM string to a UTC arrow object or raise
:py:exc:`argparse.ArgumentTypeError`.
The day part of the resulting arrow object is set to 01,
and the time part is set to 00:00:00.
:arg str string: YYYY-MMM string to convert.
:returns: Year-month string converted to a UTC :py:class:`arrow.Arrow` object.
:raises: :py:exc:`argparse.ArgumentTypeError`
"""
try:
return arrow.get(string, "YYYY-MMM")
except arrow.parser.ParserError:
msg = f"unrecognized year-month format: {string} - please use YYYY-MMM"
raise argparse.ArgumentTypeError(msg)
def success(parsed_args):
logger.info(
f'{parsed_args.run_type} {parsed_args.yyyy_mmm.format("*MMMYY").lower()} '
f"results files archived to {parsed_args.dest_host}"
)
msg_type = f"success {parsed_args.run_type}"
return msg_type
def failure(parsed_args):
logger.critical(
f'{parsed_args.run_type} {parsed_args.yyyy_mmm.format("*MMMYY").lower()} '
f"results files archiving to {parsed_args.dest_host} failed"
)
msg_type = f"failure {parsed_args.run_type}"
return msg_type
def archive_tarball(parsed_args, config, *args):
run_type = parsed_args.run_type
yyyy_mmm = parsed_args.yyyy_mmm.format("MMMYY").lower()
dest_host = parsed_args.dest_host
tmp_tarball_dir = Path(config["results tarballs"]["temporary tarball dir"])
run_type_results = (
Path(config["results archive"]["hindcast"]["localhost"])
if run_type == "hindcast"
else Path(config["results archive"][run_type])
)
tarball = tmp_tarball_dir / f"{run_type_results.parts[-1]}-{yyyy_mmm}.tar"
results_path_pattern = run_type_results / f"*{yyyy_mmm}"
logger.info(f"creating {tarball} from {results_path_pattern}/")
_create_tarball(tarball, results_path_pattern)
logger.info(f"creating {tarball.with_suffix('.index')} from {tarball}")
_create_tarball_index(tarball)
dest_dir = Path(config["results tarballs"][dest_host]) / run_type_results.parts[-1]
logger.info(f"rsync-ing {tarball} and index to {dest_host}:{dest_dir}/")
_rsync_to_remote(tarball, dest_host, dest_dir)
_delete_tmp_files(tarball)
return {
"tarball archived": {
"tarball": os.fspath(tarball),
"index": os.fspath(tarball.with_suffix(".index")),
"destination": f"{dest_host}:{dest_dir}/",
}
}
def _create_tarball(tarball, results_path_pattern):
"""
:param :py:class:`pathlib.Path` tarball:
:param :py:class:`pathlib.Path` results_path_pattern:
"""
with tarfile.open(tarball, "w") as tar:
results_dir = results_path_pattern.parent.parent
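# chdir into the archive root so members are stored with relative (not absolute) paths.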
os.chdir(results_dir)
for p in sorted(
results_path_pattern.parent.glob(results_path_pattern.parts[-1])
):
logger.debug(f"adding {p}/ to {tarball}")
tar.add(p.relative_to(results_dir))
def _create_tarball_index(tarball):
"""
:param :py:class:`pathlib.Path` tarball:
"""
with tarball.with_suffix(".index").open("wt") as f:
with tarfile.open(tarball, "r") as tar:
for m in tar.getmembers():
mode_str = stat.filemode(m.mode)[1:]
mode = f"d{mode_str}" if m.isdir() else f"-{mode_str}"
name = f"{m.name}/" if m.isdir() else m.name
f.write(
f"{mode} {m.gname}/{m.uname} {m.size:>10} "
f"{arrow.get(m.mtime).format('YYYY-MM-DD HH:mm')} {name}\n"
)
def _rsync_to_remote(tarball, dest_host, dest_dir):
"""
:param :py:class:`pathlib.Path` tarball:
:param str dest_host:
:param :py:class:`pathlib.Path` dest_dir:
"""
rsync = functools.partial(
sysrsync.run,
destination_ssh=dest_host,
destination=os.fspath(dest_dir),
options=["-t"],
)
logger.debug(f"rsync-ing {tarball} to {dest_host}:{dest_dir}/")
rsync(source=os.fspath(tarball))
logger.debug(
f"rsync-ing {tarball.with_suffix('.index')} to {dest_host}:{dest_dir}/"
)
rsync(source=os.fspath(tarball.with_suffix(".index")))
def _delete_tmp_files(tarball):
"""
:param :py:class:`pathlib.Path` tarball:
"""
logger.debug(f"deleting {tarball}")
tarball.unlink()
logger.debug(f"deleting {tarball.with_suffix('.index')}")
tarball.with_suffix(".index").unlink()
if __name__ == "__main__":
main() # pragma: no cover
| null |
nowcast/workers/archive_tarball.py
|
archive_tarball.py
|
py
| 6,602 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "nemo_nowcast.NowcastWorker",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "arrow.get",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "arrow.parser",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentTypeError",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "os.fspath",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "os.fspath",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "tarfile.open",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "tarfile.open",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "stat.filemode",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "arrow.get",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "sysrsync.run",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "os.fspath",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "os.fspath",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "os.fspath",
"line_number": 183,
"usage_type": "call"
}
] |
122876062
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
=============================
BRENDA Enzyme Database Parser
=============================
:Author:
Moritz Emanuel Beber
:Date:
2011-01-27
:Copyright:
Copyright(c) 2011 Jacobs University of Bremen. All rights reserved.
:File:
errors.py
"""
import re
import errno
import codecs
from StringIO import StringIO
from collections import defaultdict
class ArgumentError(StandardError):
"""
Error class that is meant to be raised when the arguments provided to a
function are incorrect.
"""
def __init__(self, msg, *args, **kw_args):
"""
Parameters
----------
msg : An unformatted string, i.e., it may contain multiple string
format markers.
Returns
-------
An ArgumentError instance.
Notes
-----
A variable number of arguments may be passed. They will all be used to
format msg. So take care that the number and type of additional
arguments match the format markers in msg.
Examples
--------
>>> err = errors.ArgumentError("It's too %s outside!", "rainy")
>>> print(err)
It's too rainy outside!
>>> print(err.errno)
22
"""
StandardError.__init__(self, *args, **kw_args)
self.args = (msg,) + args
self.errno = errno.EINVAL
self.strerror = msg % args
def __str__(self):
"""
Returns
-------
strerror : Simply returns the formatted string.
"""
return self.strerror
class Enzyme(object):
"""
An object that encompasses all information about a kind of enzyme uniquely
identified by its EC number.
"""
def __init__(self, ec_number, *args, **kw_args):
"""
Initialisation of an Enzyme instance.
"""
object.__init__(self)
self.ec_number = ec_number
self.organisms = dict()
self.references = dict()
self.entries = dict()
def __str__(self):
return self.ec_number
def __repr__(self):
return self.ec_number
class BRENDAEntryComment(object):
"""
Encapsulates a comment to an entry in a BRENDA information field.
"""
def __init__(self, message, organisms=None, references=None, *args,\
**kw_args):
"""
Initialisation of a BRENDAEntryComment instance.
"""
object.__init__(self)
self.msg = message
self.organisms = organisms
self.references = references
def __str__(self):
return self.msg
def __repr__(self):
return self.msg
class BRENDAEntry(BRENDAEntryComment):
"""
Encapsulates an entry in a BRENDA information field.
"""
def __init__(self, message, organisms=None, references=None,\
information=None, comment=None, *args, **kw_args):
"""
Initialisation of a BRENDAEntryComment instance.
"""
BRENDAEntryComment.__init__(self, message=message, organisms=organisms,
references=references, *args, **kw_args)
self.information = information
self.comment = comment
class BRENDAOrganism(object):
"""
Encapsulates an entry in a BRENDA information field.
"""
_counter = 1
_memory = dict()
def __new__(cls, name, identifier, references, information, comment, *args,
**kw_args):
"""
Ensures the unique instance policy of all organisms.
"""
if cls._memory.has_key((cls, name)):
return cls._memory[(cls, name)]
else:
return object.__new__(cls)
def __init__(self, name, identifier, references, information, comment,
*args, **kw_args):
"""
Initialisation of a BRENDAOrganism instance.
"""
if self.__class__._memory.has_key((self.__class__, name)):
return
object.__init__(self)
self._index = self.__class__._counter
self.__class__._counter += 1
self.name = name
self.identifier = identifier
self.references = references
self.information = information
self.comment = comment
self.__class__._memory[(self.__class__, self.name)] = self
def __str__(self):
return self.name
def __repr__(self):
return "<%s.%s, %d>" % (self.__module__, self.__class__.__name__, self._index)
class BRENDAParser(object):
"""
Encapsulation of parsing a BRENDA database plain text file.
"""
_subsections = {
"ACTIVATING_COMPOUND": "AC",
"APPLICATION": "AP",
"COFACTOR": "CF",
"CLONED": "CL",
"CRYSTALLIZATION": "CR",
"CAS_REGISTRY_NUMBER": "CR",
"ENGINEERING": "EN",
"GENERAL_STABILITY": "GS",
"IC50_VALUE": "IC50",
"INHIBITORS": "IN",
"KI_VALUE": "KI",
"KM_VALUE": "KM",
"LOCALIZATION": "LO",
"METALS_IONS": "ME",
"MOLECULAR_WEIGHT": "MW",
"NATURAL_SUBSTRATE_PRODUCT": "NSP",
"OXIDATION_STABILITY": "OS",
"ORGANIC_SOLVENT_STABILITY": "OSS",
"PH_OPTIMUM": "PHO",
"PH_RANGE": "PHR",
"PH_STABILITY": "PHS",
"PI_VALUE": "PI",
"POSTTRANSLATIONAL_MODIFICATION": "PM",
"PROTEIN": "PR",
"PURIFICATION": "PU",
"REACTION": "RE",
"REFERENCE": "RF",
"RENATURED": "RN",
"RECOMMENDED_NAME": "RN",
"REACTION_TYPE": "RT",
"SPECIFIC_ACTIVITY": "SA",
"SYSTEMATIC_NAME": "SN",
"SUBSTRATE_PRODUCT": "SP",
"STORAGE_STABILITY": "SS",
"SOURCE_TISSUE": "ST",
"SUBUNITS": "SU",
"SYNONYMS": "SY",
"TURNOVER_NUMBER": "TN",
"TEMPERATURE_OPTIMUM": "TO",
"TEMPERATURE_RANGE": "TR",
"TEMPERATURE_STABILITY": "TS"
}
def __init__(self, filename, encoding="iso-8859-1", low_memory=False,
*args, **kw_args):
"""
Initialisation of a BRENDAParser instance.
"""
object.__init__(self)
self._filename = filename
self._file_handle = None
self._low_memory = low_memory
self._encoding = encoding
self._white_space = re.compile(r"\s", re.UNICODE)
self._organisms_tag = re.compile(r"\#(.+?)\#", re.UNICODE)
self._comment_tag = re.compile(r" \((.*)\)", re.UNICODE)
self._reference_tag = re.compile(r"\<(.+?)\>", re.UNICODE)
self._information_pattern = re.compile(r"\{(.*?)\}", re.UNICODE)
self._numbers_pattern = re.compile(r"\d+", re.UNICODE)
self._prot_qualifier = re.compile(r" (\w+) (?=UniProt|Uniprot|"\
"SwissProt|Swissprot|GenBank|Genbank)", re.UNICODE)
self._current = None
self.enzymes = None
self._line_number = None
def __enter__(self):
self._file_handle = codecs.open(self._filename, mode="rb",
encoding=self._encoding)
if not self._low_memory:
tmp = StringIO(self._file_handle.read())
self._file_handle.close()
self._file_handle = tmp
self.enzymes = defaultdict(list)
self._line_number = 0
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Close the file handle.
"""
if not self._file_handle.closed:
self._file_handle.close()
def parse(self):
"""
Parse multiple Enzyme sections.
"""
for line in self._file_handle:
self._line_number += 1
line = line.strip("\n")
if not line:
continue
elif line.startswith("*"):
continue
elif line.startswith("ID"):
self._parse_id(line[2:].strip())
elif line == "PROTEIN":
self._parse_information_field(line, parser=self._parse_protein)
elif line == "REFERENCE":
self._parse_information_field(line, parser=self._parse_reference)
elif line == "///":
# end one enzyme entry
self._current = None
elif line:
self._current.entries[line.lower()] =\
self._parse_information_field(line)
# convert to normal dictionary again
res = dict(self.enzymes)
res["file_encoding"] = self._encoding
return res
def _parse_information_field(self, line, parser=None):
"""
Parse an information field of an enzyme.
"""
field_identifier = self.__class__._subsections.get(line, False)
if not field_identifier:
raise ArgumentError("unrecognised entry: '%s' @ #%d", line,\
self._line_number)
if not parser:
parser = self._parse_generic_entry
entries = list()
record = list()
for line in self._file_handle:
self._line_number += 1
line = line.strip("\n")
mobj = self._white_space.match(line)
if not line:
if record:
entries.append(parser(" ".join(record)))
break
elif line.startswith(field_identifier):
if record:
entries.append(parser(" ".join(record)))
record = list()
record.append(line[len(field_identifier):].strip())
elif mobj:
record.append(line.strip())
else:
raise ArgumentError("unrecognised line: '%s' @ #%d", line,\
self._line_number)
return entries
def _parse_generic_entry(self, text):
"""
Parse an entry of a specific information field.
"""
mobj = self._information_pattern.search(text)
if mobj:
information = mobj.group(1)
text = text[:mobj.start()] + text[mobj.end():]
else:
information = None
mobj = self._comment_tag.search(text)
if mobj:
comment = self._parse_comment(mobj.group(1))
text = text[:mobj.start()] + text[mobj.end():]
else:
comment = None
mobj = self._organisms_tag.search(text)
if mobj:
organisms = [int(match_num.group(0)) for match_num in\
self._numbers_pattern.finditer(mobj.group(1))]
text = text[:mobj.start()] + text[mobj.end():]
else:
organisms = None
mobj = self._reference_tag.search(text)
if mobj:
references = [int(match_num.group(0)) for match_num in\
self._numbers_pattern.finditer(mobj.group(1))]
text = text[:mobj.start()] + text[mobj.end():]
else:
references = None
return BRENDAEntry(text.strip(), organisms, references, information,\
comment)
def _parse_comment(self, text):
"""
Parse an entry of a specific information field.
"""
mobj = self._organisms_tag.search(text)
if mobj:
organisms = [int(match_num.group(0)) for match_num in\
self._numbers_pattern.finditer(mobj.group(1))]
text = text[:mobj.start()] + text[mobj.end():]
else:
organisms = None
mobj = self._reference_tag.search(text)
if mobj:
references = [int(match_num.group(0)) for match_num in\
self._numbers_pattern.finditer(mobj.group(1))]
text = text[:mobj.start()] + text[mobj.end():]
else:
references = None
return BRENDAEntryComment(text.strip(), organisms, references)
def _parse_id(self, text):
"""
"""
mobj = self._comment_tag.search(text)
# for now we ignore comments to the id
if mobj:
comment = self._parse_comment(mobj.group(1))
text = text[:mobj.start()] + text[mobj.end():]
else:
comment = None
text = text.strip()
self._current = Enzyme(text)
ec_num = text.split(".")
for i in range(1, len(ec_num) + 1):
self.enzymes[".".join(ec_num[:i])].append(self._current)
def _parse_protein(self, text):
"""
"""
mobj = self._information_pattern.search(text)
if mobj:
information = mobj.group(1)
text = text[:mobj.start()] + text[mobj.end():]
else:
information = None
mobj = self._comment_tag.search(text)
if mobj:
comment = self._parse_comment(mobj.group(1))
text = text[:mobj.start()] + text[mobj.end():]
else:
comment = None
mobj = self._organisms_tag.search(text)
if mobj:
organism = int(mobj.group(1))
text = text[:mobj.start()] + text[mobj.end():]
else:
raise ArgumentError("organism reference missing: '%s' @ #%d", text,\
self._line_number)
#TODO: remove databank qualifier somehow capture multiple accessors
mobj = self._prot_qualifier.search(text)
if mobj:
identifier = mobj.group(1)
text = text[:mobj.start()] + text[mobj.end():]
else:
identifier = None
mobj = self._reference_tag.search(text)
if mobj:
references = [int(match_num.group(0)) for match_num in\
self._numbers_pattern.finditer(mobj.group(1))]
text = text[:mobj.start()] + text[mobj.end():]
else:
references = None
self._current.organisms[organism] = BRENDAOrganism(text.strip(),
identifier, references, information, comment)
def _parse_reference(self, text):
"""
"""
pass
| null |
brenda_parser.py
|
brenda_parser.py
|
py
| 14,080 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "errno.EINVAL",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "re.UNICODE",
"line_number": 237,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "re.UNICODE",
"line_number": 238,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "re.UNICODE",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "re.UNICODE",
"line_number": 240,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "re.UNICODE",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "re.UNICODE",
"line_number": 242,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "re.UNICODE",
"line_number": 244,
"usage_type": "attribute"
},
{
"api_name": "codecs.open",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "StringIO.StringIO",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 256,
"usage_type": "call"
}
] |
71326390
|
from django.shortcuts import render, redirect
from django.utils.timezone import localtime, now
from django.contrib import auth
from django.contrib.auth.models import User
from prueba.models import *
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import UserCreationForm
from prueba.forms import UserForm,UsuarioForm
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect, HttpResponse
import os
from django.conf import settings
import random
# from models.py import Usuario
# Create your views here.
def inicio(request):
user = request.user
money = ""
if request.user.is_authenticated:
print(ProfilePic.objects.filter(owner= user).first())
try:
queryCheck = ProfilePic.objects.filter(owner= user).first()
print("queryCheck funciona")
except:
queryCheck = None
print("nada de nada")
try:
queryMoney = Money.objects.filter(owner= user).first()
if request.method == 'POST':
# Add money
chance = random.choice(range(10))
queryMoney.money += chance
queryMoney.save()
money = queryMoney.money
except:
pass
else:
queryCheck = None
return render(request, 'inicio.html', {'profile_pic': queryCheck, 'money': money})
def descripcion(request):
context = {'nombre':localtime(now())}
return render(request, 'descripcion.html', context)
def loginn(request): # logs in a (super)user
if request.method == 'POST':
if ("username" in request.POST.keys()) and ("password" in request.POST.keys()):
user = auth.authenticate(username=request.POST['username'], password=request.POST['password'])
if user is not None and user.is_active:
auth.login(request, user)
return redirect("/inicio/logueado/")
else:
contexto={"error":"error"}
return render(request,"login.html",contexto)
else:
contexto={"error":"error"}
return render(request,"login.html",contexto)
return render(request,"login.html")
""" def register(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
return redirect('/perfil')
else:
form = UserCreationForm()
args = {'form': form}
return render(request, 'register.html', args)
"""
"""
def register(request):
registered = False
if request.method == 'POST':
username = request.POST.get('username')
pwd = request.POST.get('password')
confirm_pwd =request.POST.get('confirm')
user_form = UserForm(data=request.POST)
profile_form = UsuarioForm(data=request.POST)
fieldvalue_dict = {}
fieldvalue_dict["username"] = username
fieldvalue_dict["pwd"] = pwd
can_proceed = True
error_message = ''
# Check that the fields are not blank
if not username.strip() and can_proceed == True:
can_proceed = False
error_message = 'Please enter Username.'
if not pwd.strip() and can_proceed == True:
can_proceed = False
error_message = 'Please enter password.'
if not confirm_pwd.strip() and can_proceed == True:
can_proceed = False
error_message = 'Please confirm password.'
# Check that an image was uploaded
profile_picture = request.FILES
print(profile_picture)
if 'profile_pic' in profile_picture.keys():
print('profile pic exists.')
else:
print('no pic.')
profile_picture = None
can_proceed = False
if can_proceed == True :
# Check whether the username already exists
user_by_user_name = None
try:
user_by_user_name = User.objects.get(username=username)
except:
can_proceed = True
if user_by_user_name:
can_proceed = False
error_message = 'Username already exists.'
# If everything is valid, create the user with Django's User model and a UserProfile with the model defined in models
if can_proceed == True:
pic = request.FILES['profile_pic']
user = User.objects.create_user(username=username,password=pwd)
user_profile = UserProfile.objects.create(profile_pic= pic,user =user)
#user_profile = UserProfile(user, pic)
registered = True
return render(request,"registration.html",
{'user':user,
'user_profile':user_profile,
'registered':registered})
else:
print(user_form.errors,profile_form.errors)
else:
user_form = UserForm()
profile_form = UsuarioForm()
return render(request,"registration.html",
{'user_form':user_form,
'profile_form':profile_form,
'registered':registered})
"""
def register(request):
registered = False
money = ""
profile_pic = ""
if request.method == 'POST':
user_form = UserForm(data=request.POST)
profile_form = UsuarioForm(data=request.POST)
if user_form.is_valid() and profile_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
registered = True
usuario = request.POST.get('username')
# Give the new user starting money
queryUser = User.objects.filter(username = usuario).first()
dinero = Money(owner=queryUser, money = 50)
dinero.save()
# User money
queryMoney = Money.objects.filter(owner= queryUser).first()
if request.method == 'POST':
# Add money
print(request.POST)
if request.POST.get('moneymaker.y'):
chance = random.choice(range(10))
queryMoney.money += chance
queryMoney.save()
# Attach a profile picture to the user:
if 'profile_pic' in request.FILES:
print('found it')
profile_pic = request.FILES['profile_pic']
ext = profile_pic.name.split('.')[-1]
profile_pic.name = "%s.%s" % (queryUser.username, ext)
try:
os.remove(settings.MEDIA_ROOT + "/perfil/profile_pics/"+ profile_pic.name)
except:
pass
ProfilePic.objects.filter(owner=queryUser).delete()
foto = ProfilePic(owner=queryUser, title = profile_pic.name, cover=profile_pic)
foto.save()
# Prepare the image for display in the HTML template
try:
profile_pic = ProfilePic.objects.filter(owner= queryUser).first()
except:
profile_pic = None
money = queryMoney.money
else:
print(user_form.errors)
else:
user_form = UserForm()
profile_form = UsuarioForm()
return render(request,"registration.html",
{'user_form':user_form,
'profile_form':profile_form,
'registered':registered,
'money': money,
'profile_pic' : profile_pic })
def user_login(request):
queryCheck = None
if request.method == 'POST':
if request.POST.get('Login'):
print(request.method)
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request,user)
print(ProfilePic.objects.filter(owner= user).first())
try:
queryCheck = ProfilePic.objects.filter(owner= user).first()
print("queryCheck funciona")
except:
queryCheck = None
print("nada de nada")
if request.user.is_authenticated:
nombre = request.user
# User money
queryMoney = Money.objects.filter(owner= nombre).first()
# Add money
print(request.POST)
if request.POST.get('moneymaker.y'):
chance = random.choice(range(10))
queryMoney.money += chance
queryMoney.save()
money = queryMoney.money
return render(request, "login2.html", {'profile_pic': queryCheck, 'money':money})
else:
print("Someone tried to login and failed.")
print("They used username: {} and password: {}".format(username,password))
return HttpResponse("Los datos ingresados no son validos")
else:
return render(request, "login2.html", {'profile_pic': queryCheck})
@login_required
def user_logout(request):
logout(request)
return HttpResponseRedirect('/inicio')
@login_required(login_url='/login')
def vista_perfil(request):
user = request.user
friendAdd = False
try:
queryFriends = Friendship.objects.filter(creator=user)
except:
queryFriends = None
try:
queryWaifus = Owned.objects.filter(creator=user)
except:
queryWaifus = None
if request.method == 'POST':
#Friend request
friend_req = request.POST.get('friend')
try:
queryUser = User.objects.filter(username = friend_req).first()
try:
queryFriend = Friendship.objects.filter(creator = user, friend = queryUser).first()
print("queryCheck funciona: "+ queryFriend.username)
except:
queryCheck = None
print("aun no son amigos, pero ahora lo seran")
amistad = Friendship(creator=user, friend = queryUser)
amistad.save()
except:
print("fail, amigo no existe, forever alone")
#Profile pic
if 'profile_pic' in request.FILES:
print('found it')
profile_pic = request.FILES['profile_pic']
ext = profile_pic.name.split('.')[-1]
profile_pic.name = "%s.%s" % (user.username, ext)
try:
os.remove(settings.MEDIA_ROOT + "/perfil/profile_pics/"+ profile_pic.name)
except:
pass
ProfilePic.objects.filter(owner=user).delete()
foto = ProfilePic(owner=user, title = profile_pic.name, cover=profile_pic)
foto.save()
# Prepare the image for display in the HTML template
try:
queryCheck = ProfilePic.objects.filter(owner= user).first()
except:
queryCheck = None
# Prepare the friends list for display in the HTML template
queryFriend = Friendship.objects.filter(creator = user)
amistad = []
amigos = []
if queryFriend is not None:
for amigo in queryFriend:
if len(amigos) < 5:
amigos.append(amigo.friend.username)
else:
amistad.append(amigos)
amigos = []
amigos.append(amigo.friend.username)
if len(amigos) > 0:
amistad.append(amigos)
# Prepare the waifu collection for display in the HTML template
queryWaifu = Owned.objects.filter(creator = user)
coleccion = []
waifus = []
if queryWaifu is not None:
for waifu in queryWaifu:
if len(waifus) < 5:
waifus.append(waifu.waifu.nombre)
else:
coleccion.append(waifus)
waifus = []
waifus.append(waifu.waifu.nombre)
if len(waifus) > 0:
coleccion.append(waifus)
# User money
queryMoney = Money.objects.filter(owner= user).first()
if request.method == 'POST':
# Add money
print(request.POST)
if request.POST.get('moneymaker.y'):
chance = random.choice(range(10))
queryMoney.money += chance
queryMoney.save()
money = queryMoney.money
print(coleccion)
return render(request, "perfil.html",
{'amigos':amistad,
'waifus':coleccion,
'profile_pic':queryCheck,
'money':money})
@login_required(login_url='/login')
def gacha(request):
# User money
user = request.user
elegida = ""
queryMoney = Money.objects.filter(owner= user).first()
if request.method == 'POST':
# Add money
print(request.POST)
money = queryMoney.money
if request.POST.get('moneymaker.y'):
chance = random.choice(range(10))
queryMoney.money += chance
queryMoney.save()
money = queryMoney.money
if request.POST.get('gacha'):
if money >= 50:
todasLasWaifus = Waifus.objects.all()
print(todasLasWaifus)
elegida = random.choice(todasLasWaifus)
print(elegida)
# Generate a waifu
felicidad = Owned(creator = user, waifu = elegida)
felicidad.save()
queryMoney.money -= 50
queryMoney.save()
money = queryMoney.money
money = queryMoney.money
return render(request, 'gacha.html',
{'money': money,
'waifu': elegida})
| null |
teamWaifu/prueba/views.py
|
views.py
|
py
| 14,583 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "random.choice",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.localtime",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "prueba.forms.UserForm",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "prueba.forms.UsuarioForm",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects.filter",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.MEDIA_ROOT",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "prueba.forms.UserForm",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "prueba.forms.UsuarioForm",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.logout",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 274,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.User.objects.filter",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 299,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 299,
"usage_type": "name"
},
{
"api_name": "os.remove",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.MEDIA_ROOT",
"line_number": 321,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 321,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 403,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 426,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 390,
"usage_type": "call"
}
] |
276492671
|
import cv2
import numpy as np
from helpers import apply_clahe
def add_circle_mask(image,x=None,y=None,r=None,type=None):
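    # type == 0 blanks the pixels inside the circle, type == 1 blanks the pixels
    # outside it; when x, y or r are omitted the circle is centred and uses the
    # largest radius that fits inside the image.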
h,w = image.shape
if x is None or y is None: y,x = h//2,w//2
Y,X = np.ogrid[:h,:w]
if r is None: r = min(h//2,w//2)
mask = (X-x)**2+(Y-y)**2<=r**2
if type==0: image[mask] = 0
elif type==1: image[~mask] = 0
return image
def segment(image):
height,width,_ = image.shape
g_channel = image[:,:,1]
hls_image = cv2.cvtColor(image,cv2.COLOR_BGR2HLS)
l_channel = hls_image[:,:,1]
clahe_img = apply_clahe(l_channel)
norm_img = cv2.normalize(clahe_img,None,0,255,cv2.NORM_MINMAX)
_,thresh_img = cv2.threshold(norm_img,150,255,cv2.THRESH_BINARY,)
thresh_img = add_circle_mask(thresh_img,r=350,type=1)
thresh_img = np.multiply(l_channel,np.clip(thresh_img,0,1))
thresh_img = cv2.medianBlur(thresh_img,19)
norm_img = cv2.normalize(g_channel,None,0,255,cv2.NORM_MINMAX)
mark_img = cv2.medianBlur(norm_img,11)
mask_img = cv2.max(norm_img,mark_img)
exudates_img = cv2.subtract(mask_img,mark_img)
exudates_img = cv2.multiply(exudates_img,10)
_, exudates_img = cv2.threshold(exudates_img,150,255,cv2.THRESH_BINARY)
x,y,w,h = 0,30,800,740
mask = np.ones(exudates_img.shape,np.uint8)
mask[y:y+h,x:x+w] = exudates_img[y:y+h,x:x+w]
exudates_img = mask
exudates_img = add_circle_mask(exudates_img,r=390,type=1)
exudates_img = cv2.erode(exudates_img,(9,9))
exudates_img = cv2.dilate(exudates_img,(9,9))
optic_disk = cv2.HoughCircles(thresh_img,cv2.HOUGH_GRADIENT,1,thresh_img.size,param1=200,param2=5,maxRadius=100)
(x,y,r) = np.uint16(np.around(optic_disk))[0][0]
r = r+r//4
# temp_img = g_channel.copy()
# cv2.circle(temp_img, (x, y), r, 255, 5)
exudates_img = add_circle_mask(exudates_img,x,y,r,0)
return exudates_img
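# Hedged usage sketch (illustrative, not part of the original module); the file
# names below are placeholders. segment() expects a BGR fundus image roughly
# matching the hard-coded crop (800x740) and circle radii used above.
if __name__ == "__main__":
    fundus = cv2.imread("fundus.png")
    exudates_mask = segment(fundus)
    cv2.imwrite("exudates_mask.png", exudates_mask)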
| null |
exudates.py
|
exudates.py
|
py
| 1,886 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.ogrid",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2HLS",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "helpers.apply_clahe",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cv2.normalize",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.NORM_MINMAX",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "numpy.multiply",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.medianBlur",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "cv2.normalize",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cv2.NORM_MINMAX",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "cv2.medianBlur",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "cv2.max",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "cv2.subtract",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "cv2.multiply",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "cv2.erode",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "cv2.dilate",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "cv2.HoughCircles",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "cv2.HOUGH_GRADIENT",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "numpy.uint16",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.around",
"line_number": 46,
"usage_type": "call"
}
] |
330762448
|
import logging
import asyncio
from sechome_conf import HOST
if HOST == 'pi':
import RPi.GPIO as gpio
else:
import mock_gpio as gpio
gpio.setwarnings(False)
def setup_gpio(pin):
gpio.setmode(gpio.BOARD)
gpio.setup(pin, gpio.IN)
def cleanup_gpio():
gpio.cleanup()
# 0 == open, 1 == closed
def is_door_open(pin):
return gpio.input(pin) == 0
async def read_door_sensor(pin=18, interval=1):
setup_gpio(pin)
door_state = is_door_open(pin)
print("initial door state: {}".format(door_state))
yield door_state
try:
while True:
await asyncio.sleep(interval)
new_door_state = is_door_open(pin)
if door_state == new_door_state:
continue
else:
door_state = new_door_state
yield door_state
finally:
cleanup_gpio()
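# Hedged usage sketch (illustrative, not part of the original module): consume
# the async generator above and log every state change. Pin 18 and the
# one-second interval simply repeat the defaults.
async def _watch_door():
    async for is_open in read_door_sensor(pin=18, interval=1):
        print("door open" if is_open else "door closed")
# To try it: asyncio.run(_watch_door())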
| null |
door_sensor.py
|
door_sensor.py
|
py
| 872 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sechome_conf.HOST",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "mock_gpio.setwarnings",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "mock_gpio.setmode",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "mock_gpio.BOARD",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "mock_gpio.setup",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "mock_gpio.IN",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "mock_gpio.cleanup",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "mock_gpio.input",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 31,
"usage_type": "call"
}
] |
264069367
|
from flask import Flask, render_template, request
from flask_restful import Resource, Api
import sys
DEBUG = True
TEMPLATE_FOLDER = './frontend/dist/'
STATIC_URL_PATH = ''
STATIC_FOLDER = './frontend/dist/'
app = Flask(__name__, template_folder=TEMPLATE_FOLDER, static_url_path=STATIC_URL_PATH, static_folder=STATIC_FOLDER)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
api = Api(app)
# Create the database file
if DEBUG:
@app.before_first_request
def create_tables():
db.create_all()
# Create the base route to serve the index.html template
@app.route('/')
@app.route('/<sid>')
def home(sid=''):
# skanban_id = request.args.get('sid')
# Print to flask console via stdout:
# print('skanban id in URL variable: {}'.format(sid), file=sys.stdout)
# print('skanban id in URL params: {}'.format(skanban_id), file=sys.stdout)
return render_template("index.html")
# Add resource imports after app is created to resolve possible import conflicts
from api.resources.boards import Board, BoardCollection
from api.resources.lists import List, ListCollection
from api.resources.cards import Card, CardCollection
from api.resources.skanban import Skanban, SkanbanCollection
# Add resources
api.add_resource(BoardCollection, '/boards', '/boards/', '/boards/all/', endpoint='boards_ep')
api.add_resource(Board, '/boards/<int:id>', endpoint='board_ep')
api.add_resource(ListCollection, '/lists', '/lists/', '/lists/all/', endpoint='lists_ep')
api.add_resource(List, '/lists/<int:id>', endpoint='list_ep')
api.add_resource(CardCollection, '/cards', '/cards/', endpoint='cards_ep')
api.add_resource(Card, '/cards/<int:id>', endpoint='card_ep')
api.add_resource(Skanban, '/skanban/<string:sid>', endpoint='skanban_ep')
api.add_resource(SkanbanCollection, '/skanban', '/skanban/', endpoint='skanbans_ep')
# Run main app
if __name__ == "__main__":
from db import db
db.init_app(app)
app.run(port=5000, debug=DEBUG)
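# Hedged usage sketch (not part of the original file): with the dev server
# running, the resources registered above can be exercised with e.g.
#   curl http://127.0.0.1:5000/boards
#   curl http://127.0.0.1:5000/cards/1
#   curl http://127.0.0.1:5000/skanban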
| null |
app.py
|
app.py
|
py
| 2,017 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask_restful.Api",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "api.resources.boards.add_resource",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "api.resources.boards.BoardCollection",
"line_number": 39,
"usage_type": "argument"
},
{
"api_name": "api.resources.boards",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "api.resources.boards.add_resource",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "api.resources.boards.Board",
"line_number": 40,
"usage_type": "argument"
},
{
"api_name": "api.resources.boards",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "api.resources.boards.add_resource",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "api.resources.lists.ListCollection",
"line_number": 41,
"usage_type": "argument"
},
{
"api_name": "api.resources.boards",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "api.resources.boards.add_resource",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "api.resources.lists.List",
"line_number": 42,
"usage_type": "argument"
},
{
"api_name": "api.resources.boards",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "api.resources.boards.add_resource",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "api.resources.cards.CardCollection",
"line_number": 43,
"usage_type": "argument"
},
{
"api_name": "api.resources.boards",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "api.resources.boards.add_resource",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "api.resources.cards.Card",
"line_number": 44,
"usage_type": "argument"
},
{
"api_name": "api.resources.boards",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "api.resources.boards.add_resource",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "api.resources.skanban.Skanban",
"line_number": 45,
"usage_type": "argument"
},
{
"api_name": "api.resources.boards",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "api.resources.boards.add_resource",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "api.resources.skanban.SkanbanCollection",
"line_number": 46,
"usage_type": "argument"
},
{
"api_name": "api.resources.boards",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "db.db.init_app",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "db.db",
"line_number": 51,
"usage_type": "name"
}
] |
254716357
|
# -*- coding: utf-8 -*-
#FABIEN GENTY
#2017/10
#PROJET LONG VISUALISATEUR DE PROTEINES
# importing librairy
from bokeh.plotting import figure, output_file, show,ColumnDataSource
from bokeh import *
from bokeh.models import HoverTool,Span,Slider, CustomJS,CategoricalColorMapper,Button, DataTable, TableColumn
from bokeh.layouts import row, widgetbox
from bokeh.models.widgets import *
from bokeh.embed import components,autoload_static
import opening_csv as cs
import pandas as pd
import numpy as np
from os.path import dirname, join
def volcano_plot(data_sans):
index = data_sans['access']
x = data_sans['logfc']
y = data_sans['pvalue']
pos = data_sans['pos']
source = ColumnDataSource(
data=dict(
x = x,
y = y,
accession = index,
position = pos
))
color_mapper = CategoricalColorMapper(factors=["up","normal","down"],
palette=['yellow', 'green','blue'])
# dictionary for the hover tool
hover = HoverTool(tooltips=[
("accession", "@accession"),
("x","@x"),
("y","@y")
])
yrange=[min(y)-0.1,max(y)+0.1]
xrange = [min(x)-1,max(x)+0.1]
# setting the tools
TOOLS=",pan,wheel_zoom,box_zoom,reset,box_select,lasso_select,previewsave"
# create a new plot with a title and axis labels
p = figure(
y_range=yrange,
x_range = xrange,
x_axis_label = 'log(fc)',
y_axis_label = '-log(pvalue)',
tools = TOOLS,
plot_width = 800,
plot_height = 800)
p.add_tools(hover)
# title modification
p.title.text = "pvalue versus fold-change"
p.title.align = "center"
p.title.text_color = "blue"
p.title.text_font_size = "25px"
#p.title.background_fill_color = "#aaaaee"
#setting the widgets slider
h_slider = Slider(start=yrange[0],end=yrange[1], value=1, step=.1, title="variation of log(pvalue)")
v_slider_right = Slider(start = 0, end = xrange[1], value=0.5, step=.01,title="right fold change")
v_slider_left = Slider(start =xrange[0], end=0, value=-0.5, step=.01,title="left log fold change")
# Horizontal line
hline = Span(location=h_slider.value, dimension='width', line_color='green', line_width=2)
# Vertical line
vline1 = Span(location =v_slider_right.value , dimension='height', line_color='blue', line_width=2)
vline2 = Span(location=v_slider_left.value, dimension='height', line_color='black', line_width=2)
#setting the widgets slider
h_slider = Slider(start=yrange[0],end=yrange[1], value=1, step=.1, title="variation of log(pvalue)")
v_slider_right = Slider(start = 0, end = xrange[1], value=0.5, step=.01,title="right fold change")
v_slider_left = Slider(start =xrange[0], end=0, value=-0.5, step=.01,title="left log fold change")
p.renderers.extend([vline1,vline2, hline])
# add a circle points
p.circle('x','y',source = source,
color=dict(field='position', transform=color_mapper),
legend='position'
)
#setting the code to obtain a real-time adjustment of value and color
#on the plot
code="""
var data = source.data;
var low = v_slider_left.value;
var up = v_slider_right.value
var back_value = h_slider.value;
x = data['x']
y = data['y']
pos = data['position']
span.location = slider.value
for (i = 0; i < x.length; i++) {
if( (x[i] < low) && (y[i] > back_value)) {
pos[i] = 'down'
} else if ((x[i] > up) && (y[i] > back_value)){
pos[i] = 'up'
} else {
pos[i] = 'normal'
}
}
console.log(source.data)
source.change.emit()
"""
# callback of the sliders
h_slider.callback = CustomJS(args=dict(source=source, span=hline, slider=h_slider, v_slider_left=v_slider_left,h_slider=h_slider,v_slider_right=v_slider_right), code=code)
v_slider_right.callback = CustomJS(args=dict(source=source, span=vline1, slider=v_slider_right, v_slider_left=v_slider_left,h_slider=h_slider,v_slider_right=v_slider_right), code=code)
v_slider_left.callback = CustomJS(args=dict(source=source, span=vline2, slider=v_slider_left, v_slider_left=v_slider_left,h_slider=h_slider,v_slider_right=v_slider_right), code=code)
# create the DataTable that shows the current selection results
columns = [TableColumn(field="accession", title="numero d'accession"),
TableColumn(field="x", title="log(fc)"),
TableColumn(field="y", title="-log(pvalue)"),
TableColumn(field="position", title="position"),
]
data_table = DataTable(source=source, columns=columns, width=400, height=280)
# create the download button
button = Button(label="Download", button_type="success")
button.callback = CustomJS(args=dict(source=source),code=open(join(dirname(__file__),"static/js/download.js")).read())
layout = row(p, widgetbox(v_slider_left,v_slider_right,h_slider,data_table,button))
return layout
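# Hedged usage sketch (illustrative, not part of the original module): the data
# below is made up; only the column names match what volcano_plot() reads
# ('access', 'logfc', 'pvalue', 'pos').
def _demo_volcano_plot():
    df = pd.DataFrame({"access": ["P1", "P2", "P3"],
                       "logfc": [-1.2, 0.1, 0.9],
                       "pvalue": [2.1, 0.2, 1.6],
                       "pos": ["down", "normal", "up"]})
    show(volcano_plot(df))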
if __name__ == "__main__":
main()
| null |
app/plot.py
|
plot.py
|
py
| 5,123 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "bokeh.plotting.ColumnDataSource",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "bokeh.models.CategoricalColorMapper",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "bokeh.models.HoverTool",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "bokeh.plotting.figure",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "bokeh.models.Slider",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "bokeh.models.Slider",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "bokeh.models.Slider",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "bokeh.models.Span",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "bokeh.models.Span",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "bokeh.models.Span",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "bokeh.models.Slider",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "bokeh.models.Slider",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "bokeh.models.Slider",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "bokeh.models.CustomJS",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "bokeh.models.CustomJS",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "bokeh.models.CustomJS",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "bokeh.models.TableColumn",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "bokeh.models.TableColumn",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "bokeh.models.TableColumn",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "bokeh.models.TableColumn",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "bokeh.models.DataTable",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "bokeh.models.Button",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "bokeh.models.CustomJS",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "bokeh.layouts.row",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "bokeh.layouts.widgetbox",
"line_number": 134,
"usage_type": "call"
}
] |
190273067
|
import base64
import io
import random
import string
from PIL import Image, ImageFont, ImageDraw
class CaptchaTool(object):
"""
Generate an image CAPTCHA.
"""
def __init__(self, width=50, height=12):
self.width = width
self.height = height
# New RGBA image object; the fourth channel is the alpha (transparency) channel
self.im = Image.new('RGBA', (width, height), 12)
# Font
self.font = ImageFont.load_default()
# Draw object
self.draw = ImageDraw.Draw(self.im)
def draw_lines(self, num=3):
"""
Draw interference lines.
"""
for num in range(num):
x1 = random.randint(0, self.width // 2)
y1 = random.randint(0, self.height // 2)
x2 = random.randint(0, self.width)
y2 = random.randint(self.height // 2, self.height)
self.draw.line(((x1, y1), (x2, y2)), fill='white', width=1)
def get_verify_code(self):
"""
Generate the CAPTCHA image.
"""
# Build a random verification code (5 alphanumeric characters)
# code = ''.join(random.sample(string.digits, 4))
#code = '0123456789'+string.ascii_letters+'9876543210'
#code = random.sample('0123456789'+string.ascii_letters+'9876543210', 4)
code = ''
code_len = 5
for i in range(code_len):
add = random.choice([random.randrange(10), chr(random.randrange(65, 91)), chr(random.randrange(97, 122))])
code += str(add)
# Draw the code string onto the image
for item in range(code_len):
self.draw.text((2 + random.randint(-2, 2) + 10 * item, 2 + random.randint(-2, 2)),
text=code[item],
fill=(random.randint(160, 200),
random.randint(180, 220),
random.randint(190, 250))
, font=self.font)
# Draw interference lines
# self.draw_lines()
# Resize the image
self.im = self.im.resize((100, 35))
# Convert the image to a base64 string
buffered = io.BytesIO()
self.im.save(buffered, format="PNG")
img_str = b"data:image/png;base64," + base64.b64encode(buffered.getvalue())
return img_str, code.lower()
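if __name__ == "__main__":
    # Hedged usage sketch (illustrative, not part of the original class): build
    # one CAPTCHA and print the expected answer plus the start of the data URI.
    tool = CaptchaTool()
    img_src, answer = tool.get_verify_code()
    print(answer, img_src[:40])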
| null |
flaskapp/common/captcha.py
|
captcha.py
|
py
| 2,221 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "PIL.Image.new",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "PIL.ImageFont.load_default",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "PIL.ImageDraw.Draw",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "PIL.ImageDraw",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"line_number": 63,
"usage_type": "call"
}
] |
520011733
|
import numpy as np
import cv2
capture = cv2.VideoCapture(0)
while (1):
val, frame = capture.read()
abu = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
negative = (255 - abu)
cv2.imshow('Gambar Negatif', negative)
if cv2.waitKey(1) & 0xFF == ord('c'):
break
cv2.destroyAllWindows()
capture.release()
| null |
negative.py
|
negative.py
|
py
| 326 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.VideoCapture",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 17,
"usage_type": "call"
}
] |
478624247
|
from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional, Set
from blspy import G1Element
from typing_extensions import Protocol
from chia.server.ws_connection import WSChiaConnection
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.ints import uint32, uint64, uint128
from chia.wallet.util.wallet_types import WalletType
from chia.wallet.wallet_coin_record import WalletCoinRecord
from chia.wallet.wallet_info import WalletInfo
if TYPE_CHECKING:
from chia.wallet.wallet_state_manager import WalletStateManager
class WalletProtocol(Protocol):
@classmethod
def type(cls) -> WalletType:
...
def id(self) -> uint32:
...
async def coin_added(self, coin: Coin, height: uint32, peer: WSChiaConnection) -> None:
...
async def select_coins(
self,
amount: uint64,
exclude: Optional[List[Coin]] = None,
min_coin_amount: Optional[uint64] = None,
max_coin_amount: Optional[uint64] = None,
excluded_coin_amounts: Optional[List[uint64]] = None,
) -> Set[Coin]:
...
async def get_confirmed_balance(self, record_list: Optional[Set[WalletCoinRecord]] = None) -> uint128:
...
async def get_unconfirmed_balance(self, unspent_records: Optional[Set[WalletCoinRecord]] = None) -> uint128:
...
async def get_spendable_balance(self, unspent_records: Optional[Set[WalletCoinRecord]] = None) -> uint128:
...
async def get_pending_change_balance(self) -> uint64:
...
async def get_max_send_amount(self, records: Optional[Set[WalletCoinRecord]] = None) -> uint128:
...
# Not all wallets support this. To signal support, make
# require_derivation_paths() return true
def puzzle_hash_for_pk(self, pubkey: G1Element) -> bytes32:
...
def require_derivation_paths(self) -> bool:
...
def get_name(self) -> str:
...
wallet_info: WalletInfo
# WalletStateManager is only imported for type hinting thus leaving pylint
# unable to process this
wallet_state_manager: WalletStateManager # pylint: disable=used-before-assignment
| null |
chia/wallet/wallet_protocol.py
|
wallet_protocol.py
|
py
| 2,239 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "typing_extensions.Protocol",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "chia.wallet.util.wallet_types.WalletType",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "chia.util.ints.uint32",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "chia.types.blockchain_format.coin.Coin",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "chia.util.ints.uint32",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "chia.server.ws_connection.WSChiaConnection",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "chia.util.ints.uint64",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "chia.types.blockchain_format.coin.Coin",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "chia.util.ints.uint64",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "chia.util.ints.uint64",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "chia.util.ints.uint64",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "typing.Set",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "chia.types.blockchain_format.coin.Coin",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "typing.Set",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "chia.wallet.wallet_coin_record.WalletCoinRecord",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "chia.util.ints.uint128",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "typing.Set",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "chia.wallet.wallet_coin_record.WalletCoinRecord",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "chia.util.ints.uint128",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "typing.Set",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "chia.wallet.wallet_coin_record.WalletCoinRecord",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "chia.util.ints.uint128",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "chia.util.ints.uint64",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "typing.Set",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "chia.wallet.wallet_coin_record.WalletCoinRecord",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "chia.util.ints.uint128",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "blspy.G1Element",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "chia.types.blockchain_format.sized_bytes.bytes32",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "chia.wallet.wallet_info.WalletInfo",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "chia.wallet.wallet_state_manager.WalletStateManager",
"line_number": 70,
"usage_type": "name"
}
] |
34445111
|
from pprint import pprint
import requests
TEST_USER_DATA = {
"first_name": "Ash",
"last_name": "Ketchum",
"hobbies": "Catching em all!"
}
URL = 'http://127.0.0.1:5000/users/2'
def update_user():
out = requests.put(URL, json=TEST_USER_DATA)
if out.status_code == 200:
pprint(out.json())
else:
print("Something went wrong while trying to update")
if __name__ == "__main__":
update_user()
| null |
tests/manual/update_users.py
|
update_users.py
|
py
| 435 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.put",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 16,
"usage_type": "call"
}
] |
352961069
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import time
from .base.exchange import *
from .errors import *
import requests
from datetime import datetime
from urllib.parse import urlencode
import time
import calendar
import hmac
import hashlib
import http.client
POLONIEX_REST_URL = 'poloniex.com'
class Poloniex(Exchange):
def __init__(self, apikey, secretkey):
def httpPost(url, resource, params, apikey, sign, *args, **kwargs):
params['nonce'] = int("{:.6f}".format(
time.time()).replace('.', ''))
headers = {
"Key": apikey,
"Sign": hmac.new(sign.encode("utf8"), urlencode(params).encode("utf8"), hashlib.sha512).hexdigest(),
}
return self.session.post('https://' + url + resource,
headers=headers, data=params).json()
super().__init__(apikey, secretkey)
self.session = requests.session()
self.httpPost = httpPost
def markets(self):
print("not implemented!")
def ticker(self, item='USDT_BTC'):
TICKER_RESOURCE = "/public?command=returnTicker"
json = self.session.get('https://' + POLONIEX_REST_URL +
TICKER_RESOURCE).json()
utc = datetime.utcfromtimestamp(time.time())
return Ticker(
timestamp=calendar.timegm(utc.timetuple()),
last=float(json[item]["last"]),
high=float(json[item]["high24hr"]),
low=float(json[item]["low24hr"]),
bid=float(json[item]["highestBid"]),
ask=float(json[item]["lowestAsk"]),
volume=float(json[item]["baseVolume"])
)
def board(self, item='USDT_BTC'):
print("not implemented!")
def order(self, item, order_type, side, price, size, *args, **kwargs):
ORDER_RESOURCE = "/tradingApi"
params = {
"command": side.lower(),
"currencyPair": item,
"rate": str(price),
"amount": str(size),
}
if order_type == "fillOrKill":
params["fillOrKill"] = "1"
json = self.httpPost(POLONIEX_REST_URL,
ORDER_RESOURCE, params, self._apikey, self._secretkey)
return json['orderNumber']
def balance(self):
BALANCE_RESOURCE = "/tradingApi"
params = {
"command": "returnCompleteBalances"
}
json = self.httpPost(POLONIEX_REST_URL,
BALANCE_RESOURCE, params, self._apikey, self._secretkey)
balances = {}
for key in sorted(json.keys()):
balances[key] = [float(json[key]['onOrders']) + float(
json[key]['available']), float(json[key]['available'])]
return balances
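if __name__ == "__main__":
    # Hedged usage sketch (illustrative, not part of the original module): the
    # key and secret strings are placeholders; ticker() only uses the public API.
    client = Poloniex("YOUR_API_KEY", "YOUR_SECRET_KEY")
    print(client.ticker("USDT_BTC"))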
| null |
exchanges/poloniex.py
|
poloniex.py
|
py
| 2,810 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "time.time",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "hmac.new",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "urllib.parse.urlencode",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "hashlib.sha512",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "requests.session",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcfromtimestamp",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "calendar.timegm",
"line_number": 43,
"usage_type": "call"
}
] |
126852498
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('clientes', '0001_initial'),
('locacion', '0001_initial'),
('personas', '__first__'),
('proveedores', '__first__'),
]
operations = [
migrations.CreateModel(
name='Domicilio',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('descripcion', models.CharField(max_length=200, null=True, blank=True)),
('barrio', models.ForeignKey(blank=True, to='locacion.Barrio', null=True)),
('cliente', models.ForeignKey(blank=True, to='clientes.Cliente', null=True)),
('persona', models.ForeignKey(blank=True, to='personas.Persona', null=True)),
('proveedor', models.ForeignKey(blank=True, to='proveedores.Proveedor', null=True)),
],
options={
'verbose_name_plural': 'Domicilios de las Personas',
},
),
migrations.CreateModel(
name='TipoDomicilio',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nombre', models.CharField(max_length=70)),
],
options={
'verbose_name_plural': 'Tipos de Domicilios',
},
),
migrations.AddField(
model_name='domicilio',
name='tipo',
field=models.ForeignKey(blank=True, to='domicilios.TipoDomicilio', unique=True),
),
]
| null |
apps/domicilios/migrations/0001_initial.py
|
0001_initial.py
|
py
| 1,733 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AddField",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 44,
"usage_type": "name"
}
] |
496466706
|
from datetime import datetime
from django.views.generic import TemplateView
from django.contrib.auth.models import User
from articles.models import Article
from blog.models import Post
from events.models import Event
from frontpage.models import CarouselItem
from songs.models import Annotation, Song
class SiteIndex(TemplateView):
template_name = "frontpage/index.html"
POST_COUNT = 1
SONG_COUNT = 8
ANNOTATION_COUNT = 8
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['carousel_items'] = CarouselItem.objects.filter(archived=False)
context['events'] = (Event.items_visible_to(self.request.user)
.filter(datetime__gte=datetime.now())
.order_by('datetime'))
context['posts'] = (Post.items_visible_to(self.request.user)
.order_by('-pub_date')[:SiteIndex.POST_COUNT])
context['songs'] = (Song.items_visible_to(self.request.user)
.order_by('-pub_date')[:SiteIndex.SONG_COUNT])
context['annotation'] = (Annotation.items_visible_to(self.request.user)
.order_by('-pub_date').first())
context['annotations'] = (Annotation.items_visible_to(self.request.user)
.order_by('-pub_date')
[:SiteIndex.ANNOTATION_COUNT])
return context
class About(TemplateView):
ARTICLE_FACTOR = 5
template_name = "about.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
authors = []
for user in User.objects.filter(is_active=True):
author = {}
author['user'] = user
author['annotations'] = Annotation.items_live().filter(
author=user).count()
author['songs'] = Song.items_live().filter(author=user).count()
author['articles'] = Article.items_live().filter(
author=user).count()
author['events'] = Event.items_live().filter(author=user).count()
author['total'] = (author['annotations'] + author['songs'] +
self.ARTICLE_FACTOR * author['articles'] +
author['events'])
if author['total']:
authors.append(author)
context['authors'] = sorted(authors, key=lambda k: k['total'],
reverse=True)
return context
class Format(TemplateView):
template_name = "format.generated"
| null |
piosenka/views.py
|
views.py
|
py
| 2,622 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.views.generic.TemplateView",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "frontpage.models.CarouselItem.objects.filter",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "frontpage.models.CarouselItem.objects",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "frontpage.models.CarouselItem",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "events.models.Event.items_visible_to",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "events.models.Event",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "blog.models.Post.items_visible_to",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "blog.models.Post",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "songs.models.Song.items_visible_to",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "songs.models.Song",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "songs.models.Annotation.items_visible_to",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "songs.models.Annotation",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "songs.models.Annotation.items_visible_to",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "songs.models.Annotation",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.views.generic.TemplateView",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.User.objects.filter",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "songs.models.Annotation.items_live",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "songs.models.Annotation",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "songs.models.Song.items_live",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "songs.models.Song",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "articles.models.Article.items_live",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "articles.models.Article",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "events.models.Event.items_live",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "events.models.Event",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "django.views.generic.TemplateView",
"line_number": 63,
"usage_type": "name"
}
] |
384049578
|
import csv
from datetime import datetime
from dataset.freeze.format.common import Serializer
def value_to_str(value):
if isinstance(value, datetime):
return value.isoformat()
if hasattr(value, 'encode'):
return value.encode('utf-8')
if value is None:
return ''
return value
class CSVSerializer(Serializer):
def init(self):
self.handles = {}
def write(self, path, result):
keys = list(result.keys())
if path not in self.handles:
fh = open(path, 'wb')
writer = csv.writer(fh)
writer.writerow([k.encode('utf-8') for k in keys])
self.handles[path] = (writer, fh)
writer, fh = self.handles[path]
values = [value_to_str(result.get(k)) for k in keys]
writer.writerow(values)
def close(self):
for writer, fh in self.handles.values():
fh.close()
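# Illustrative usage sketch (not part of the original module; the constructor
# arguments come from the dataset.freeze export configuration and are only
# hinted at here, so treat this as a hypothetical outline of the protocol):
#
#   serializer = CSVSerializer(...)   # arguments depend on the freeze config
#   serializer.init()
#   serializer.write('out.csv', {'id': 1, 'name': u'example'})
#   serializer.close()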
| null |
dataset/freeze/format/fcsv.py
|
fcsv.py
|
py
| 913 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "datetime.datetime",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "dataset.freeze.format.common.Serializer",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "csv.writer",
"line_number": 26,
"usage_type": "call"
}
] |
140710146
|
import sys
import os
import numpy as np
import torch
import torch.utils.data as du
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import util
from util import *
from drugcell_NN import *
import argparse
import gc
# build mask: matrix (nrows = number of genes directly annotated to the term, ncols = number of all genes)
# elements of the matrix are 1 if the corresponding gene is one of the relevant genes
def create_term_mask(term_direct_gene_map, gene_dim):
term_mask_map = {}
for term, gene_set in term_direct_gene_map.items():
mask = torch.zeros(len(gene_set), gene_dim)
for i, gene_id in enumerate(gene_set):
mask[i, gene_id] = 1
mask_gpu = torch.autograd.Variable(mask.cuda(CUDA_ID))
term_mask_map[term] = mask_gpu
return term_mask_map
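# Illustrative example of the mask built above (assumption, not part of the
# original script): for term_direct_gene_map = {'GO:A': [0, 2]} and gene_dim = 4,
# create_term_mask would produce for 'GO:A' the 2 x 4 matrix
#   [[1, 0, 0, 0],
#    [0, 0, 1, 0]]
# i.e. one row per directly annotated gene and one column per gene id.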
# solution for 1/2||x-y||^2_2 + c||x||_0
def proximal_l0(yvec, c):
yvec_abs = torch.abs(yvec)
csqrt = torch.sqrt(2*c)
xvec = (yvec_abs>=csqrt)*yvec
return xvec
# solution for 1/2||x-y||^2_2 + c||x||_g
def proximal_glasso_nonoverlap(yvec, c):
ynorm = torch.norm(yvec, p='fro')
if ynorm > c:
xvec = (yvec/ynorm)*(ynorm-c)
else:
xvec = torch.zeros_like(yvec)
return xvec
# solution for ||x-y||^2_2 + c||x||_2^2
def proximal_l2(yvec, c):
return (1./(1.+c))*yvec
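# Worked example for the three proximal operators above (values chosen purely
# for illustration): with yvec = tensor([0.3, -1.5]) and c = 0.5,
#   proximal_l0(yvec, torch.tensor(0.5))   -> [0.0, -1.5]   (entries with |y| < sqrt(2c) = 1.0 are zeroed)
#   proximal_glasso_nonoverlap(yvec, 0.5)  -> yvec * (||yvec|| - 0.5) / ||yvec||   (since ||yvec|| ~ 1.53 > 0.5)
#   proximal_l2(yvec, 0.5)                 -> yvec / 1.5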
# prune the structure by palm
def optimize_palm(model, dG, root, reg_l0, reg_glasso, reg_decay, lr=0.001, lip=0.001):
dG_prune = dG.copy()
for name, param in model.named_parameters():
if "direct" in name:
# mutation side
# l0 for direct edge from gene to term
param_tmp = param.data - lip*param.grad.data
param_tmp2 = proximal_l0(param_tmp, torch.tensor(reg_l0*lip))
#("%s: before #0 is %d, after #0 is %d, threshold: %f" %(name, len(torch.nonzero(param.data, as_tuple =False)), len(torch.nonzero(param_tmp2, as_tuple =False)), reg_l0*lip))
param.data = param_tmp2
elif "GO_linear_layer" in name:
# group lasso for
dim = model.num_hiddens_genotype
term_name = name.split('_')[0]
child = model.term_neighbor_map[term_name]
for i in range(len(child)):
#dim = model.num_hiddens_genotype
term_input = param.data[:,i*dim:(i+1)*dim]
term_input_grad = param.grad.data[:,i*dim:(i+1)*dim]
term_input_tmp = term_input - lip*term_input_grad
term_input_update = proximal_glasso_nonoverlap(term_input_tmp, reg_glasso*lip)
#print("%s child %d: before norm is %f, after #0 is %f, threshold %f" %(name, i, torch.norm(term_input, p='fro'), torch.norm(term_input_update, p='fro'), reg_glasso*lip))
param.data[:,i*dim:(i+1)*dim] = term_input_update
num_n0 = len(torch.nonzero(term_input_update, as_tuple =False))
if num_n0 == 0 :
dG_prune.remove_edge(term_name, child[i])
# weight decay for direct
direct_input = param.data[:,len(child)*dim:]
direct_input_grad = param.grad.data[:,len(child)*dim:]
direct_input_tmp = direct_input - lr*direct_input_grad
direct_input_update = proximal_l2(direct_input_tmp, reg_decay)
param.data[:,len(child)*dim:] = direct_input_update
else:
# other param weigth decay
param_tmp = param.data - lr*param.grad.data
param.data = proximal_l2(param_tmp, 2*reg_decay*lr)
#sub_dG_prune = dG_prune.subgraph(nx.shortest_path(dG_prune.to_undirected(),root))
#print("Original graph has %d nodes and %d edges" % (dG.number_of_nodes(), dG.number_of_edges()))
#NodesLeft = list()
#for nodetmp in dG_prune.nodes:
# for path in nx.all_simple_paths(dG_prune, source=root, target=nodetmp):
# #print(path)
# NodesLeft.extend(path)
#NodesLeft = list(set(NodesLeft))
#sub_dG_prune = dG_prune.subgraph(NodesLeft)
#print("Pruned graph has %d nodes and %d edges" % (sub_dG_prune.number_of_nodes(), sub_dG_prune.number_of_edges()))
del param_tmp, param_tmp2, child, term_input, term_input_grad, term_input_tmp, term_input_update
del direct_input, direct_input_grad, direct_input_tmp, direct_input_update
# check network statisics
def check_network(model, dG, root):
dG_prune = dG.copy()
for name, param in model.named_parameters():
if "GO_linear_layer" in name:
# group lasso for
dim = model.num_hiddens_genotype
term_name = name.split('_')[0]
child = model.term_neighbor_map[term_name]
for i in range(len(child)):
#dim = model.num_hiddens_genotype
term_input = param.data[:,i*dim:(i+1)*dim]
num_n0 = len(torch.nonzero(term_input, as_tuple =False))
if num_n0 == 0 :
dG_prune.remove_edge(term_name, child[i])
print("Original graph has %d nodes and %d edges" % (dG.number_of_nodes(), dG.number_of_edges()))
#sub_dG_prune = dG_prune.subgraph(nx.shortest_path(dG_prune.to_undirected(),root))
NodesLeft = list()
for nodetmp in dG_prune.nodes:
for path in nx.all_simple_paths(dG_prune, source=root, target=nodetmp):
#print(path)
NodesLeft.extend(path)
NodesLeft = list(set(NodesLeft))
#print(Nodes)
sub_dG_prune = dG_prune.subgraph(NodesLeft)
print("Pruned graph has %d nodes and %d edges" % (sub_dG_prune.number_of_nodes(), sub_dG_prune.number_of_edges()))
num_node = sub_dG_prune.number_of_nodes()
num_edge = sub_dG_prune.number_of_edges()
return sub_dG_prune, num_node, num_edge
def check_parameter(model, CUDA_ID):
count = torch.tensor([0]).cuda(CUDA_ID)
for name, param in model.named_parameters():
if "GO_linear_layer" in name:
print(name)
print(param.data)
count = count + 1
if count >= 10:
break
def training_acc(model, optimizer, train_loader, train_label_gpu, gene_dim, cuda_cells, drug_dim, cuda_drugs, CUDA_ID):
#Train
model.train()
train_predict = torch.zeros(0,0).cuda(CUDA_ID)
for i, (inputdata, labels) in enumerate(train_loader):
cuda_labels = torch.autograd.Variable(labels.cuda(CUDA_ID))
# Forward + Backward + Optimize
optimizer.zero_grad() # zero the gradient buffer
cuda_cell_features = build_input_vector(inputdata.narrow(1, 0, 1).tolist(), gene_dim, cuda_cells)
cuda_drug_features = build_input_vector(inputdata.narrow(1, 1, 1).tolist(), drug_dim, cuda_drugs)
print(i)
# Here term_NN_out_map is a dictionary
aux_out_map, _ = model(cuda_cell_features, cuda_drug_features)
if train_predict.size()[0] == 0:
train_predict = aux_out_map['final'].data
else:
train_predict = torch.cat([train_predict, aux_out_map['final'].data], dim=0)
total_loss = 0
for name, output in aux_out_map.items():
loss = nn.MSELoss()
if name == 'final':
total_loss += loss(output, cuda_labels)
else: # change 0.2 to smaller one for big terms
total_loss += 0.2 * loss(output, cuda_labels)
print(i, total_loss)
train_corr = spearman_corr(train_predict, train_label_gpu)
print("pretrained model %f total loss, %f training acc" % (total_loss, train_corr))
def test_acc(model, test_loader, test_label_gpu, gene_dim, cuda_cells, drug_dim, cuda_drugs, CUDA_ID):
model.eval()
test_predict = torch.zeros(0,0).cuda(CUDA_ID)
for i, (inputdata, labels) in enumerate(test_loader):
# Convert torch tensor to Variable
cuda_cell_features = build_input_vector(inputdata.narrow(1, 0, 1).tolist(), gene_dim, cuda_cells)
cuda_drug_features = build_input_vector(inputdata.narrow(1, 1, 1).tolist(), drug_dim, cuda_drugs)
cuda_cell_features.cuda(CUDA_ID)
cuda_drug_features.cuda(CUDA_ID)
aux_out_map, _ = model(cuda_cell_features, cuda_drug_features)
if test_predict.size()[0] == 0:
test_predict = aux_out_map['final'].data
else:
test_predict = torch.cat([test_predict, aux_out_map['final'].data], dim=0)
test_corr = spearman_corr(test_predict, test_label_gpu)
del aux_out_map, inputdata, labels, test_predict, cuda_cell_features, cuda_drug_features
torch.cuda.empty_cache()
#print("pretrained model %f test acc" % (test_corr))
return test_corr
def retrain(model, train_loader, train_label_gpu, gene_dim, cuda_cells, drug_dim, cuda_drugs, CUDA_ID, learning_rate):
for name, param in model.named_parameters():
if "direct" in name:
# mutation side
# l0 for direct edge from gene to term
mask = torch.where(param.data.detach()!=0, torch.ones_like(param.data.detach()), torch.zeros_like(param.data.detach()))
			param.register_hook(lambda grad, mask=mask: grad.mul_(mask))  # bind mask now to avoid the late-binding closure bug
if "GO_linear_layer" in name:
# group lasso for
mask = torch.where(param.data.detach()!=0, torch.ones_like(param.data.detach()), torch.zeros_like(param.data.detach()))
			param.register_hook(lambda grad, mask=mask: grad.mul_(mask))  # bind mask now to avoid the late-binding closure bug
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.99), eps=1e-05)
for retain_epoch in range(1):
model.train()
train_predict = torch.zeros(0,0).cuda(CUDA_ID)
best_acc = [0]
for i, (inputdata, labels) in enumerate(train_loader):
cuda_labels = torch.autograd.Variable(labels.cuda(CUDA_ID))
# Forward + Backward + Optimize
optimizer.zero_grad() # zero the gradient buffer
cuda_cell_features = build_input_vector(inputdata.narrow(1, 0, 1).tolist(), gene_dim, cuda_cells)
cuda_drug_features = build_input_vector(inputdata.narrow(1, 1, 1).tolist(), drug_dim, cuda_drugs)
# Here term_NN_out_map is a dictionary
aux_out_map, _ = model(cuda_cell_features, cuda_drug_features)
if train_predict.size()[0] == 0:
train_predict = aux_out_map['final'].data
else:
train_predict = torch.cat([train_predict, aux_out_map['final'].data], dim=0)
total_loss = 0
for name, output in aux_out_map.items():
loss = nn.MSELoss()
if name == 'final':
total_loss += loss(output, cuda_labels)
else: # change 0.2 to smaller one for big terms
total_loss += 0.2 * loss(output, cuda_labels)
optimizer.zero_grad()
print("Retrain %d: total loss %f" % (i, total_loss.item()))
total_loss.backward()
optimizer.step()
print("Retrain %d: total loss %f" % (i, total_loss.item()))
train_corr = spearman_corr(train_predict, train_label_gpu)
		# NOTE: test_loader, test_label_gpu, model_save_folder and epoch are not parameters of retrain();
		# they are assumed to be available from the enclosing scope when this helper is used.
		retrain_test_corr = test_acc(model, test_loader, test_label_gpu, gene_dim, cuda_cells, drug_dim, cuda_drugs, CUDA_ID)
print(">>>>>Retraining step %d: model test acc %f" % (retain_epoch, prune_test_corr))
if retrain_test_corr > best_acc[-1]:
			best_acc.append(retrain_test_corr)
torch.save(model.state_dict(), model_save_folder + 'prune_final/drugcell_retrain_lung_best'+str(epoch)+'_'+str(retain_epoch)+'.pkl')
best_model = model.state_dict()
model.load_state_dict(best_model)
return model
def grad_hook_masking(grad, mask):
	grad.mul_(mask)  # the mask is applied to the gradient in place, so nothing needs to be returned
del mask
#return grad.mul_(mask)
def sparse_direct_gene(model, GOlist):
GO_direct_spare_gene = {}
for go in GOlist:
GO_direct_spare_gene[go] = list()
preserved_gene = list()
for name, param in model.named_parameters():
if "direct" in name:
GOname = name.split('_')[0]
if GOname in GOlist:
param_tmp = torch.sum(param.data, dim=0)
#print(param_tmp.shape)
indn0 = torch.nonzero(param_tmp, as_tuple=True)[0]
#param_tmp = np.sum(param.data.numpy(), axis=0)
#print(param_tmp.shape)
#indn0 = np.nonzero(param_tmp)[0]
#print(np.count_nonzero(param_tmp))
GO_direct_spare_gene[GOname].extend(indn0)
preserved_gene.extend(indn0)
return GO_direct_spare_gene, preserved_gene
# train a DrugCell model
def train_model(pretrained_model, root, term_size_map, term_direct_gene_map, dG, train_data, gene_dim, drug_dim, model_save_folder, train_epochs, batch_size, learning_rate, num_hiddens_genotype, num_hiddens_drug, num_hiddens_final, cell_features, drug_features):
'''
# arguments:
# 1) root: the root of the hierarchy embedded in one side of the model
# 2) term_size_map: dictionary mapping the name of subsystem in the hierarchy to the number of genes contained in the subsystem
# 3) term_direct_gene_map: dictionary mapping each subsystem in the hierarchy to the set of genes directly contained in the subsystem (i.e., children subsystems would not have the genes in the set)
# 4) dG: the hierarchy loaded as a networkx DiGraph object
# 5) train_data: torch Tensor object containing training data (features and labels)
# 6) gene_dim: the size of input vector for the genomic side of neural network (visible neural network) embedding cell features
# 7) drug_dim: the size of input vector for the fully-connected neural network embedding drug structure
# 8) model_save_folder: the location where the trained model will be saved
# 9) train_epochs: the maximum number of epochs to run during the training phase
# 10) batch_size: the number of data points that the model will see at each iteration during training phase (i.e., #training_data_points < #iterations x batch_size)
# 11) learning_rate: learning rate of the model training
# 12) num_hiddens_genotype: number of neurons assigned to each subsystem in the hierarchy
# 13) num_hiddens_drugs: number of neurons assigned to the fully-connected neural network embedding drug structure - one string containing number of neurons at each layer delimited by comma(,) (i.e. for 3 layer of fully-connected neural network containing 100, 50, 20 neurons from bottom - '100,50,20')
# 14) num_hiddens_final: number of neurons assigned to the fully-connected neural network combining the genomic side with the drug side. Same format as 13).
# 15) cell_features: a list containing the features of each cell line in tranining data. The index should match with cell2id list.
# 16) drug_features: a list containing the morgan fingerprint (or other embedding) of each drug in training data. The index should match with drug2id list.
'''
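	# Example call shape (illustrative only; the concrete invocation with parsed
	# command-line arguments appears at the bottom of this script):
	#   train_model(pretrained_model, root, term_size_map, term_direct_gene_map, dG,
	#               train_data, num_genes, drug_dim, 'MODEL/', 300, 3000, 0.001,
	#               3, [100, 50, 3], 3, cell_features, drug_features)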
#print("Original graph has %d nodes and %d edges" % (dG.number_of_nodes(), dG.number_of_edges()))
# initialization of variables
best_model = 0
#best_model = 0
max_corr = 0
dGc = dG.copy()
# driver gene
DgeneId = [51,125,128,140,171,184,214,261,281,283,287,372,378,468
,498,620,712,801,822,834,846,850,871,872,879,950,951,1082
,1131,1212,1247,1265,1305,1466,1497,1514,1516,1517,1520,1561,1607,1610
,1611,1657,1767,1790,1836,1885,1887,2016,2017,2062,2066,2113,2186,2197
,2207,2263,2289,2291,2344,2351,2357,2366,2465,2469,2612,2618,2829,2832]
# separate the whole data into training and test data
train_feature, train_label, test_feature, test_label = train_data
# copy labels (observation) to GPU - will be used to
train_label_gpu = torch.autograd.Variable(train_label.cuda(CUDA_ID))
test_label_gpu = torch.autograd.Variable(test_label.cuda(CUDA_ID))
# create dataloader for training/test data
train_loader = du.DataLoader(du.TensorDataset(train_feature,train_label), batch_size=batch_size, shuffle=False)
test_loader = du.DataLoader(du.TensorDataset(test_feature,test_label), batch_size=batch_size, shuffle=False)
# create a torch objects containing input features for cell lines and drugs
cuda_cells = torch.from_numpy(cell_features)
cuda_drugs = torch.from_numpy(drug_features)
# dcell neural network
model = drugcell_nn(term_size_map, term_direct_gene_map, dG, gene_dim, drug_dim, root, num_hiddens_genotype, num_hiddens_drug, num_hiddens_final, CUDA_ID)
# load model to GPU
model.cuda(CUDA_ID)
# define optimizer
# optimize drug NN
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.99), eps=1e-05)
term_mask_map = create_term_mask(model.term_direct_gene_map, gene_dim)
# load pretrain model
if os.path.isfile(pretrained_model):
print("Pre-trained model exists:" + pretrained_model)
model.load_state_dict(torch.load(pretrained_model,map_location=torch.device('cuda', CUDA_ID))) #param_file
#base_test_acc = test(model,val_loader,device)
else:
print("Pre-trained model does not exist, so before pruning we have to pre-train a model.")
sys.exit()
#training_acc(model, optimizer, train_loader, train_label_gpu, gene_dim, cuda_cells, drug_dim, cuda_drugs, CUDA_ID)
#test_acc(model, test_loader, test_label_gpu, gene_dim, cuda_cells, drug_dim, cuda_drugs, CUDA_ID)
'''optimizer.zero_grad()
for name, param in model.named_parameters():
term_name = name.split('_')[0]
if '_direct_gene_layer.weight' in name:
#print(name, param.size(), term_mask_map[term_name].size())
param.data = torch.mul(param.data, term_mask_map[term_name]) * 0.1
#param.data = torch.mul(param.data, term_mask_map[term_name])
else:
param.data = param.data * 0.1
'''
#training_acc(model, optimizer, train_loader, train_label_gpu, gene_dim, cuda_cells, drug_dim, cuda_drugs, CUDA_ID)
#test_acc(model, test_loader, test_label_gpu, gene_dim, cuda_cells, drug_dim, cuda_drugs, CUDA_ID)
#best_prune_acc = torch.tensor(0.0)
for epoch in range(train_epochs):
# prune step
for prune_epoch in range(10):
#Train
model.train()
train_predict = torch.zeros(0,0).cuda(CUDA_ID)
for i, (inputdata, labels) in enumerate(train_loader):
cuda_labels = torch.autograd.Variable(labels.cuda(CUDA_ID))
# Forward + Backward + Optimize
optimizer.zero_grad() # zero the gradient buffer
cuda_cell_features = build_input_vector(inputdata.narrow(1, 0, 1).tolist(), gene_dim, cuda_cells)
cuda_drug_features = build_input_vector(inputdata.narrow(1, 1, 1).tolist(), drug_dim, cuda_drugs)
cuda_cell_features.cuda(CUDA_ID)
cuda_drug_features.cuda(CUDA_ID)
# Here term_NN_out_map is a dictionary
aux_out_map, _ = model(cuda_cell_features, cuda_drug_features)
if train_predict.size()[0] == 0:
train_predict = aux_out_map['final'].data
else:
train_predict = torch.cat([train_predict, aux_out_map['final'].data], dim=0)
total_loss = 0
for name, output in aux_out_map.items():
loss = nn.MSELoss()
if name == 'final':
total_loss += loss(output, cuda_labels)
else: # change 0.2 to smaller one for big terms
total_loss += 0.2 * loss(output, cuda_labels)
total_loss.backward()
for name, param in model.named_parameters():
if '_direct_gene_layer.weight' not in name:
continue
term_name = name.split('_')[0]
#print(name, param.grad.data.size(), term_mask_map[term_name].size())
param.grad.data = torch.mul(param.grad.data, term_mask_map[term_name])
#print("Original graph has %d nodes and %d edges" % (dGc.number_of_nodes(), dGc.number_of_edges()))
optimize_palm(model, dGc, root, reg_l0=0.0001, reg_glasso=0.1, reg_decay=0.001, lr=0.001, lip=0.001)
print("check network:")
#check_network(model, dGc, root)
#optimizer.step()
print("Prune %d: total loss %f" % (i,total_loss.item()))
del total_loss, cuda_cell_features, cuda_drug_features
del aux_out_map, inputdata, labels
torch.cuda.empty_cache()
train_corr = spearman_corr(train_predict, train_label_gpu)
prune_test_corr = test_acc(model, test_loader, test_label_gpu, gene_dim, cuda_cells, drug_dim, cuda_drugs, CUDA_ID)
print(">>>>>%d epoch run Pruning step %d: model train acc %f test acc %f" % (epoch, prune_epoch, train_corr, prune_test_corr))
del train_predict, prune_test_corr
torch.cuda.empty_cache()
# retraining step
#retrain(model, train_loader, train_label_gpu, gene_dim, cuda_cells, drug_dim, cuda_drugs, CUDA_ID, learning_rate)
# masking
'''
print("check network before masking:")
check_network(model, dGc, root)
handle_list = list()
with torch.no_grad():
for name, param in model.named_parameters():
if "direct" in name:
# mutation side
# l0 for direct edge from gene to term
mask = torch.where(param.data.detach()!=0, torch.ones_like(param.data.detach()), torch.zeros_like(param.data.detach()))
handle = param.register_hook(lambda grad, mask=mask: grad_hook_masking(grad, mask))
#handle = param.register_hook(lambda grad: grad.mul_(torch.where(param.data.detach()!=0, torch.ones_like(param.data.detach()), torch.zeros_like(param.data.detach()))))
handle_list.append(handle)
if "GO_linear_layer" in name:
# group lasso for
mask = torch.where(param.data.detach()!=0, torch.ones_like(param.data.detach()), torch.zeros_like(param.data.detach()))
handle = param.register_hook(lambda grad, mask=mask: grad_hook_masking(grad, mask))
#handle = param.register_hook(lambda grad: grad.mul_(torch.where(param.data.detach()!=0, torch.ones_like(param.data.detach()), torch.zeros_like(param.data.detach()))))
handle_list.append(handle)
torch.cuda.empty_cache()
'''
#print("check network after masking:")
#check_network(model, dGc, root)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.99), eps=1e-05)
#print("check network after optimizer:")
#check_network(model, dGc, root)
best_retrain_corr = torch.tensor(0.0).cuda(CUDA_ID)
for retain_epoch in range(10):
#print("check network before train:")
#check_network(model, dGc, root)
print("check network before masking:")
#check_network(model, dGc, root)
# add hooks
handle_list = list()
with torch.no_grad():
for name, param in model.named_parameters():
if "direct" in name:
# mutation side
# l0 for direct edge from gene to term
mask = torch.where(param.data.detach()!=0, torch.ones_like(param.data.detach()), torch.zeros_like(param.data.detach()))
handle = param.register_hook(lambda grad, mask=mask: grad_hook_masking(grad, mask))
#handle = param.register_hook(lambda grad: grad.mul_(torch.where(param.data.detach()!=0, torch.ones_like(param.data.detach()), torch.zeros_like(param.data.detach()))))
handle_list.append(handle)
if "GO_linear_layer" in name:
# group lasso for
mask = torch.where(param.data.detach()!=0, torch.ones_like(param.data.detach()), torch.zeros_like(param.data.detach()))
handle = param.register_hook(lambda grad, mask=mask: grad_hook_masking(grad, mask))
#handle = param.register_hook(lambda grad: grad.mul_(torch.where(param.data.detach()!=0, torch.ones_like(param.data.detach()), torch.zeros_like(param.data.detach()))))
handle_list.append(handle)
model.train()
train_predict = torch.zeros(0,0).cuda(CUDA_ID)
#print("check network before retrain:")
#check_network(model, dGc, root)
best_acc = torch.tensor([0]).cuda(CUDA_ID)
for i, (inputdata, labels) in enumerate(train_loader):
cuda_labels = torch.autograd.Variable(labels.cuda(CUDA_ID))
# Forward + Backward + Optimize
optimizer.zero_grad() # zero the gradient buffer
cuda_cell_features = build_input_vector(inputdata.narrow(1, 0, 1).tolist(), gene_dim, cuda_cells)
cuda_drug_features = build_input_vector(inputdata.narrow(1, 1, 1).tolist(), drug_dim, cuda_drugs)
cuda_cell_features.cuda(CUDA_ID)
cuda_drug_features.cuda(CUDA_ID)
# Here term_NN_out_map is a dictionary
aux_out_map, _ = model(cuda_cell_features, cuda_drug_features)
if train_predict.size()[0] == 0:
train_predict = aux_out_map['final'].data
else:
train_predict = torch.cat([train_predict, aux_out_map['final'].data], dim=0)
total_loss = 0
for name, output in aux_out_map.items():
loss = nn.MSELoss()
if name == 'final':
total_loss += loss(output, cuda_labels)
else: # change 0.2 to smaller one for big terms
total_loss += 0.2 * loss(output, cuda_labels)
optimizer.zero_grad()
#print("Retrain %d: total loss %f" % (i, total_loss.item()))
total_loss.backward()
print("@check network before step:")
#check_network(model, dGc, root)
#check_parameter(model, CUDA_ID)
optimizer.step()
#check_parameter(model, CUDA_ID)
print("@check network after step:")
#check_network(model, dGc, root)
print("Retrain %d: total loss %f" % (i, total_loss.item()))
del total_loss, cuda_cell_features, cuda_drug_features
del aux_out_map, inputdata, labels
torch.cuda.empty_cache()
# remove hooks
for handle in handle_list:
handle.remove()
torch.cuda.empty_cache()
gc.collect()
train_corr = spearman_corr(train_predict, train_label_gpu)
retrain_test_corr = test_acc(model, test_loader, test_label_gpu, gene_dim, cuda_cells, drug_dim, cuda_drugs, CUDA_ID)
print(">>>>>%d epoch Retraining step %d: model training acc %f test acc %f" % (epoch, retain_epoch, train_corr, retrain_test_corr))
# save models
if best_retrain_corr < retrain_test_corr:
best_retrain_corr = retrain_test_corr
PrunedG, NumNode_left, NumEdge_left = check_network(model, dGc, root)
GOLeft = list(PrunedG.nodes)
GO_direct_gene, Prev_gene_tmp = sparse_direct_gene(model, GOLeft)
Prev_gene = [Prev_gene_tmp[i].item() for i in range(len(Prev_gene_tmp))]
Prev_gene_unique = list(set(Prev_gene))
NumGeneLeft = len(Prev_gene_unique)
Overlap = list(set(Prev_gene_unique) & set(DgeneId))
NumOverlap = len(Overlap)
fname = model_save_folder + 'st_gl_0.1_epoch_'+str(epoch)+'_trainacc_'+str(train_corr.item())+'_testacc_'+str(retrain_test_corr.item())+'_nodeleft_'+str(NumNode_left)+'_edgeleft_'+str(NumEdge_left)+'_geneleft_'+str(NumGeneLeft)+'_overlap_'+str(NumOverlap)+'.pkl'
torch.save(model.state_dict(), fname)
#if retrain_test_corr > best_acc:
# best_acc = retrain_test_corr
#best_acc.append(retrain_test_corr)
#torch.save(model.state_dict(), model_save_folder + 'prune_final/drugcell_retrain_lung_best'+str(epoch)+'_'+str(retain_epoch)+'.pkl')
# best_model_para = model.state_dict()
#model.load_state_dict(best_model_para)
#del best_model_para
parser = argparse.ArgumentParser(description='Train dcell')
parser.add_argument('-onto', help='Ontology file used to guide the neural network', type=str)
parser.add_argument('-train', help='Training dataset', type=str)
parser.add_argument('-test', help='Validation dataset', type=str)
parser.add_argument('-epoch', help='Training epochs for training', type=int, default=300)
parser.add_argument('-lr', help='Learning rate', type=float, default=0.001)
parser.add_argument('-batchsize', help='Batchsize', type=int, default=3000)
parser.add_argument('-modeldir', help='Folder for trained models', type=str, default='MODEL/')
parser.add_argument('-cuda', help='Specify GPU', type=int, default=0)
parser.add_argument('-gene2id', help='Gene to ID mapping file', type=str)
parser.add_argument('-drug2id', help='Drug to ID mapping file', type=str)
parser.add_argument('-cell2id', help='Cell to ID mapping file', type=str)
parser.add_argument('-genotype_hiddens', help='Mapping for the number of neurons in each term in genotype parts', type=int, default=3)
parser.add_argument('-drug_hiddens', help='Mapping for the number of neurons in each layer', type=str, default='100,50,3')
parser.add_argument('-final_hiddens', help='The number of neurons in the top layer', type=int, default=3)
parser.add_argument('-cellline', help='Mutation information for cell lines', type=str)
parser.add_argument('-fingerprint', help='Morgan fingerprint representation for drugs', type=str)
parser.add_argument('-pretrained_model', help='Pre-trained drugcell baseline model', type=str)
print("Start....")
# call functions
opt = parser.parse_args()
torch.set_printoptions(precision=3)
# load input data
train_data, cell2id_mapping, drug2id_mapping = prepare_train_data(opt.train, opt.test, opt.cell2id, opt.drug2id)
gene2id_mapping = load_mapping(opt.gene2id)
print('Total number of genes = %d' % len(gene2id_mapping))
cell_features = np.genfromtxt(opt.cellline, delimiter=',')
drug_features = np.genfromtxt(opt.fingerprint, delimiter=',')
num_cells = len(cell2id_mapping)
num_drugs = len(drug2id_mapping)
num_genes = len(gene2id_mapping)
drug_dim = len(drug_features[0,:])
# load ontology
dG, root, term_size_map, term_direct_gene_map = load_ontology(opt.onto, gene2id_mapping)
#print("Original graph has %d nodes and %d edges" % (dG.number_of_nodes(), dG.number_of_edges()))
# load the number of hiddens #######
num_hiddens_genotype = opt.genotype_hiddens
num_hiddens_drug = list(map(int, opt.drug_hiddens.split(',')))
num_hiddens_final = opt.final_hiddens
# load pretrain model
pretrained_model = opt.pretrained_model
######################################
# driver gene
#Dgene = [ 214, 1082, 1466, 1520, 1531, 1607, 2062, 2773, 2832]
CUDA_ID = opt.cuda
#print(">>>>>>>>>>>>Original graph has %d nodes and %d edges" % (dG.number_of_nodes(), dG.number_of_edges()))
train_model(pretrained_model, root, term_size_map, term_direct_gene_map, dG, train_data, num_genes, drug_dim, opt.modeldir, opt.epoch, opt.batchsize, opt.lr, num_hiddens_genotype, num_hiddens_drug, num_hiddens_final, cell_features, drug_features)
| null |
train_drugcell_prune_st.py
|
train_drugcell_prune_st.py
|
py
| 32,384 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.zeros",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "torch.abs",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.sqrt",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.norm",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.zeros_like",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "torch.nonzero",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "torch.nonzero",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "torch.cat",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "torch.zeros",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "torch.cuda.empty_cache",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "torch.where",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "torch.ones_like",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "torch.zeros_like",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "torch.where",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "torch.ones_like",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "torch.zeros_like",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "torch.zeros",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 236,
"usage_type": "attribute"
},
{
"api_name": "torch.cat",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "torch.save",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "torch.nonzero",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 345,
"usage_type": "attribute"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 346,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "torch.utils.data",
"line_number": 349,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.TensorDataset",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "torch.utils.data",
"line_number": 350,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.TensorDataset",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 364,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 368,
"usage_type": "attribute"
},
{
"api_name": "torch.load",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 403,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 406,
"usage_type": "attribute"
},
{
"api_name": "torch.cat",
"line_number": 423,
"usage_type": "call"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 427,
"usage_type": "name"
},
{
"api_name": "torch.mul",
"line_number": 440,
"usage_type": "call"
},
{
"api_name": "torch.cuda.empty_cache",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 450,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.empty_cache",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 456,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.Adam",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 487,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 492,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 502,
"usage_type": "call"
},
{
"api_name": "torch.where",
"line_number": 507,
"usage_type": "call"
},
{
"api_name": "torch.ones_like",
"line_number": 507,
"usage_type": "call"
},
{
"api_name": "torch.zeros_like",
"line_number": 507,
"usage_type": "call"
},
{
"api_name": "torch.where",
"line_number": 513,
"usage_type": "call"
},
{
"api_name": "torch.ones_like",
"line_number": 513,
"usage_type": "call"
},
{
"api_name": "torch.zeros_like",
"line_number": 513,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 520,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 525,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 527,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 527,
"usage_type": "attribute"
},
{
"api_name": "torch.cat",
"line_number": 544,
"usage_type": "call"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 548,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 548,
"usage_type": "name"
},
{
"api_name": "torch.cuda.empty_cache",
"line_number": 568,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 568,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.empty_cache",
"line_number": 573,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 573,
"usage_type": "attribute"
},
{
"api_name": "gc.collect",
"line_number": 575,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 593,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 614,
"usage_type": "call"
},
{
"api_name": "torch.set_printoptions",
"line_number": 639,
"usage_type": "call"
},
{
"api_name": "numpy.genfromtxt",
"line_number": 646,
"usage_type": "call"
},
{
"api_name": "numpy.genfromtxt",
"line_number": 647,
"usage_type": "call"
}
] |
236905313
|
import json
import os
import requests
from urllib3.util import parse_url
class Zodiac(object):
def __init__(self, username, password, company=None, api=None, api_version=None):
self.api = api or 'https://dashboard.zodiacmetrics.com/api'
self.api_version = api_version or 'v1'
self.login_as(username, password)
self.company = company or self.company
def _format_url(self, url, **kwargs):
kwargs['api'] = self.api
kwargs['version'] = self.api_version
return url.format(**kwargs)
def _post(self, url, values):
headers = {
"Content-Type": "application/json"
}
resp = self.session.post(url, json=values, headers=headers)
try:
return json.loads(resp.text)
except Exception as e:
print(resp.text)
raise e
def _put(self, url, filepath, filename):
headers = {
"Content-Type": "multipart/form-data",
"Content-Disposition": "attachment; filename=\"" + filename + "\""
}
with open(filepath, 'rb') as data:
resp = self.session.put(url, data=data, headers=headers)
return resp
def _get(self, url):
return self.session.get(url)
def login_as(self, username, password):
self.session = requests.Session()
url = self._format_url('{api}/{version}/auth/login')
body = self._post(url, {'email': username, 'password': password, 'rememberMe': True})
self.company = body['mask']
self.session.headers.update({'User-Authorization-Token': 'Bearer ' + body['token']})
def _create_upload(self, path, filename):
url = self._format_url(
path,
company=self.company
)
body = self._post(url, {'filename': filename})
return body['url']
def _create_record(self, path, filename, upload_url, description):
s3_path = parse_url(upload_url).path
url = self._format_url(path, company=self.company)
self._post(
url,
{'filename': filename, 's3_path': s3_path, 'description': description}
)
return s3_path
def _create_upload_location(self, filename):
return self._create_upload('{api}/{version}/{company}/datasets/upload_url', filename)
def upload_file(self, filepath, description):
filename = filepath.split('/')[-1]
if os.name == 'nt':
filename = filepath.split('\\')[-1]
upload_url = self._create_upload_location(filename)
self._put(upload_url, filepath, filename)
return self._create_record(
'{api}/{version}/{company}/datasets/create', filename, upload_url, description
)
def _create_email_upload_location(self, filename):
return self._create_upload('{api}/{version}/{company}/email/upload_url', filename)
def upload_emails(self, filepath):
filename = filepath.split('/')[-1]
upload_url = self._create_email_upload_location(filename)
self._put(upload_url, filepath, filename)
return self._create_record(
'{api}/{version}/{company}/email/create', filename, upload_url, ''
)
def submit_job(self, args):
txlogs = []
attrfiles = []
for trans_log in args.transactions:
txlogs.append(self.upload_file(trans_log, trans_log))
for attr_file in args.attributes or []:
attrfiles.append(self.upload_file(attr_file, attr_file))
url = self._format_url(
'{api}/{version}/{company}/models/{model_group_hash}/execute',
company=self.company, model_group_hash=args.model_group_hash
)
self._post(
url,
{'transaction_logs': txlogs, 'attributes': attrfiles}
)
def _list_datasets(self):
url = self._format_url('{api}/{version}/{company}/datasets/list', company=self.company)
return json.loads(self._get(url).text)
def list_datasets(self):
data = self._list_datasets()
return [d['filename'] for d in data]
def get_latest_output(self, args):
mgh = args.model_group_hash
url = self._format_url(
'{api}/{version}/{company}/datasets/{mgh}/modeling_output',
company=self.company,
mgh=mgh
)
api_text = self._get(url).text
filename = json.loads(api_text)['id']
resp = self._get(
self._format_url(
"{api}/{version}/{company}/datasets/{inst}/download_url",
company=self.company,
inst=filename)
)
download = json.loads(resp.text)['url']
print("Download url %s" % download)
return download
def get_download_url(self, filename):
datasets = self._list_datasets()
inst = [d['id'] for d in datasets if d['filename'] == filename][-1]
if not inst:
raise Exception("Unknown file")
resp = self._get(
self._format_url(
"{api}/{version}/{company}/datasets/{inst}/download_url",
company=self.company,
inst=inst)
)
return json.loads(resp.text)['url']
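# Illustrative usage sketch (not part of the original module; the credentials
# and file names below are placeholders):
#
#   client = Zodiac('user@example.com', 'secret-password')
#   client.upload_file('transactions.csv', 'Q1 transaction log')
#   print(client.list_datasets())
#   print(client.get_download_url('transactions.csv'))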
| null |
zodiac/api.py
|
api.py
|
py
| 5,237 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.loads",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "requests.Session",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "urllib3.util.parse_url",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.name",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 148,
"usage_type": "call"
}
] |
13191767
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
import re
import os
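# Example ITEM_PIPELINES entry for settings.py (illustrative only; the module
# path and priority numbers are assumptions and should match your own project):
#
#   ITEM_PIPELINES = {
#       'douban.pipelines.DoubanPipeline': 300,
#       'douban.pipelines.MongoPipeline': 400,
#       'douban.pipelines.SaveDataPipeline': 500,
#   }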
class DoubanPipeline(object):
"""
    Clean and filter the scraped data.
"""
def process_item(self, item, spider):
"""
        Strip redundant whitespace from the item values.
:param item:
:param spider:
:return:
"""
for key, value in item.items():
if isinstance(value, str):
item[key] = value.strip()
elif isinstance(value, (tuple, list)):
item[key] = [i.strip() for i in value]
return item
class MongoPipeline(object):
"""
    Save scraped data to MongoDB.
"""
def __init__(self, mongo_uri, mongo_db):
self.mongo_uri = mongo_uri
self.mongo_db = mongo_db
self.client = None
self.db = None
@classmethod
def from_crawler(cls, crawler):
"""
        Class method that returns an instance built from the MONGO_URI and MONGO_DB settings.
:param crawler:
        :return: class instance
"""
return cls(
mongo_uri=crawler.settings.get('MONGO_URI'),
mongo_db=crawler.settings.get('MONGO_DB')
)
def open_spider(self, spider):
"""
        Open the MongoDB connection.
:param spider:
:return:
"""
self.client = pymongo.MongoClient(self.mongo_uri)
self.db = self.client[self.mongo_db]
def process_item(self, item, spider):
"""
        Save every item to MongoDB.
:param item:
:param spider:
:return:
"""
name = item.__class__.__name__
items = dict(item)
        self.db[name].update_one(items, {"$set": items}, upsert=True)  # upsert on the same item to avoid duplicate records
return item
def close_spider(self, spider):
"""
        Close the MongoDB connection.
:param spider:
:return:
"""
self.client.close()
class SaveDataPipeline(object):
"""
    Save scraped data to local files.
"""
def __init__(self):
self.record_folder = os.getcwd() + '\\' + 'result'
def process_item(self, item, spider):
"""
        Save every article under the result directory.
:param item:
:param spider:
:return:
"""
items = dict(item)
        title = re.match('[\u4e00-\u9fa5a-zA-Z0-9]+', items['title'])  # match only CJK characters, letters and digits in the title
with open(r'{}\{}.txt'.format(self.record_folder, title.group()), 'w', encoding='utf-8') as wf:
wf.write(item['article'])
return item
| null |
scrapy_crawler/douban/douban/pipelines.py
|
pipelines.py
|
py
| 2,668 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pymongo.MongoClient",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 98,
"usage_type": "call"
}
] |
647361015
|
"""
TextWin, the window showing the text messages and info messages in poezio.
Can be locked, scrolled, has a separator, etc…
"""
import logging
from typing import Optional, List, Union
from poezio.windows.base_wins import Win
from poezio.text_buffer import TextBuffer
from poezio.config import config
from poezio.theming import to_curses_attr, get_theme
from poezio.ui.types import Message, BaseMessage
from poezio.ui.render import Line, build_lines, write_pre
log = logging.getLogger(__name__)
class TextWin(Win):
__slots__ = ('lines_nb_limit', 'pos', 'built_lines', 'lock', 'lock_buffer',
'separator_after', 'highlights', 'hl_pos',
'nb_of_highlights_after_separator')
hl_pos: Optional[int]
def __init__(self, lines_nb_limit: Optional[int] = None) -> None:
Win.__init__(self)
if lines_nb_limit is None:
lines_nb_limit = config.getint('max_lines_in_memory')
self.lines_nb_limit: int = lines_nb_limit
self.pos = 0
# Each new message is built and kept here.
# on resize, we rebuild all the messages
self.built_lines: List[Union[None, Line]] = []
self.lock = False
self.lock_buffer: List[Union[None, Line]] = []
self.separator_after: Optional[BaseMessage] = None
# the Lines of the highlights in that buffer
self.highlights: List[Line] = []
        # the current HL position in that list. None means that we’re not on
        # an hl. -1 is a valid position (it's before the first hl of the
# list. i.e the separator, in the case where there’s no hl before
# it.)
self.hl_pos = None
# Keep track of the number of hl after the separator.
# This is useful to make “go to next highlight“ work after a “move to separator”.
self.nb_of_highlights_after_separator = 0
def toggle_lock(self) -> bool:
if self.lock:
self.release_lock()
else:
self.acquire_lock()
return self.lock
def acquire_lock(self) -> None:
self.lock = True
def release_lock(self) -> None:
for line in self.lock_buffer:
self.built_lines.append(line)
self.lock = False
def scroll_up(self, dist: int = 14) -> bool:
pos = self.pos
self.pos += dist
if self.pos + self.height > len(self.built_lines):
self.pos = len(self.built_lines) - self.height
if self.pos < 0:
self.pos = 0
return self.pos != pos
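    # Example of the clamping above (illustrative): with 100 built lines, a
    # window height of 20 and pos == 70, scroll_up(14) first yields 84, which
    # exceeds 100 - 20 = 80, so pos is clamped back to 80.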
def scroll_down(self, dist: int = 14) -> bool:
pos = self.pos
self.pos -= dist
if self.pos <= 0:
self.pos = 0
return self.pos != pos
def build_new_message(self,
message: BaseMessage,
clean: bool = True,
timestamp: bool = False,
nick_size: int = 10) -> int:
"""
Take one message, build it and add it to the list
Return the number of lines that are built for the given
message.
"""
lines = build_lines(
message, self.width, timestamp=timestamp, nick_size=nick_size
)
if self.lock:
self.lock_buffer.extend(lines)
else:
self.built_lines.extend(lines)
if not lines or not lines[0]:
return 0
if isinstance(message, Message) and message.highlight:
self.highlights.append(lines[0])
self.nb_of_highlights_after_separator += 1
log.debug("Number of highlights after separator is now %s",
self.nb_of_highlights_after_separator)
if clean:
while len(self.built_lines) > self.lines_nb_limit:
self.built_lines.pop(0)
return len(lines)
def refresh(self) -> None:
log.debug('Refresh: %s', self.__class__.__name__)
if self.height <= 0:
return
if self.pos == 0:
lines = self.built_lines[-self.height:]
else:
lines = self.built_lines[-self.height - self.pos:-self.pos]
with_timestamps = config.getbool("show_timestamps")
nick_size = config.getint("max_nick_length")
self._win.move(0, 0)
self._win.erase()
offset = 0
for y, line in enumerate(lines):
if line:
msg = line.msg
if line.start_pos == 0:
offset = write_pre(msg, self, with_timestamps, nick_size)
elif y == 0:
offset = msg.compute_offset(with_timestamps,
nick_size)
self.write_text(
y, offset,
line.prepend + line.msg.txt[line.start_pos:line.end_pos])
else:
self.write_line_separator(y)
if y != self.height - 1:
self.addstr('\n')
self._win.attrset(0)
self._refresh()
def write_text(self, y: int, x: int, txt: str) -> None:
"""
write the text of a line.
"""
self.addstr_colored(txt, y, x)
def resize(self, height: int, width: int, y: int, x: int,
room: Optional[TextBuffer] = None, force: bool = False) -> None:
old_width: Optional[int]
if hasattr(self, 'width'):
old_width = self.width
else:
old_width = None
self._resize(height, width, y, x)
if room and (self.width != old_width or force):
self.rebuild_everything(room)
# reposition the scrolling after resize
# (see #2450)
buf_size = len(self.built_lines)
if buf_size - self.pos < self.height:
self.pos = buf_size - self.height
if self.pos < 0:
self.pos = 0
def rebuild_everything(self, room: TextBuffer) -> None:
self.built_lines = []
with_timestamps = config.getbool('show_timestamps')
nick_size = config.getint('max_nick_length')
for message in room.messages:
self.build_new_message(
message,
clean=False,
timestamp=with_timestamps,
nick_size=nick_size)
if self.separator_after is message:
self.built_lines.append(None)
while len(self.built_lines) > self.lines_nb_limit:
self.built_lines.pop(0)
def remove_line_separator(self) -> None:
"""
Remove the line separator
"""
log.debug('remove_line_separator')
if None in self.built_lines:
self.built_lines.remove(None)
self.separator_after = None
def add_line_separator(self, room: TextBuffer = None) -> None:
"""
add a line separator at the end of messages list
room is a textbuffer that is needed to get the previous message
(in case of resize)
"""
if None not in self.built_lines:
self.built_lines.append(None)
self.nb_of_highlights_after_separator = 0
log.debug("Resetting number of highlights after separator")
if room and room.messages:
self.separator_after = room.messages[-1]
def write_line_separator(self, y) -> None:
theme = get_theme()
char = theme.CHAR_NEW_TEXT_SEPARATOR
self.addnstr(y, 0, char * (self.width // len(char) - 1), self.width,
to_curses_attr(theme.COLOR_NEW_TEXT_SEPARATOR))
def __del__(self) -> None:
log.debug('** TextWin: deleting %s built lines',
(len(self.built_lines)))
del self.built_lines
def next_highlight(self) -> None:
"""
Go to the next highlight in the buffer.
(depending on which highlight was selected before)
if the buffer is already positioned on the last, of if there are no
highlights, scroll to the end of the buffer.
"""
log.debug('Going to the next highlight…')
if (not self.highlights or self.hl_pos is None
or self.hl_pos >= len(self.highlights) - 1):
self.hl_pos = None
self.pos = 0
return
hl_size = len(self.highlights) - 1
if self.hl_pos is not None and self.hl_pos < hl_size:
self.hl_pos += 1
else:
self.hl_pos = hl_size
log.debug("self.hl_pos = %s", self.hl_pos)
hl = self.highlights[self.hl_pos]
pos = None
while not pos:
try:
pos = self.built_lines.index(hl)
except ValueError:
if isinstance(self.hl_pos, int):
del self.highlights[self.hl_pos]
if not self.highlights:
self.hl_pos = None
self.pos = 0
return
self.hl_pos = 0
hl = self.highlights[0]
self.pos = len(self.built_lines) - pos - self.height
if self.pos < 0 or self.pos >= len(self.built_lines):
self.pos = 0
def previous_highlight(self) -> None:
"""
Go to the previous highlight in the buffer.
(depending on which highlight was selected before)
if the buffer is already positioned on the first, or if there are no
highlights, scroll to the end of the buffer.
"""
log.debug('Going to the previous highlight…')
if not self.highlights or self.hl_pos and self.hl_pos <= 0:
self.hl_pos = None
self.pos = 0
return
if self.hl_pos is None:
self.hl_pos = len(self.highlights) - 1
else:
self.hl_pos -= 1
log.debug("self.hl_pos = %s", self.hl_pos)
hl = self.highlights[self.hl_pos]
pos = None
while not pos:
try:
pos = self.built_lines.index(hl)
except ValueError:
if self.hl_pos is not None:
del self.highlights[self.hl_pos]
if not self.highlights:
self.hl_pos = None
self.pos = 0
return
self.hl_pos = 0
hl = self.highlights[0]
self.pos = len(self.built_lines) - pos - self.height
if self.pos < 0 or self.pos >= len(self.built_lines):
self.pos = 0
def scroll_to_separator(self) -> None:
"""
Scroll to the first message after the separator. If no
separator is present, scroll to the first message of the window
"""
if None in self.built_lines:
self.pos = len(self.built_lines) - self.built_lines.index(
None) - self.height + 1
if self.pos < 0:
self.pos = 0
else:
self.pos = len(self.built_lines) - self.height + 1
# Chose a proper position (not too high)
self.scroll_up(0)
# Make “next highlight” work afterwards. This makes it easy to
# review all the highlights since the separator was placed, in
# the correct order.
self.hl_pos = len(
self.highlights) - self.nb_of_highlights_after_separator - 1
log.debug("self.hl_pos = %s", self.hl_pos)
def modify_message(self, old_id, message) -> None:
"""
Find a message, and replace it with a new one
(instead of rebuilding everything in order to correct a message)
"""
with_timestamps = config.getbool('show_timestamps')
nick_size = config.getint('max_nick_length')
for i in range(len(self.built_lines) - 1, -1, -1):
current = self.built_lines[i]
if current is not None and current.msg.identifier == old_id:
index = i
while (
index >= 0
and current is not None
and current.msg.identifier == old_id
):
self.built_lines.pop(index)
index -= 1
if index >= 0:
current = self.built_lines[index]
index += 1
lines = build_lines(
message, self.width, timestamp=with_timestamps, nick_size=nick_size
)
for line in lines:
self.built_lines.insert(index, line)
index += 1
break
| null |
poezio/windows/text_win.py
|
text_win.py
|
py
| 12,577 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "poezio.windows.base_wins.Win",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "poezio.windows.base_wins.Win.__init__",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "poezio.windows.base_wins.Win",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "poezio.config.config.getint",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "poezio.config.config",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "poezio.ui.render.Line",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "poezio.ui.render.Line",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "poezio.ui.types.BaseMessage",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "poezio.ui.render.Line",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "poezio.ui.types.BaseMessage",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "poezio.ui.render.build_lines",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "poezio.ui.types.Message",
"line_number": 102,
"usage_type": "argument"
},
{
"api_name": "poezio.config.config.getbool",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "poezio.config.config",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "poezio.config.config.getint",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "poezio.config.config",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "poezio.ui.render.write_pre",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "poezio.text_buffer.TextBuffer",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "poezio.text_buffer.TextBuffer",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "poezio.config.config.getbool",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "poezio.config.config",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "poezio.config.config.getint",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "poezio.config.config",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "poezio.text_buffer.TextBuffer",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "poezio.theming.get_theme",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "poezio.theming.to_curses_attr",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "poezio.config.config.getbool",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "poezio.config.config",
"line_number": 314,
"usage_type": "name"
},
{
"api_name": "poezio.config.config.getint",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "poezio.config.config",
"line_number": 315,
"usage_type": "name"
},
{
"api_name": "poezio.ui.render.build_lines",
"line_number": 330,
"usage_type": "call"
}
] |
578065990
|
import argparse
import sys
import libvh
PARSER = argparse.ArgumentParser()
PARSER.add_argument("api", help="The api file to work on")
PARSER.add_argument("-v", "--version", help="Explicitly sets version to the specified version number.")
PARSER.add_argument("-d", "--directory", help="Specify a directory where the source code lives, if different from the location of the api file")
PARSER.add_argument("-p", "--prerelease", help="Specify a pre-release string to be included after the patch number")
PARSER.add_argument("-b", "--build_metadata", help="Specify build metadata string to be included after the patch number")
PARSER.add_argument("-db", "--database", help="Specify the database file that stores the prior API version")
PARSER.add_argument("-c", "--checker", help="Specify (comma separated) file(s) that should provide the `check_api_item` functionality")
PARSER.add_argument("-x", "--extensions", help="Specify the file extensions of possible source files")
PARSER.add_argument("-nic", "--no_invariant_check", help="Specify that the invariant checker should not be run", action="store_true")
PARSER.add_argument("-dry", "--dry_run", help="Perform a dry run; Does not write to DB or API file", action="store_true")
PARSER.add_argument("-s", "--silent", help="Do not display any information to stdout", action="store_true")
def main():
if "--site_config" in sys.argv:
sys.argv.remove("--site_config")
sys.argv.remove("Alert_Handler.defaults={\'parse_args\':False}")
args = PARSER.parse_args()
libvh.version_helper(args.api, args.directory, args.version,
args.prerelease, args.build_metadata,
args.database, args.checker, args.extensions,
args.no_invariant_check, args.dry_run, args.silent)
if __name__ == "__main__":
if "-m" in sys.argv:
sys.argv.remote("-m")
# disables the pride Alert_Handler from displaying when --help is used
#if "--site_config" not in sys.argv:
sys.argv.insert(1, "--site_config")
sys.argv.insert(2, "Alert_Handler.defaults={\'parse_args\':False}")
main()
| null |
versionhelper/main.py
|
main.py
|
py
| 2,128 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "sys.argv.remove",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "sys.argv.remove",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "libvh.version_helper",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "sys.argv.remote",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "sys.argv.insert",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "sys.argv.insert",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 35,
"usage_type": "attribute"
}
] |
580036407
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 21 16:07:23 2021
@author: Peter
"""
#Import Packages
import cv2
from skimage.color import rgb2gray
from skimage.feature import blob_log
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from math import sqrt
from math import hypot
import scipy.spatial
import warnings
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 600 #Changes DPI of in-line figures.
plt.rcParams['font.family'] = "Arial"
import os
file_extension = "C:/Users/Peter/Desktop/Github/Calculation of Interparticle Distance for Direct Neighbors/Github/Data/"
filename_load = "Original Image.tif"
filepath = file_extension + filename_load
image = cv2.imread(filepath, 0) #Defined for later use. The '(,0)' makes the image grayscale.
def plot_original_image(filepath):
"""Plot full-color version of original image"""
plt.imshow(cv2.imread(filepath))
plt.title("Original Image")
plt.xticks([])
plt.yticks([])
plt.show()
#Plot input image with original colors
plot_original_image(filepath)
def extract_LOG_coordinates(image):
"""Extracts Coordinates of Nanoparticle Centers."""
with warnings.catch_warnings(): #Ignore warnings given by 'blob_log'
warnings.simplefilter("ignore")
image_gray = rgb2gray(image)
laplace_coords = blob_log(image_gray, min_sigma = 1, max_sigma=50, num_sigma=50, threshold=0.06, overlap = 0.1)
laplace_coords[:, 2] = laplace_coords[:, 2] * sqrt(2) # Compute radii in the 3rd column.
return laplace_coords
# Extract Coordinates from where NP's are.
laplace_coords = extract_LOG_coordinates(image)
def plot_overlaid_coordinates(image, laplace_coords):
"""Plots Original Image vs. Original Image with overlaid locations of nanoparticles, identified by laplace_coords."""
#Plot Subplots
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 3), sharex=True, sharey=True)
ax1.imshow(image, cmap = 'binary_r')
ax2.imshow(image, cmap = 'binary_r')
ax1.set_title("Original")
ax2.set_title("Points ID'd by Laplacian of Gaussian")
plt.xticks([])
plt.yticks([])
for blob in laplace_coords:
y, x, r = blob
c = plt.Circle((x, y), r, color="yellow", linewidth=1, fill=False)
ax2.add_patch(c)
plt.tight_layout()
plt.show()
# Visualize Coords overlaid on image.
plot_overlaid_coordinates(image, laplace_coords) #Plot All Identified Coordinates
def plot_connected_delaunay(laplace_coords):
"""Plots Delaunay Tessellation of all laplace_coords. """
dela = scipy.spatial.Delaunay
points = np.array(column(laplace_coords, [0 , 1])) #note np.array object is used for plt.triplot()
triang = dela(points) #
#Plot Tessellation.
plt.triplot(points[:,0], points[:,1], triang.simplices, lw = 0.5, alpha = 0.6)
plt.plot(points[:,0], points[:,1], 'o', markersize = 0.5)
plt.xticks([])
plt.yticks([])
plt.box(False)
plt.show()
def column(matrix, i): #select column(s) i of an array.
    """Select column(s) i from each row of an np.array object"""
return [row[i] for row in matrix]
# Visualize Delaunay Tessellation at all laplace_coords.
plot_connected_delaunay(laplace_coords)
def find_neighbors(pindex, triang): #pindex = point index.
"""Finds direct neighbors of Delaunay Tessellation.
pindex: int : point index of 'laplace_coords', for which neighbors are found relative to.
triang: Delaunay object containing info on which points are vertices of connecting triangles
"""
neighbors = []
for simplex in triang.vertices:
if pindex in simplex:
neighbors.extend([simplex[i] for i in range(len(simplex)) if simplex[i] != pindex])
        '''
        If a simplex contains the point we're interested in, extend the
        neighbors list with all the *other* point indices in that simplex.
        '''
    # Now strip out all the duplicate indices and return the neighbors list:
return list(set(neighbors))
def plot_direct_neighbors(image, laplace_coords, plot_examples = 6):
"""Plots Original Image with overlaid central NP's (red) and direct neighbors (yellow).
Intended to show effectiveness of identifying direct neighbors with Delaunay tessellation.
"""
#Re-instantiate Delaunay Tessellation
dela = scipy.spatial.Delaunay
triang = dela(column(laplace_coords, [0 , 1])) #select only x,y columns.
#Plot Delaunay neighbor examples:
#Select the # of examples to plot defined with 'plot_examples'
indexes = list(range(int(0.25 * len(laplace_coords)), int(0.75 * len(laplace_coords)), int(0.5 * len(laplace_coords)) // (plot_examples-1)))
for index in indexes: #else, cycle through range(len(laplace_coords)) to plot all neighbors.
center = laplace_coords[index]
neighbors = laplace_coords[find_neighbors(index, triang)]
#Plot center in red, and direct neighbors in yellow.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(6, 3), sharex=True, sharey=True)
ax1.imshow(image, cmap = "binary_r")
ax2.imshow(image, cmap = "binary_r")
ax1.set_title("Neighbors Overlaid on LOG", fontsize = 10)
ax2.set_title("Neighbors Overlaid on Grayscale", fontsize = 10)
ax1.tick_params(axis='both', labelsize= 8)
ax2.tick_params(axis='both', labelsize= 8)
#Left Plot (ax1)
#Plot all non-neighbors in faint yellow color. (Left Plot)
laplace_no_neighbor = np.delete(laplace_coords, find_neighbors(index, triang), 0)
for blob in laplace_no_neighbor: #plot all circles
y, x, r = blob
c = plt.Circle((x, y), r, color="yellow", linewidth=1, fill=False, alpha = 0.15)
ax1.add_patch(c)
#Plot neighbors in strong yellow color. (Left Plot)
for blob in neighbors:
y, x, r = blob
c = plt.Circle((x, y), r, color="yellow", linewidth=1, fill=False)
ax1.add_patch(c)
#Plot central point in red. (Left Plot)
y2, x2, r2 = center
c2 = plt.Circle((x2, y2), r2, color="red", linewidth=1, fill=False)
ax1.add_patch(c2)
#Right Plot (ax2)
#Plot central point in red. (Right Plot)
y2, x2, r2 = center
c2 = plt.Circle((x2, y2), r2, color="red", linewidth=1, fill=False)
ax2.add_patch(c2)
#Plot direct neighbors in yellow (Right Plot)
for blob in neighbors:
y, x, r = blob
c = plt.Circle((x, y), r, color="yellow", linewidth=1, fill=False)
ax2.add_patch(c)
plt.xticks([])
plt.yticks([])
plt.tight_layout()
plt.show()
# Visualize Examples of identified neighbors
plot_direct_neighbors(image, laplace_coords, plot_examples = 6)
def euclidean_distance(p1,p2):
"""Calculates Euclidean distance between two points."""
x1,y1 = p1
x2,y2 = p2
return hypot(x2 - x1, y2 - y1)
def direct_neighbor_distance(laplace_coords, max_distance = 100):
"""Creates list of euclidean distances between points, only considering direct neighbors found with Delaunay tessellation.
Max_distance = 100 pixels to avoid non-representative large spacing on edge of image.
    Returns the list of distances and the total count of detected nanoparticles."""
#re-instantiate Delaunay Tessellation
dela = scipy.spatial.Delaunay
triang = dela(column(laplace_coords, [0 , 1])) #
#Find euclidean distance to neighbors.
distances = []
for index in range(len(laplace_coords)):
center_coords = tuple(laplace_coords[index][:2])
neighbors = laplace_coords[find_neighbors(index, triang)]
#Find Euclidean distances
for neighbor in neighbors:
neighbor_coords = tuple(neighbor[:2])
neighbor_distance = euclidean_distance(center_coords, neighbor_coords)
if neighbor_distance < max_distance: #implement max_distance for edge of image examples
distances.append(neighbor_distance)
else:
continue
count = len(laplace_coords) #Total # of NP's
return distances, count
# Extract list of distances between direct neighbors only.
distances, count = direct_neighbor_distance(laplace_coords, max_distance = 100)
def plot_neighbor_histogram(distances, count, scale_bar = 1, scale_bar_unit = "Pixels"):
"""Plots histogram of euclidean distance between neighbors. Overlays Mean, Median, Std on plot.
Optional scale_bar in units of: distance (um) / pixel : Included to scale location of text on plot."""
import pandas as pd
if scale_bar:
distances = [x * scale_bar for x in distances]
if scale_bar_unit == "um":
scale_bar_unit = '\u03BCm'
#Plot
    with warnings.catch_warnings(): #Ignore deprecation warnings given by 'sns.distplot'
warnings.simplefilter("ignore")
sns.distplot(distances, norm_hist = True)
#plt.title(str(file), fontsize=12)
plt.xlabel("Euclidean Distance / " + str(scale_bar_unit), fontsize = 14, labelpad = 10)
plt.ylabel("Density", fontsize = 14, labelpad = 10)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.xlim(0, 50)
plt.ylim(0, 0.25)
#Overlay mean, median, std as immediate readout. Else could output in txt.
plt.text(80 * scale_bar, 0.21 , "Scale (" + scale_bar_unit + "/Pixel)" + " = " + str(scale_bar), fontsize = 12)
plt.text(80 * scale_bar, 0.18 , "Mean = " + str(round(float(np.mean(distances)), 1)) + " " + str(scale_bar_unit), fontsize = 12)
plt.text(80 * scale_bar, 0.15 , "Median = " + str(round(float(np.median(distances)), 1)) + " " + str(scale_bar_unit), fontsize = 12)
plt.text(80 * scale_bar, 0.12 , "Stdev = " + str(round(float(np.std(distances)), 1)) + " " + str(scale_bar_unit), fontsize = 12)
plt.text(80 * scale_bar, 0.09 , "Total # NP's = " + str(count), fontsize = 12)
plt.text(80 * scale_bar, 0.06 , "Total # Distances = " + str(len(distances)), fontsize = 12)
plt.show()
# Plot histogram of neighboring Euclidean distances
plot_neighbor_histogram(distances, count, scale_bar = 0.32, scale_bar_unit = "um")
| null |
Scripts/Interparticle Distance of Direct Neighbors.py
|
Interparticle Distance of Direct Neighbors.py
|
py
| 11,082 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.rcParams",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "cv2.imread",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "cv2.imread",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "warnings.catch_warnings",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "warnings.simplefilter",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "skimage.color.rgb2gray",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "skimage.feature.blob_log",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.Circle",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "scipy.spatial.spatial",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "scipy.spatial",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.triplot",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.box",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "scipy.spatial.spatial",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "scipy.spatial",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "numpy.delete",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.Circle",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.Circle",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.Circle",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.Circle",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 208,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.Circle",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 217,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "math.hypot",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.spatial",
"line_number": 246,
"usage_type": "attribute"
},
{
"api_name": "scipy.spatial",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "warnings.catch_warnings",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "warnings.simplefilter",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "seaborn.distplot",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 297,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 298,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 300,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 301,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 303,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 304,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 308,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "numpy.median",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 310,
"usage_type": "name"
},
{
"api_name": "numpy.std",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 311,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 312,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 314,
"usage_type": "name"
}
] |
449420169
|
import torch
from torch import nn
from torch.nn import functional as F
from model.layers import ConvNorm, LinearNorm
from hparams import hparams as hps
class Prenet(nn.Module):
def __init__(self):
super(Prenet, self).__init__()
out_sizes = hps.prenet_out_sizes
in_sizes = [hps.prenet_input_dim] + out_sizes[:-1]
self.layers = nn.ModuleList(
[LinearNorm(in_size, out_size, bias=False, w_init_gain='relu') for (in_size, out_size) in zip(in_sizes, out_sizes)]
)
def forward(self, x):
for linear in self.layers:
x = F.dropout(F.relu(linear(x)), hps.prenet_dropout_p, self.training)
# print('prenet linear size : ', x.size())
return x
if __name__ == '__main__':
prenet = Prenet()
| null |
model/modules.py
|
modules.py
|
py
| 789 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "hparams.hparams.prenet_out_sizes",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "hparams.hparams",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "hparams.hparams.prenet_input_dim",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "hparams.hparams",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "model.layers.LinearNorm",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.dropout",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "hparams.hparams.prenet_dropout_p",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "hparams.hparams",
"line_number": 19,
"usage_type": "name"
}
] |
535235433
|
from user_auth.models import DeviceToken
import datetime
class DeviceTokenHelper(object):
def __init__(self, username):
self.username = username
timestamp_str = datetime.datetime.now().strftime('%s')
self.datetime_now = int(timestamp_str)
def add_update_device_token(self, device_token):
try:
row = DeviceToken.objects.get(username=self.username)
row.token = device_token
row.updated_time = self.datetime_now
row.save()
except DeviceToken.DoesNotExist:
DeviceToken(
username=self.username,
token=device_token,
updated_time=self.datetime_now
).save()
def get_device_token(self):
try:
row = DeviceToken.objects.get(username=self.username)
device_token = row.token
return device_token
except DeviceToken.DoesNotExist:
return None
| null |
user_auth/functions/device_token_helper.py
|
device_token_helper.py
|
py
| 966 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "user_auth.models.DeviceToken.objects.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "user_auth.models.DeviceToken.objects",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "user_auth.models.DeviceToken",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "user_auth.models.DeviceToken.DoesNotExist",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "user_auth.models.DeviceToken",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "user_auth.models.DeviceToken",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "user_auth.models.DeviceToken.objects.get",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "user_auth.models.DeviceToken.objects",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "user_auth.models.DeviceToken",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "user_auth.models.DeviceToken.DoesNotExist",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "user_auth.models.DeviceToken",
"line_number": 29,
"usage_type": "name"
}
] |
600825962
|
import numpy as np
import cv2
import time
import win32api, win32con
cap = cv2.VideoCapture(0)
time.sleep(2)
ret, frame = cap.read()
lower=[157,69,119]   # lower HSV bound of the tracked color
upper=[179,255,255]  # upper HSV bound of the tracked color
while(1):
ret, frame = cap.read()
frame = cv2.resize(frame,(1360, 750), interpolation = cv2.INTER_CUBIC)
frame=cv2.flip(frame,1)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
lower = np.array(lower)
upper = np.array(upper)
mask = cv2.inRange(hsv, lower, upper)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
mask = cv2.medianBlur(mask,5)
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2]
center = None
if len(cnts) > 0:
c = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
if radius > 10:
cv2.circle(frame, (int(x), int(y)), int(radius),
(0, 255, 255), 2)
            win32api.SetCursorPos((int(x), int(y)))  # move the mouse cursor to the tracked object
cv2.circle(frame, center, 5, (0, 0, 255), -1)
cv2.imshow("Frame", frame)
if cv2.waitKey(1) == 27:
break
cap.release()
cv2.destroyAllWindows()
| null |
cursor_movement_detection.py
|
cursor_movement_detection.py
|
py
| 1,267 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.VideoCapture",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_CUBIC",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "cv2.flip",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2HSV",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.inRange",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.erode",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.dilate",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.medianBlur",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.findContours",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_EXTERNAL",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_SIMPLE",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "cv2.contourArea",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "cv2.minEnclosingCircle",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "cv2.moments",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "win32api.SetCursorPos",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 41,
"usage_type": "call"
}
] |
392091634
|
from flask_caching import Cache
from collections import OrderedDict
import pandas as pd
import sys
import os
from traffic_analysis.d00_utils.data_loader_s3 import DataLoaderS3
from traffic_analysis.d00_utils.data_loader_sql import DataLoaderSQL
from traffic_viz.d06_visualisation.dash_object_detection.server import server
from traffic_analysis.d00_utils.load_confs import load_app_parameters, load_parameters, load_paths, load_credentials
from traffic_analysis.d00_utils.get_project_directory import get_project_directory
project_dir = get_project_directory()
src_dir = f"{project_dir}/src"
sys.path.append(src_dir)
TIMEOUT = 60
cache = Cache(
server, config={"CACHE_TYPE": "filesystem", "CACHE_DIR": "cache-directory"}
)
app_params = load_app_parameters()
params = load_parameters()
paths = load_paths()
creds = load_credentials()
s3_credentials = creds[paths["s3_creds"]]
DEBUG = app_params["debug"]
def get_vehicle_types():
return params["selected_labels"]
def get_cams():
dl = DataLoaderS3(s3_credentials, bucket_name=paths["bucket_name"])
camera_meta_data_path = paths["s3_camera_details"]
data = dict(dl.read_json(camera_meta_data_path))
values = data.values()
cam_dict = {item["id"]: item["commonName"] for item in values}
# [{'label': item['commonName'], 'value': item['id']}
# for item in values]
cam_dict = OrderedDict(sorted(cam_dict.items(), key=lambda x: x[1]))
return cam_dict
def load_camera_statistics(camera_id):
output = pd.DataFrame()
dl = DataLoaderSQL(creds, paths)
sql = f"select * from {paths['db_video_level']} where camera_id = '{camera_id}';"
df = dl.select_from_table(sql)
if df is None:
return output
df["video_upload_datetime"] = pd.to_datetime(df.video_upload_datetime)
output = df[df.camera_id == camera_id]
return output
def load_vehicle_type_statistics(df, vehicle_type, start_date, end_date):
df_vehicle_type = df[df.vehicle_type == vehicle_type]
df_vehicle_type.sort_values("video_upload_datetime", inplace=True)
df_vehicle_type = df_vehicle_type[
((start_date <= df_vehicle_type.video_upload_datetime)
& (df_vehicle_type.video_upload_datetime <= end_date))
]
return df_vehicle_type
| null |
src/traffic_viz/d06_visualisation/dash_object_detection/helper.py
|
helper.py
|
py
| 2,254 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "traffic_analysis.d00_utils.get_project_directory.get_project_directory",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "flask_caching.Cache",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "traffic_viz.d06_visualisation.dash_object_detection.server.server",
"line_number": 17,
"usage_type": "argument"
},
{
"api_name": "traffic_analysis.d00_utils.load_confs.load_app_parameters",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "traffic_analysis.d00_utils.load_confs.load_parameters",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "traffic_analysis.d00_utils.load_confs.load_paths",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "traffic_analysis.d00_utils.load_confs.load_credentials",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "traffic_analysis.d00_utils.data_loader_s3.DataLoaderS3",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "traffic_analysis.d00_utils.data_loader_sql.DataLoaderSQL",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 54,
"usage_type": "call"
}
] |
269344995
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import matplotlib.pyplot as plt
from matplotlib.artist import setp as setprop
from matplotlib.backends.backend_pdf import PdfPages
import datetime as dt
import matplotlib.cm as cm
from mpl_toolkits.axes_grid.anchored_artists import AnchoredText as anctext
import config_file as sp
import master_plot as mp
"""
description
"""
__author__ = "Fanger1.Yunis"
__copyright__ = "Copyright 2012, Fanger1.Yunis"
__credits__ = ["Fanger1.Yunis"]
__license__ = "GPL"
__version__ = "$Rev$"
__status__ = "Alpha"
class ScatterPlot:
def draw_scatterplot_on_axis(self,
x=[],
y=[],
labels=[],
colors=[],
alphas=[],
exclude=None,
sizes=[],
markers=[],
edgecolors=[],
scatcmaps=[],
facecolors=[],
norms=[],
vmaxes=[],
vmins=[],
synclabels=False,
linewidthslist=[],
vertslist=[],
faceteds=[],
ymajorticks=[],
xmajorticks=[],
title='',
cmap=cm.get_cmap(),
legendloc=1,
xminorticks=(-1),
legendfontsize=9,
xtickrotation=None,
source='',
description=False,
descriptionloc=1,
valuesaspercent=(-1),
yboundlow=0,
yboundup=None,
xboundleft=None,
xboundright=None,
ax=None,
xscale=None,
yscale=None,
ymarginup=None,
ymarginlow=None,
fixedinterval=False,
xticks=10,
yticks=10,
majorticks='auto',
xtickformat=None,
minorticks=False,
weekday=(-1),
decimal_places=4,
save=False,
show=False,
shadow=True,
fancybox=True,
showxaxis=True,
showyaxis=True,
showxticklabels=True,
showyticklabels=True,
xticklabelha='right',
yticklabelha='right',
xticklabelva='top',
yticklabelva='bottom',
xlabel=None,
ylabel=None,
xgrid=False,
ygrid=False,
gridcolor='lightgrey',
xlabelrotation='horizontal',
ylabelrotation='vertical',
sourcecolor='k',
titlecolor='k',
subtitlecolor='k',
xlabelcolor='k',
ylabelcolor='k',
sourcebackgroundcolor='w',
titlebackgroundcolor='w',
subtitlebackgroundcolor='w',
xlabelbackgroundcolor='w',
ylabelbackgroundcolor='w',
ticklabelfontsize=8,
xlabelfontsize=12,
ylabelfontsize=12,
titlefontsize=13,
ystepwidth=None,
vertical_line=None,
vertical_line_color='k',
vertical_line_width=2.0,
horizontal_line=None,
horizontal_line_color='k',
horizontal_line_width=2.0,
**dump):
mp.warnigs(dump=dump, of='ScatterPlot')
pp = None
fig = None
if(save):
pp = PdfPages(sp.savepath + save + '.pdf')
fig = ax.get_figure()
fig.set_size_inches((29.7 / 2, 21.0 / 2))
x, y = mp.check_if_list(x, y)
dates = True
mp.set_scale(xscale, yscale, ax)
x, y = mp.fixedinterval(fixedinterval, x, y)
sortinglist = []
variables = [labels, x, y, colors, alphas, sizes,
markers, edgecolors, scatcmaps,
facecolors, norms, vmaxes, vmins,
linewidthslist, vertslist, faceteds]
checkifnone = mp.checkifnone
for n in range(len(x)):
sortinglist.append([checkifnone(var[0], var[1], var[2]) for var in
((labels, n, ''),
(x, n, None),
(y, n, None),
(colors, n, None),
(alphas, n, 1),
(sizes, n, 30),
(markers, n, None),
(edgecolors, n, 'k'),
(scatcmaps, n, None),
(facecolors, n, None),
(norms, n, None),
(vmaxes, n, None),
(vmins, n, None),
(linewidthslist, n, None),
(vertslist, n, None),
(faceteds, n, None))])
sortinglist = sorted(sortinglist, key=lambda sortbylabels: sortbylabels[0])
sortinglist.reverse()
for e in range(len(sortinglist)):
if sortinglist[e][0] is None:
sortinglist.insert(0, sortinglist.pop(e))
for var in range(len(variables)):
variables[var] = [sortedrow[var] for sortedrow in sortinglist]
if exclude:
x, y, labels = mp.exclude1(x, y, labels, exclude)
for i in range(len(x)):
x[i], y[i] = mp.cut_off_out_of_bound_data(x[i], y[i], xboundleft, xboundright)
if colors is not None and len(colors) > i and colors[i]is not None:
color = colors[i]
else:
color = cmap(float(i) / (len(x)))
if colors is None:
colors = [color]
elif not len(colors) > i:
colors.append(color)
elif(colors[i] is None):
colors[i] = color
if len(labels) > i:
if labels[i] is None:
label = 'None'
else:
label = labels[i]
else:
label = ''
if len(markers) > i:
marker = markers[i]
else:
marker = None
if len(norms) > i:
norm = norms[i]
else:
norm = None
if len(alphas) > i:
alpha = alphas[i]
else:
alpha = 1
if len(edgecolors) > i:
edgecolor = edgecolors[i]
else:
edgecolor = None
if len(scatcmaps) > i:
cmap2 = scatcmaps[i]
else:
cmap2 = None
if len(sizes) > i:
size = sizes[i]
else:
size = 30
if len(vmins) > i:
vmin = vmins[i]
else:
vmin = None
if len(vmaxes) > i:
vmax = vmaxes[i]
else:
vmax = None
if len(linewidthslist) > i:
linewidths = linewidthslist[i]
else:
linewidths = None
if len(faceteds) > i:
faceted = faceteds[i]
else:
faceted = None
if len(vertslist) > i:
verts = vertslist[i]
else:
verts = None
xy = zip(x[i], y[i])
xy = sorted(xy, key=lambda xyt: xyt[0])
cx = []
cy = []
for j in range(len(xy)):
cx.append(xy[j][0])
cy.append(xy[j][1])
x[i] = cx
y[i] = cy
if exclude:
x[i], y[i], label = mp.exclude2(x[i], y[i], label, exclude)
if not isinstance(x[i][0], dt.date):
dates = False
ax.scatter(x=x[i],
y=y[i],
s=size,
c=color,
marker=marker,
cmap=cmap2,
norm=norm,
vmin=vmin,
vmax=vmax,
edgecolor=edgecolor,
alpha=alpha,
linewidths=linewidths,
faceted=faceted,
verts=verts,
label=label)
highestvalue = None
lowestvalue = None
for a in range(len(y)):
for b in y[a]:
if highestvalue == None or highestvalue < b:
highestvalue = b
if lowestvalue == None or lowestvalue > b:
lowestvalue = b
if ymarginup != None:
if yboundup == None:
yboundup = ax.get_ybound()[1]
yboundup = highestvalue + ymarginup
ax.set_ybound(upper=yboundup)
if ymarginlow != None:
if yboundlow == None:
yboundlow = ax.get_ybound()[0]
yboundlow = lowestvalue - ymarginlow
ax.set_ybound(lower=yboundlow)
mp.set_bounds(yboundup, yboundlow, xboundright, xboundleft, ax)
tickinterval = mp.set_majortick_loc_form(dates, weekday, x, xticks, majorticks, ax, xtickformat)
mp.set_minortick_loc_form(dates, minorticks, majorticks, ax, xminorticks, xticks, tickinterval)
valuesaspercent = mp.check_if_valuesaspercent(valuesaspercent, y)
integer = mp.check_if_integer(y)
mp.set_yticks(yticks, ax, ystepwidth, integer)
mp.check_if_kw(dates, majorticks, ax)
fontP = mp.set_ticklabel_props(dates, ax, xticklabelha, xticklabelva, legendfontsize, yticklabelha,
yticklabelva, showxticklabels, showyticklabels, ticklabelfontsize)
mp.set_visible(showxaxis, showyaxis, ax)
mp.formatlabels(ax)
mp.valuesaspercent(valuesaspercent, ax, decimal_places)
if ymajorticks:
ax.set_yticklabels(ymajorticks)
if xmajorticks:
ax.set_xticklabels(xmajorticks)
if xtickrotation:
setprop(ax.get_xmajorticklabels(), rotation=xtickrotation, ha=xticklabelha, va=xticklabelva)
if xgrid:
if xgrid not in ['major', 'minor', 'both']:
xgrid = 'both'
ax.xaxis.grid(b=True, which=xgrid, color=gridcolor)
if ygrid:
if ygrid not in ['major', 'minor', 'both']:
ygrid = 'both'
ax.yaxis.grid(b=True, which=ygrid, color=gridcolor)
if xgrid or ygrid:
ax.set_axisbelow(True)
if xlabel:
ax.set_xlabel(unicode(xlabel),
rotation=xlabelrotation,
color=xlabelcolor,
fontsize=xlabelfontsize,
backgroundcolor=xlabelbackgroundcolor)
if ylabel:
ax.set_ylabel(unicode(ylabel),
rotation=ylabelrotation,
fontsize=ylabelfontsize,
color=ylabelcolor,
backgroundcolor=ylabelbackgroundcolor)
if(source):
ax.text(s=unicode(source),
verticalalignment='bottom',
horizontalalignment='right', fontsize=9,
color=sourcecolor, x=0.98, y=0.02,
backgroundcolor=sourcebackgroundcolor,
transform=ax.transAxes)
if (description):
at = anctext(description,
prop=dict(size=11), frameon=False,
loc=descriptionloc,
)
at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
ax.add_artist(at)
if(title):
plt.title(unicode(title),
color=titlecolor,
backgroundcolor=titlebackgroundcolor,
x=0.5, y=1.01,
fontsize=titlefontsize)
if vertical_line:
plt.hold
(tmp1, tmp2, vertical_line_low, vertical_line_high) = plt.axis()
for vertical_point in vertical_line:
plt.plot([vertical_point, vertical_point], [vertical_line_low, vertical_line_high],
vertical_line_color + '-', linewidth=vertical_line_width)
plt.axis([tmp1, tmp2, vertical_line_low, vertical_line_high])
if horizontal_line:
plt.hold
(tmp1, tmp2, tmp3, tmp4) = plt.axis()
for horizontal_point in horizontal_line:
plt.plot([tmp1, tmp2], [horizontal_point, horizontal_point],
horizontal_line_color + '-', linewidth=horizontal_line_width)
plt.axis([tmp1, tmp2, tmp3, tmp4])
return mp.returnvalues(labels, show, plt, save, pp, fig, ax, legendloc, xlabel, ylabel,
title, source, fontP, fancybox, shadow, synclabels, cmap)
if __name__ == '__main__':
ScatterPlot().draw_scatterplot_on_axis(x=[[5, 4, 3, 2, 1, 0], [5, 4, 3, 2, 1, 0], [5, 4, 3, 2, 1, 0]],
y=[[0.5, 0.4, 0.3, 0.2, 0.1, 0], [0.10, 0.5, 0.9, 0.4, 0.8, 0.3], [0.0, 0.4, 0.6, 0.6, 0.4, 0.0]],
ax=plt.axes(), labels=['asd', 'sad', 'dsa'], markers=['o', '^', '*'],
colors=['r', 'y', 'g'], legendloc=1, ymarginup=0.16,
vertical_line=[3.5, 2.5], horizontal_line=[0.05, 0.3],
fancybox=False, show=True)
| null |
naos-python/Library/Graphics/scatter_plot.py
|
scatter_plot.py
|
py
| 15,439 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.cm.get_cmap",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.cm",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "master_plot.warnigs",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "matplotlib.backends.backend_pdf.PdfPages",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "config_file.savepath",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "master_plot.check_if_list",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "master_plot.set_scale",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "master_plot.fixedinterval",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "master_plot.checkifnone",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "master_plot.exclude1",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "master_plot.cut_off_out_of_bound_data",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "master_plot.exclude2",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 265,
"usage_type": "attribute"
},
{
"api_name": "master_plot.set_bounds",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "master_plot.set_majortick_loc_form",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "master_plot.set_minortick_loc_form",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "master_plot.check_if_valuesaspercent",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "master_plot.check_if_integer",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "master_plot.set_yticks",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "master_plot.check_if_kw",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "master_plot.set_ticklabel_props",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "master_plot.set_visible",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "master_plot.formatlabels",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "master_plot.valuesaspercent",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "matplotlib.artist.setp",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "mpl_toolkits.axes_grid.anchored_artists.AnchoredText",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 378,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hold",
"line_number": 385,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 385,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 386,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 388,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 390,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hold",
"line_number": 393,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 393,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 394,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 396,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 398,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 398,
"usage_type": "name"
},
{
"api_name": "master_plot.returnvalues",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 400,
"usage_type": "argument"
},
{
"api_name": "matplotlib.pyplot.axes",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 406,
"usage_type": "name"
}
] |
616446930
|
from django.test.testcases import TestCase
from 試驗.書面表.書面試驗表 import 書面試驗表
class 字數試驗(TestCase):
def test_一句(self):
漢字 = '一場無練習'
臺羅 = 'Tsi̍t tiûnn bô liān-si̍p ê pí-sài'
書面 = 書面試驗表.新增一筆書面(編號='3', 文章名='一場無練習的比賽', 作者='33')
資料 = 書面.新增資料(None, 漢字=漢字, 臺羅=臺羅)
漢字字數, 臺羅字數 = 資料.字數()
self.assertEqual(漢字字數, 5)
self.assertEqual(臺羅字數, 8)
def test_換逝(self):
漢字 = '一場無練習的比賽 \n\n車'
臺羅 = 'Tsi̍t tiûnn bô liān-si̍p ê pí-sài\n\nTshia sái-kuè\n'
書面 = 書面試驗表.新增一筆書面(編號='3', 文章名='一場無練習的比賽', 作者='33')
資料 = 書面.新增資料(None, 漢字=漢字, 臺羅=臺羅)
漢字字數, 臺羅字數 = 資料.字數()
self.assertEqual(漢字字數, 9 + 2)
# -1 for rstrip tail
self.assertEqual(臺羅字數, 11 + 3 - 1)
| null |
試驗/書面表/test字數試驗.py
|
test字數試驗.py
|
py
| 1,114 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.test.testcases.TestCase",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "試驗.書面表.書面試驗表.書面試驗表.新增一筆書面",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "試驗.書面表.書面試驗表.書面試驗表",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "試驗.書面表.書面試驗表.書面試驗表.新增一筆書面",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "試驗.書面表.書面試驗表.書面試驗表",
"line_number": 19,
"usage_type": "name"
}
] |
317478873
|
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404, render, redirect
# from .models import related models
from .models import CarModel
# from .restapis import related methods
# from .restapis import get_dealers_from_cf, get_dealers_by_state_from_cf, get_dealer_reviews_from_cf, post_request, get_request
from .restapis import get_dealers_from_cf, get_dealer_reviews_from_cf, post_request, get_request
from django.contrib.auth import login, logout, authenticate
from django.contrib import messages
from datetime import datetime
import logging
import json
from datetime import datetime
# Get an instance of a logger
logger = logging.getLogger(__name__)
# Create your views here.
# Create an `about` view to render a static about page
def about(request):
context = {}
if request.method == "GET":
return render(request, 'djangoapp/about.html', context)
# Create a `contact` view to return a static contact page
def contact(request):
context = {}
if request.method == "GET":
return render(request, 'djangoapp/contact.html', context)
# Create a `login_request` view to handle sign in request
def login_request(request):
if request.method == "POST":
username = request.POST['usernameInput']
password = request.POST['passwordInput']
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
return redirect("djangoapp:index")
# Create a `logout_request` view to handle sign out request
def logout_request(request):
if request.method == "GET":
logout(request)
return redirect("djangoapp:index")
# Create a `registration_request` view to handle sign up request
def registration_request(request):
context = {}
# If it is a GET request, just render the registration page
if request.method == 'GET':
return render(request, 'djangoapp/registration.html', context)
# If it is a POST request, then create the user and return to the main page
elif request.method == 'POST':
# Get user information from request.POST
username = request.POST['username']
password = request.POST['password']
first_name = request.POST['firstname']
last_name = request.POST['lastname']
user_exist = False
try:
# Check if user already exists
User.objects.get(username=username)
user_exist = True
except:
# If not, simply log this is a new user
logger.debug("{} is new user".format(username))
# If it is a new user
if (user_exist == False):
# Create user in auth_user table
user = User.objects.create_user(username=username, first_name=first_name,
last_name=last_name, password=password)
            # Login the user and redirect to the index page
            login(request, user)
            return redirect("djangoapp:index")
else:
# context = {"ErrorMessage":"Username already exists in the system. Please register a different username."}
return render(request, 'djangoapp/registration.html', context)
# ...
# Update the `get_dealerships` view to render the index page with a list of dealerships
def get_dealerships(request):
context = {}
if request.method == "GET":
# return render(request, 'djangoapp/index.html', context)
url = "https://d480556a.us-south.apigw.appdomain.cloud/api/dealership"
#get dealers from the URL
#if state was given as a parameter then get the entries related to that 'state'
if "state" in request.GET:
print(">>>> state was found in GET request")
dealerships = get_dealer_by_state(url, state=request.GET["state"])
#else, get all dealerships
else:
print(">>> getting all dealerships")
dealerships = get_dealers_from_cf(url)
print(">>> SUCCESS, got all dealerships")
# dealer_names = ' '.join([dealer.short_name for dealer in dealerships])
# # Return a list of dealer short name
# return HttpResponse(dealer_names)
context['dealerships'] = dealerships
return render(request, 'djangoapp/index.html', context)
# Create a `get_dealer_details` view to render the reviews of a dealer
def get_dealer_details(request, dealer_id):
context = {}
if request.method == "GET":
url = "https://d480556a.us-south.apigw.appdomain.cloud/api/reviews"
reviews = get_dealer_reviews_from_cf(url, dealerId=dealer_id)
# # If response does not have error message
# if "msg" not in reviews:
# # review_text = " ".join([review.review + ": " + " \n" for review in reviews])
# review_text = " ".join([review.review + ": " + review.sentiment + " | " for review in reviews])
# return HttpResponse(review_text)
# else:
# # Return response error
# return HttpResponse(str(reviews))
print(">>>>reviews: ", reviews)
context = {
"dealer": dealer_id,
"reviews": reviews,
}
return render(request, 'djangoapp/dealer_details.html', context)
# Create a `add_review` view to submit a review
def add_review(request, dealer_id):
if request.method == "GET":
url = f"https://d480556a.us-south.apigw.appdomain.cloud/api/dealership?dealerId={dealer_id}"
context = {
"cars": CarModel.objects.all(),
"dealer": get_dealers_from_cf(url)[0],
# "dealer": get_dealers_from_cf(url, dealerId=dealer_id),
}
print(">>>>>>>>>>>>> context.dealer: ", context["dealer"])
return render(request, 'djangoapp/add_review.html', context)
if request.method == "POST":
url = "https://d480556a.us-south.apigw.appdomain.cloud/api/reviews"
totalreviews = get_request(url)
print("totalreviews['entries'] = ", totalreviews["entries"])
new_id = len(totalreviews["entries"])
print("\n\n\n\n\n len(totalReviews) = "+ str(len(totalreviews["entries"])) +"\n\n\n\n\n")
form = request.POST
review = {
"name": f"{request.user.first_name} {request.user.last_name}",
"dealership": dealer_id,
"review": form["content"],
"id":new_id,
"purchase": form.get("purchasecheck"),
}
if form.get("purchasecheck"):
review["purchasedate"] = datetime.strptime(form.get("purchasedate"), "%Y-%m-%d").isoformat()
car = CarModel.objects.get(pk=form["car"])
review["car_make"] = car.car_make.name
review["car_model"] = car.name
review["car_year"]= car.year.strftime("%Y")
json_payload = {"review":review}
response = post_request(url, json_payload, dealerId=dealer_id)
# return HttpResponse(str(response))
# else:
# return HttpResponse("Only authenticated users can submit reviews")
print(">>>>>>>>>>>>>>>>> Redirect to dealer details")
return redirect("djangoapp:dealer_details", dealer_id=dealer_id)
| null |
server/djangoapp/views.py
|
views.py
|
py
| 7,329 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.logout",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects.get",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.User.objects.create_user",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "restapis.get_dealers_from_cf",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "restapis.get_dealer_reviews_from_cf",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "models.CarModel.objects.all",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "models.CarModel.objects",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "models.CarModel",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "restapis.get_dealers_from_cf",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "restapis.get_request",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "models.CarModel.objects.get",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "models.CarModel.objects",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "models.CarModel",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "restapis.post_request",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 188,
"usage_type": "call"
}
] |
465221981
|
#
# Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved.
#
# Licensed under the MIT License. See the LICENSE accompanying this file
# for the specific language governing permissions and limitations under
# the License.
#
import mount_efs
import socket
import pytest
from mock import MagicMock
FS_ID = 'fs-deadbeef'
DEFAULT_REGION = 'us-east-1'
@pytest.fixture(autouse=True)
def setup(mocker):
mocker.patch('mount_efs.get_region', return_value=DEFAULT_REGION)
mocker.patch('socket.gethostbyname')
def _get_mock_config(dns_name_format='{fs_id}.efs.{region}.amazonaws.com'):
mock_config = MagicMock()
mock_config.get.return_value = dns_name_format
return mock_config
def test_get_dns_name(mocker):
config = _get_mock_config()
dns_name = mount_efs.get_dns_name(config, FS_ID)
assert '%s.efs.%s.amazonaws.com' % (FS_ID, DEFAULT_REGION) == dns_name
def test_get_dns_name_other_format(mocker):
config = _get_mock_config('{fs_id}.elastic-file-system.{region}.amazonaws.com')
dns_name = mount_efs.get_dns_name(config, FS_ID)
assert '%s.elastic-file-system.%s.amazonaws.com' % (FS_ID, DEFAULT_REGION) == dns_name
def test_get_dns_name_region_hardcoded(mocker):
get_region_mock = mocker.patch('mount_efs.get_region')
config = _get_mock_config('{fs_id}.elastic-file-system.us-west-2.amazonaws.com')
dns_name = mount_efs.get_dns_name(config, FS_ID)
get_region_mock.assert_not_called()
assert '%s.elastic-file-system.us-west-2.amazonaws.com' % FS_ID == dns_name
def test_get_dns_name_bad_format_wrong_specifiers(mocker):
config = _get_mock_config('{foo}.efs.{bar}')
with pytest.raises(ValueError) as ex:
mount_efs.get_dns_name(config, FS_ID)
assert 'must include' in str(ex.value)
def test_get_dns_name_bad_format_too_many_specifiers_1(mocker):
config = _get_mock_config('{fs_id}.efs.{foo}')
with pytest.raises(ValueError) as ex:
mount_efs.get_dns_name(config, FS_ID)
assert 'incorrect number' in str(ex.value)
def test_get_dns_name_bad_format_too_many_specifiers_2(mocker):
config = _get_mock_config('{fs_id}.efs.{region}.{foo}')
with pytest.raises(ValueError) as ex:
mount_efs.get_dns_name(config, FS_ID)
assert 'incorrect number' in str(ex.value)
def test_get_dns_name_unresolvable(mocker, capsys):
config = _get_mock_config()
mocker.patch('socket.gethostbyname', side_effect=socket.gaierror)
with pytest.raises(SystemExit) as ex:
mount_efs.get_dns_name(config, FS_ID)
assert 0 != ex.value.code
out, err = capsys.readouterr()
assert 'Failed to resolve' in err
| null |
test/mount_efs_test/test_get_dns_name.py
|
test_get_dns_name.py
|
py
| 2,670 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pytest.fixture",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "mount_efs.get_dns_name",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "mount_efs.get_dns_name",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "mount_efs.get_dns_name",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "mount_efs.get_dns_name",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "mount_efs.get_dns_name",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "mount_efs.get_dns_name",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "socket.gaierror",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "pytest.raises",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "mount_efs.get_dns_name",
"line_number": 93,
"usage_type": "call"
}
] |
319525318
|
from sklearn.datasets import load_breast_cancer
import sklearn.preprocessing
from Ridge_Lasso_model.Ridge_Lasso import RidgeClassifier, LassoClassifier
import torch
x, y = load_breast_cancer(return_X_y=True)
x = sklearn.preprocessing.scale(x)
x_train, y_train = torch.from_numpy(x[:400]), torch.from_numpy(y[:400])
x_test, y_test = torch.from_numpy(x[400:]), torch.from_numpy(y[400:])
ridge = RidgeClassifier(alpha=10, lr=1e-4, random_state=123, max_iter=5000)
lasso = LassoClassifier(alpha=10, lr=1e-4, random_state=123, max_iter=5000)
print('*****Ridge*****')
ridge.fit(x_train, y_train)
y_pred = ridge.predict(x_test)
print((y_test == y_pred).sum().item() / len(y_pred))
print('*****Lasso*****')
lasso.fit(x_train, y_train)
y_pred = lasso.predict(x_test)
print((y_test == y_pred).sum().item() / len(y_pred))
| null |
Ridge_Lasso_model/test_classification.py
|
test_classification.py
|
py
| 843 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sklearn.datasets.load_breast_cancer",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets.preprocessing.scale",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets.preprocessing",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sklearn.datasets",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.from_numpy",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "Ridge_Lasso_model.Ridge_Lasso.RidgeClassifier",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "Ridge_Lasso_model.Ridge_Lasso.LassoClassifier",
"line_number": 14,
"usage_type": "call"
}
] |
579590404
|
"""
rex.widget.port_support
=======================
:copyright: 2015, Prometheus Research, LLC
"""
from contextlib import contextmanager
from werkzeug.local import LocalStack
from rex.port import Port
__all__ = ('PortSupport',)
_stack = LocalStack()
_default_parameters = {}
def get_parameters():
parameters = _stack.top
return parameters if parameters is not None else _default_parameters
@contextmanager
def set_parameters(parameters):
try:
_stack.push(parameters)
yield
finally:
_stack.pop()
class PortSupport(object):
def __init__(self):
super(PortSupport, self).__init__()
self.port_parameters = get_parameters()
@staticmethod
def parameters(*args, **kwargs):
parameters = {}
for arg in args:
parameters.update(arg)
parameters.update(kwargs)
return set_parameters(parameters)
def create_port(self, port):
parameters = [{'parameter': parameter, 'default': default}
for parameter, default
in list(self.port_parameters.items())]
return Port(parameters + [port])
| null |
src/rex.widget/src/rex/widget/port_support.py
|
port_support.py
|
py
| 1,168 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "werkzeug.local.LocalStack",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "contextlib.contextmanager",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "rex.port.Port",
"line_number": 53,
"usage_type": "call"
}
] |
642226255
|
#!/usr/bin/env python3
# Copyright 2017 SchedMD LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import logging
import os
import sys
import shutil
import time
from pathlib import Path
from subprocess import DEVNULL
from functools import reduce, partialmethod
from concurrent.futures import ThreadPoolExecutor
import googleapiclient.discovery
import requests
import yaml
# get util.py from metadata
UTIL_FILE = Path('/tmp/util.py')
try:
resp = requests.get('http://metadata.google.internal/computeMetadata/v1/instance/attributes/util-script',
headers={'Metadata-Flavor': 'Google'})
resp.raise_for_status()
UTIL_FILE.write_text(resp.text)
except requests.exceptions.RequestException:
print("util.py script not found in metadata")
if not UTIL_FILE.exists():
print(f"{UTIL_FILE} also does not exist, aborting")
sys.exit(1)
spec = importlib.util.spec_from_file_location('util', UTIL_FILE)
util = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = util
spec.loader.exec_module(util)
cd = util.cd # import util.cd into local namespace
NSDict = util.NSDict
Path.mkdirp = partialmethod(Path.mkdir, parents=True, exist_ok=True)
util.config_root_logger(logfile='/tmp/setup.log')
log = logging.getLogger(Path(__file__).name)
sys.excepthook = util.handle_exception
# get setup config from metadata
config_yaml = yaml.safe_load(util.get_metadata('attributes/config'))
cfg = util.Config.new_config(config_yaml)
# load all directories as Paths into a dict-like namespace
dirs = NSDict({n: Path(p) for n, p in dict.items({
'home': '/home',
'apps': '/apps',
'scripts': '/slurm/scripts',
'prefix': '/usr/local',
'secdisk': '/mnt/disks/sec',
'apps_sec': '/mnt/disks/sec/apps',
})})
RESUME_TIMEOUT = 300
SUSPEND_TIMEOUT = 300
CONTROL_MACHINE = cfg.cluster_name + '-controller'
def start_motd():
""" advise in motd that slurm is currently configuring """
msg = """
*** Slurm is currently being configured in the background. ***
"""
Path('/etc/motd').write_text(msg)
# END start_motd()
def end_motd(broadcast=True):
""" modify motd to signal that setup is complete """
Path('/etc/motd').write_text("")
if not broadcast:
return
util.run("wall -n '*** Slurm {} setup complete ***'"
.format(cfg.instance_type))
if cfg.instance_type != 'controller':
util.run("""wall -n '
/home on the controller was mounted over the existing /home.
Log back in to ensure your home directory is correct.
'""")
# END end_motd()
def expand_instance_templates():
""" Expand instance template into instance_defs """
compute = googleapiclient.discovery.build('compute', 'v1',
cache_discovery=False)
for pid, instance_def in cfg.instance_defs.items():
if (instance_def.instance_template and
(not instance_def.machine_type or not instance_def.gpu_count)):
template_resp = util.ensure_execute(
compute.instanceTemplates().get(
project=cfg.project,
instanceTemplate=instance_def.instance_template))
if template_resp:
template_props = template_resp['properties']
if not instance_def.machine_type:
instance_def.machine_type = template_props['machineType']
if (not instance_def.gpu_count and
'guestAccelerators' in template_props):
accel_props = template_props['guestAccelerators'][0]
instance_def.gpu_count = accel_props['acceleratorCount']
instance_def.gpu_type = accel_props['acceleratorType']
# END expand_instance_templates()
def expand_machine_type():
""" get machine type specs from api """
machines = {}
compute = googleapiclient.discovery.build('compute', 'v1',
cache_discovery=False)
for pid, part in cfg.instance_defs.items():
machine = {'cpus': 1, 'memory': 1}
machines[pid] = machine
if not part.machine_type:
log.error("No machine type to get configuration from")
continue
type_resp = None
if part.regional_capacity:
filter = f"(zone={part.region}-*) AND (name={part.machine_type})"
list_resp = util.ensure_execute(
compute.machineTypes().aggregatedList(
project=cfg.project, filter=filter))
if 'items' in list_resp:
zone_types = list_resp['items']
for k, v in zone_types.items():
if part.region in k and 'machineTypes' in v:
type_resp = v['machineTypes'][0]
break
else:
type_resp = util.ensure_execute(
compute.machineTypes().get(
project=cfg.project, zone=part.zone,
machineType=part.machine_type))
if type_resp:
cpus = type_resp['guestCpus']
machine['cpus'] = cpus // (1 if part.image_hyperthreads else 2)
            # Because the actual memory on the host will be different than
            # what is configured (e.g. the kernel will take some of it). From
            # experiments, about 16 MB per GB are used (plus about a 400 MB
            # buffer for the first couple of GBs). Using 30 MB per GB to be safe.
gb = type_resp['memoryMb'] // 1024
machine['memory'] = type_resp['memoryMb'] - (400 + (gb * 30))
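            # Worked example (for illustration only): a machine type reporting
            # 16384 MB (16 GB) ends up with
            # machine['memory'] = 16384 - (400 + 16 * 30) = 15504 MB.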
return machines
# END expand_machine_type()
def install_meta_files():
""" save config.yaml and download all scripts from metadata """
Path(dirs.scripts).mkdirp()
cfg.save_config(dirs.scripts/'config.yaml')
meta_entries = [
('util.py', 'util-script'),
('setup.py', 'setup-script'),
('startup.sh', 'startup-script'),
('custom-compute-install', 'custom-compute-install'),
('custom-controller-install', 'custom-controller-install'),
]
def install_metafile(filename, metaname):
text = util.get_metadata('attributes/' + metaname)
if not text:
return
path = dirs.scripts/filename
path.write_text(text)
path.chmod(0o755)
with ThreadPoolExecutor() as exe:
exe.map(lambda x: install_metafile(*x), meta_entries)
# END install_meta_files()
def prepare_network_mounts(hostname, instance_type):
""" Prepare separate lists of cluster-internal and external mounts for the
given host instance, returning (external_mounts, internal_mounts)
"""
log.info("Set up network storage")
default_mounts = (
dirs.home,
dirs.apps,
dirs.apps_sec,
)
# create dict of mounts, local_mount: mount_info
CONTROL_NFS = {
'server_ip': CONTROL_MACHINE,
'remote_mount': 'none',
'local_mount': 'none',
'fs_type': 'nfs',
'mount_options': 'defaults,hard,intr',
}
# seed the non-controller mounts with the default controller mounts
mounts = {
path: util.Config(CONTROL_NFS, local_mount=path, remote_mount=path)
for path in default_mounts
}
# convert network_storage list of mounts to dict of mounts,
# local_mount as key
def listtodict(mountlist):
return {Path(d['local_mount']).resolve(): d for d in mountlist}
# On non-controller instances, entries in network_storage could overwrite
# default exports from the controller. Be careful, of course
mounts.update(listtodict(cfg.network_storage))
if instance_type == 'compute':
pid = util.get_pid(hostname)
mounts.update(listtodict(cfg.instance_defs[pid].network_storage))
else:
# login_network_storage is mounted on controller and login instances
mounts.update(listtodict(cfg.login_network_storage))
# filter mounts into two dicts, cluster-internal and external mounts, and
# return both. (external_mounts, internal_mounts)
def internal_mount(mount):
return mount[1].server_ip == CONTROL_MACHINE
def partition(pred, coll):
""" filter into 2 lists based on pred returning True or False
returns ([False], [True])
"""
return reduce(
lambda acc, el: acc[pred(el)].append(el) or acc,
coll, ([], [])
)
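    # For illustration: partition(lambda x: x > 2, [1, 3, 2, 4]) returns
    # ([1, 2], [3, 4]) -- elements failing the predicate first, then the ones
    # passing it, which is why the caller reads the result as
    # (external_mounts, internal_mounts).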
return tuple(map(dict, partition(internal_mount, mounts.items())))
# END prepare_network_mounts
def setup_network_storage():
""" prepare network fs mounts and add them to fstab """
global mounts
ext_mounts, int_mounts = prepare_network_mounts(cfg.hostname,
cfg.instance_type)
mounts = ext_mounts
if cfg.instance_type != 'controller':
mounts.update(int_mounts)
# Determine fstab entries and write them out
fstab_entries = []
for local_mount, mount in mounts.items():
remote_mount = mount.remote_mount
fs_type = mount.fs_type
server_ip = mount.server_ip
# do not mount controller mounts to itself
if server_ip == CONTROL_MACHINE and cfg.instance_type == 'controller':
continue
log.info("Setting up mount ({}) {}{} to {}".format(
fs_type, server_ip+':' if fs_type != 'gcsfuse' else "",
remote_mount, local_mount))
local_mount.mkdirp()
mount_options = (mount.mount_options.split(',') if mount.mount_options
else [])
if not mount_options or '_netdev' not in mount_options:
mount_options += ['_netdev']
if fs_type == 'gcsfuse':
if 'nonempty' not in mount_options:
mount_options += ['nonempty']
fstab_entries.append(
"{0} {1} {2} {3} 0 0"
.format(remote_mount, local_mount, fs_type,
','.join(mount_options)))
else:
remote_mount = Path(remote_mount).resolve()
fstab_entries.append(
"{0}:{1} {2} {3} {4} 0 0"
.format(server_ip, remote_mount, local_mount,
fs_type, ','.join(mount_options)))
for mount in mounts:
Path(mount).mkdirp()
with open('/etc/fstab', 'a') as f:
f.write('\n')
for entry in fstab_entries:
f.write(entry)
f.write('\n')
# END setup_network_storage()
def mount_fstab():
""" Wait on each mount, then make sure all fstab is mounted """
global mounts
def mount_path(path):
while not os.path.ismount(path):
log.info(f"Waiting for {path} to be mounted")
util.run(f"mount {path}", wait=5)
with ThreadPoolExecutor() as exe:
exe.map(mount_path, mounts.keys())
util.run("mount -a", wait=1)
# END mount_external
def setup_nfs_exports():
""" nfs export all needed directories """
# The controller only needs to set up exports for cluster-internal mounts
# switch the key to remote mount path since that is what needs exporting
_, con_mounts = prepare_network_mounts(cfg.hostname, cfg.instance_type)
con_mounts = {m.remote_mount: m for m in con_mounts.values()}
for pid, _ in cfg.instance_defs.items():
# get internal mounts for each partition by calling
# prepare_network_mounts as from a node in each partition
_, part_mounts = prepare_network_mounts(f'{pid}-n', 'compute')
part_mounts = {m.remote_mount: m for m in part_mounts.values()}
con_mounts.update(part_mounts)
# export path if corresponding selector boolean is True
exports = []
for path in con_mounts:
Path(path).mkdirp()
util.run(rf"sed -i '\#{path}#d' /etc/exports")
exports.append(f"{path} *(rw,no_subtree_check,no_root_squash)")
exportsd = Path('/etc/exports.d')
exportsd.mkdirp()
with (exportsd/'cluster.exports').open('w') as f:
f.write('\n')
f.write('\n'.join(exports))
util.run("exportfs -a")
# END setup_nfs_exports()
def setup_secondary_disks():
""" Format and mount secondary disk """
util.run(
"sudo mkfs.ext4 -m 0 -F -E lazy_itable_init=0,lazy_journal_init=0,discard /dev/sdb")
Path(dirs.secdisk).mkdirp()
with open('/etc/fstab', 'a') as f:
f.write(
"\n/dev/sdb {0} ext4 discard,defaults,nofail 0 2"
.format(dirs.secdisk))
# END setup_secondary_disks()
def setup_controller():
""" Run controller setup """
expand_instance_templates()
if cfg.controller_secondary_disk:
setup_secondary_disks()
setup_network_storage()
mount_fstab()
try:
util.run(str(dirs.scripts/'custom-controller-install'))
except Exception:
# Ignore blank files with no shell magic.
pass
# Export at the end to signal that everything is up
util.run("systemctl enable nfs-server")
util.run("systemctl start nfs-server")
setup_nfs_exports()
log.info("Done setting up controller")
pass
def setup_login():
""" run login node setup """
setup_network_storage()
mount_fstab()
try:
util.run(str(dirs.scripts/'custom-compute-install'))
except Exception:
# Ignore blank files with no shell magic.
pass
log.info("Done setting up login")
def setup_compute():
""" run compute node setup """
setup_network_storage()
mount_fstab()
pid = util.get_pid(cfg.hostname)
if cfg.instance_defs[pid].gpu_count:
retries = n = 50
while util.run("nvidia-smi").returncode != 0 and n > 0:
n -= 1
log.info(f"Nvidia driver not yet loaded, try {retries-n}")
time.sleep(5)
try:
util.run(str(dirs.scripts/'custom-compute-install'))
except Exception:
# Ignore blank files with no shell magic.
pass
log.info("Done setting up compute")
def main():
start_motd()
install_meta_files()
# call the setup function for the instance type
setup = dict.get(
{
'controller': setup_controller,
'compute': setup_compute,
'login': setup_login
},
cfg.instance_type,
lambda: log.fatal(f"Unknown instance type: {cfg.instance_type}")
)
setup()
end_motd()
# END main()
if __name__ == '__main__':
main()
| null |
dm/scripts/setup.py
|
setup.py
|
py
| 15,000 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pathlib.Path",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "importlib.util.spec_from_file_location",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "importlib.util",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "importlib.util.module_from_spec",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "importlib.util",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "sys.modules",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path.mkdirp",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "functools.partialmethod",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "pathlib.Path.mkdir",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "sys.excepthook",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "yaml.safe_load",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "googleapiclient.discovery.discovery.build",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "googleapiclient.discovery.discovery",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "googleapiclient.discovery",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "googleapiclient.discovery.discovery.build",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "googleapiclient.discovery.discovery",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "googleapiclient.discovery",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.ThreadPoolExecutor",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "os.path.ismount",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 325,
"usage_type": "attribute"
},
{
"api_name": "concurrent.futures.ThreadPoolExecutor",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 428,
"usage_type": "call"
}
] |
78156344
|
from nltk.corpus import names
from nltk import NaiveBayesClassifier
from nltk import classify
from nltk import MaxentClassifier
import random
names = ([(name, 'male') for name in names.words('male.txt')] + [(name, 'female') for name in names.words('female.txt')])
random.shuffle(names)
print(names[0:10])
def gender_features(word):
return {'last_letter': word[-1]}
print(gender_features('Gary'))
featuresets = [(gender_features(n), g) for (n, g) in names]
print(featuresets[0:10])
train_set, test_set = featuresets[500:], featuresets[:500]
print(len(train_set), len(test_set))
nb_classifier = NaiveBayesClassifier.train(train_set)
print(nb_classifier.classify(gender_features('Gary')))
print(nb_classifier.classify(gender_features('Grace')))
print(classify.accuracy(nb_classifier, test_set))
nb_classifier.show_most_informative_features(5)
me_classifier = MaxentClassifier.train(train_set)
me_classifier.classify(gender_features('Gary'))
me_classifier.classify(gender_features('Grace'))
print(classify.accuracy(me_classifier, test_set))
me_classifier.show_most_informative_features(5)
def gender_features2(name):
features = {}
features["firstletter"] = name[0].lower()
features["lastletter"] = name[-1].lower()
for letter in 'abcdefghijklmnopqrstuvwxyz':
features["count(%s)" % letter] = name.lower().count(letter)
features["has(%s)" % letter] = (letter in name.lower())
return features
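# For example, gender_features2('Gary') contains {'firstletter': 'g',
# 'lastletter': 'y', 'count(a)': 1, 'has(a)': True, ...} plus the count/has
# pairs for every other letter of the alphabet.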
print(gender_features2('Gary'))
featuresets = [(gender_features2(n), g) for (n, g) in names]
train_set, test_set = featuresets[500:], featuresets[:500]
nb2_classifier = NaiveBayesClassifier.train(train_set)
print(classify.accuracy(nb2_classifier, test_set))
me2_classifier = MaxentClassifier.train(train_set)
print(classify.accuracy(me2_classifier, test_set))
def gender_features3(name):
features = {}
features["fl"] = name[0].lower()
features["ll"] = name[-1].lower()
features["fw"] = name[:2].lower()
features["lw"] = name[-2:].lower()
return features
print(gender_features3('Gary'))
featuresets = [(gender_features3(n), g) for (n, g) in names]
train_set, test_set = featuresets[500:], featuresets[:500]
nb3_classifier = NaiveBayesClassifier.train(train_set)
print(classify.accuracy(nb3_classifier, test_set))
me3_classifier = MaxentClassifier.train(train_set)
print(classify.accuracy(me3_classifier, test_set))
| null |
nltk/practice/gender_identification_classifier.py
|
gender_identification_classifier.py
|
py
| 2,378 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "nltk.corpus.names",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.names.words",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.names",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "nltk.corpus.names",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.names",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "nltk.NaiveBayesClassifier.train",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "nltk.NaiveBayesClassifier",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "nltk.classify.accuracy",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "nltk.classify",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "nltk.MaxentClassifier.train",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "nltk.MaxentClassifier",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "nltk.classify.accuracy",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "nltk.classify",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.names",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "nltk.NaiveBayesClassifier.train",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "nltk.NaiveBayesClassifier",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "nltk.classify.accuracy",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "nltk.classify",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "nltk.MaxentClassifier.train",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "nltk.MaxentClassifier",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "nltk.classify.accuracy",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "nltk.classify",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.names",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "nltk.NaiveBayesClassifier.train",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "nltk.NaiveBayesClassifier",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "nltk.classify.accuracy",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "nltk.classify",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "nltk.MaxentClassifier.train",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "nltk.MaxentClassifier",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "nltk.classify.accuracy",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "nltk.classify",
"line_number": 75,
"usage_type": "name"
}
] |
49160889
|
from collections import OrderedDict
import inspect
import numpy as np
import os
from Assignment6.Code import kDataPath, report_path
from model.NeuralNetworkModel import NeuralNetwork
from model.RandomForestsModel import RandomForestModel
from utils.Assignment4Support import draw_accuracies
from utils.Assignment5Support import Featurize, TrainTestSplit, LoadRawData
from utils.EvaluationsStub import Evaluation
prob2_report_path = os.path.join(report_path, 'prob2')
def compare_roc_curves_nn_rf(xTrainRaw, yTrainRaw,
xTestRaw, yTestRaw,
config, thresholds,
num_hidden_layer=2,
num_nodes=15,
step_size=0.08,
iterations=200):
graphs = []
legends = []
(xTrain, xTest) = Featurize(xTrainRaw, xTestRaw,
includeGradients=False,
includeRawPixels=False,
includeIntensities=True)
yTrain = yTrainRaw
yTest = yTestRaw
print("Running RandomForest")
legends.append("RandomForest")
model = RandomForestModel(numTrees=config['num_trees'],
bagging_w_replacement=config['bagging_w_replacement'])
model.fit(xTrain, yTrain, min_to_split=config['min_to_split'])
yTestPredicted_prob = model.predict_probabilities(xTest)
cont_length_fpr_fnr = []
for threshold in thresholds:
yTestPredicted = [int(p >= threshold) for p in yTestPredicted_prob]
# featured = [int(y < threshold) for y in yTestPredicted]
ev = Evaluation(yTest, yTestPredicted)
# if threshold == 0:
# assert (ev.fpr == 0.0), ev
print(ev)
cont_length_fpr_fnr.append((ev.fpr, ev.fnr))
graphs.append(cont_length_fpr_fnr)
#
print("NeuralNetwork")
legends.append("NeuralNetwork")
xTrains = np.array([[1] + x for x in xTrain])
xTests = np.array([[1] + x for x in xTest])
yTrains = np.array([[y] for y in yTrainRaw])
yTests = np.array([[y] for y in yTestRaw])
case = (num_hidden_layer, num_nodes)
NN = NeuralNetwork(xTrains, yTrains,
num_hidden_layer=num_hidden_layer,
num_nodes=num_nodes,
step_size=step_size)
predictions = np.zeros(yTrains.shape)
for i in range(iterations):
# outputs = NN.feedforward()
# loss = np.mean(np.square(yTrains - outputs))
# training_loss_data[case].append((i, loss))
# if i % 50 == 0: # mean sum squared loss
# print("Case: " + str(case) + " Loss: \n" + str(loss))
# print("\n")
# NN.train(xTrains, yTrains)
predictions = NN.predict()
loss = np.mean(np.square(yTrains - predictions))
predictions = NN.predict(xTests)
# test_loss = np.mean(np.square(yTests - predictions))
test_loss = np.sum(np.square(yTests - predictions)) / 2.
test_ev = Evaluation([x[0] for x in yTests], [1 if x[0] >= 0.5 else 0 for x in predictions])
if i % 10 == 0:
print("Loss: " + str(loss)) # mean sum squared loss
print("Test Loss: " + str(test_loss))
print("Accuracy: {}".format(test_ev.accuracy))
NN.train()
yTestPredicted_prob = [x[0] for x in predictions]
cont_length_fpr_fnr = []
for threshold in thresholds:
yTestPredicted = [int(p >= threshold) for p in yTestPredicted_prob]
# featured = [int(y < threshold) for y in yTestPredicted]
ev = Evaluation(yTest, yTestPredicted)
# if threshold == 0:
# assert (ev.fpr == 0.0), ev
print(ev)
cont_length_fpr_fnr.append((ev.fpr, ev.fnr))
graphs.append(cont_length_fpr_fnr)
# plotting
start = thresholds[0]
end = thresholds[-1]
step = thresholds[1] - thresholds[0]
fname = 'prob2{}_{}_{}_{}.png'.format(inspect.stack()[0][3], start, end, step)
img_fname = os.path.join(prob2_report_path, fname)
draw_accuracies(graphs,
'False Positive Rate', 'False Negative Rate',
                    'ROC Curve Comparison',
img_fname,
legends=legends,
invert_yaxis=True,
data_pt='-',
title_y=1.05)
if __name__ == '__main__':
(xRaw, yRaw) = LoadRawData(kDataPath, includeLeftEye=True, includeRightEye=True)
(xTrainRaw, yTrainRaw, xTestRaw, yTestRaw) = TrainTestSplit(xRaw, yRaw, percentTest=.25)
config = {'min_to_split': 2,
'bagging_w_replacement': True,
'num_trees': 40,
'feature_restriction': 0}
start = 0
end = 1
N = 100
thresholds = [x / N for x in range(N + 1)]
compare_roc_curves_nn_rf(xTrainRaw, yTrainRaw,
xTestRaw, yTestRaw,
config, thresholds,
num_hidden_layer=2,
num_nodes=15,
step_size=0.08,
iterations=200)
| null |
Assignment8/Code/assignments/previous/hw6/prob2_roc_curves.py
|
prob2_roc_curves.py
|
py
| 5,173 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "Assignment6.Code.report_path",
"line_number": 12,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "utils.Assignment5Support.Featurize",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "model.NeuralNetworkModel",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "model.RandomForestsModel.RandomForestModel",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "model.NeuralNetworkModel.fit",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "model.NeuralNetworkModel",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "model.NeuralNetworkModel.predict_probabilities",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "model.NeuralNetworkModel",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "utils.EvaluationsStub.Evaluation",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "model.NeuralNetworkModel.NeuralNetwork",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.square",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.square",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "utils.EvaluationsStub.Evaluation",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "utils.EvaluationsStub.Evaluation",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "inspect.stack",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "utils.Assignment4Support.draw_accuracies",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "utils.Assignment5Support.LoadRawData",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "Assignment6.Code.kDataPath",
"line_number": 120,
"usage_type": "argument"
},
{
"api_name": "utils.Assignment5Support.TrainTestSplit",
"line_number": 121,
"usage_type": "call"
}
] |
308717504
|
import os
import numpy as np
from glob import glob
import random
# image data
import SimpleITK as sitk
from ops.preprocessing import Preprocessing
# Learning
import torch
import torch.nn as nn
from ops.data_loader import Create_Batch
from train import training
from validation import validation
from test import testing
from ops.util import init_model
import argparse
import warnings
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser()
parser.add_argument("--gpu_idx",type=int,default=0)
parser.add_argument("--n_epoch",type=int,default=100)
parser.add_argument("--patch_size",type=int,default=64)
parser.add_argument("--n_patch",type=int,default=10000000)
parser.add_argument("--batch_size",type=int,default=3072)
parser.add_argument("--root",type=str,default='/mnt/disk1/data/MRI_Data/')
parser.add_argument("--data_name",type=str,default='YS')
parser.add_argument("--n_class",type=int,default=2)
parser.add_argument("--n_mode",type=int,default=2)
parser.add_argument("--volume_size",type=int,default=512)
parser.add_argument("--learning_rate",type=float,default=0.0002)
parser.add_argument("--tr_dim",type=int,default=2)
parser.add_argument("--tr_bl",type=int,default=1)
args = parser.parse_args()
use_gpu = '{},{}'.format(args.gpu_idx,args.gpu_idx+1)
os.environ["CUDA_VISIBLE_DEVICES"]=use_gpu
n_channel = 1
out_dim = 2
n4b = False  # Whether or not to use the N4 bias corrected image
n4b_apply = True  # Perform N4 bias correction if the corrected image does not exist
print('----------------------------------------------')
print(args)
print('----------------------------------------------')
# Init models
models, model_path = init_model(args, n4b)
# Init optimizer, loss function
optimizer = torch.optim.Adam(models[2].parameters(), lr=args.learning_rate) # classifier optimizer
loss_func = nn.BCEWithLogitsLoss().cuda()
# Preprocessing
pp = Preprocessing(args, n4b, n4b_apply)
p_path, all_len = pp.preprocess()
if args.tr_bl == 1 and args.data_name != 'YS':
# Create data batch
tr_bc = Create_Batch(args.batch_size, int(args.patch_size/2), args.n_mode-1, p_path+'/train')
tr_batch = tr_bc.db_load()
val_path = glob(p_path+'/validation/**')
val_batch = []
for path in val_path:
val_bc = Create_Batch(args.batch_size, int(args.patch_size/2), args.n_mode-1, path)
val_batch.append(val_bc.db_load())
# Training & Validation
cnt = 1
for ep in range(args.n_epoch):
# Training
models, cnt = training(args, tr_batch, models, loss_func, optimizer, cnt, model_path)
        # Validation on the training set
validation(args, tr_batch, models, ep)
# Validation
for b in val_batch:
validation(args, b, models, ep)
else:
# Real MR data test (Segmentation)
if args.data_name == 'YS':
test_bc = Create_Batch(args.batch_size, int(args.patch_size/2), args.n_mode-1, p_path+'/test_ys/0')
test_batch = test_bc.db_load()
testing(args, test_batch, models, 0)
# MICCAI MR data test (Segmentation)
else:
val_path = glob(p_path+'/validation/**')
val_batch = []
for path in val_path:
val_bc = Create_Batch(args.batch_size, int(args.patch_size/2), args.n_mode-1, path)
val_batch.append(val_bc.db_load())
idx = 2
for b in val_batch:
testing(args, b, models, idx)
idx += 1
| null |
main.py
|
main.py
|
py
| 3,408 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "warnings.filterwarnings",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "ops.util.init_model",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.BCEWithLogitsLoss",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "ops.preprocessing.Preprocessing",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "ops.data_loader.Create_Batch",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "ops.data_loader.Create_Batch",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "train.training",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "validation.validation",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "validation.validation",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "ops.data_loader.Create_Batch",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "test.testing",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "ops.data_loader.Create_Batch",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "test.testing",
"line_number": 113,
"usage_type": "call"
}
] |
534070130
|
# -*- coding:utf-8 -*-
from .sortbase import ConditionWithSort
import psutil
class FreeSpaceCondition(ConditionWithSort):
def __init__(self, settings):
ConditionWithSort.__init__(self, settings['action'])
        self._min = settings['min'] * 1073741824  # convert the limit from GiB to bytes
self._path = settings['path']
def apply(self, torrents):
torrents = list(torrents)
ConditionWithSort.sort_torrents(self, torrents)
_, _, free_space, _ = psutil.disk_usage(self._path)
for torrent in torrents:
if free_space < self._min:
free_space += torrent.size
self.remove.add(torrent)
else:
self.remain.add(torrent)
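# Illustrative settings (the key names match what __init__ reads; the values
# here are hypothetical):
#
#     FreeSpaceCondition({'action': 1, 'min': 10, 'path': '/downloads'})
#
# keeps marking the worst-ranked torrents for removal, crediting their sizes
# back, until at least 10 GiB would be free under /downloads.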
| null |
autoremovetorrents/condition/freespace.py
|
freespace.py
|
py
| 728 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sortbase.ConditionWithSort",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "sortbase.ConditionWithSort.__init__",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sortbase.ConditionWithSort",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "sortbase.ConditionWithSort.sort_torrents",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sortbase.ConditionWithSort",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "psutil.disk_usage",
"line_number": 16,
"usage_type": "call"
}
] |
90738847
|
import os
import time
import shutil
from uuid import getnode
from abc import ABC, abstractmethod
from recording.src.coding_framework.BDAConfigParser import g_config
from recording.src.speech_recognition.BDAVideoAudioUtilities import BDAVideoToAudio
from google.cloud import speech
from google.cloud import storage
from google.cloud.speech import types
# Class hierarchy
# ---------------
#                                    +---------------------+
#                                    |  GoogleStorageBase  |
#                                    +----------+----------+
#                                               ^
#                                               |
#                  +----------------------------+---------------------------+
#                  |                                                        |
#   +--------------+---------------------+               +------------------+--------+
#   | BDAGoogleStorageUploadAbstractBase |               |  BDAGoogleStorageConsume  |
#   +--------------+---------------------+               +---------------------------+
#                  ^
#                  |
#        +---------+---------------+
#        |                         |
#   +----+-------------------+   +-+-----------------------------+
#   | BDAGoogleStorageUpload |   | BDAGoogleStorageConvertUpload |
#   +------------------------+   +-------------------------------+
#
class GoogleStorageBase(ABC):
def __init__(self):
"""
Constructor
:param path_to_file: Absolute path to video file. Tested on MP4 video format.
"""
super().__init__()
self.__load_env_variables()
self.__ensure_bucket_availability()
def __load_env_variables(self):
"""
Google Speech Recognition requires a 'GOOGLE_APPLICATION_CREDENTIALS' environment variable, pointing to the
JSON file containing the relevant credentials.
In case the mentioned environment variable is not present, this method creates it by extracting the relevant
information from the main config file through BDAConfigParser.
:return: None
"""
google_speech_env_var = g_config.get_value('SpeechRecognition', 'GoogleCredentials_EnvVarName')
if os.environ.get(google_speech_env_var) is None:
os.environ[google_speech_env_var] = g_config.get_value('SpeechRecognition',
'GoogleCredentials_FilePath')
def __ensure_bucket_availability(self):
"""
Ensures the relevant bucket is available on Google Storage.
:return: None
"""
bucket_name = g_config.get_value('SpeechRecognition', 'GoogleStorage_BucketName')
if bucket_name is None:
raise ValueError('Key not found in config file: SpeechRecognition::GoogleStorage_BucketName')
storage_client = storage.Client()
if storage_client.lookup_bucket(bucket_name) is None:
# Create the new bucket
storage_client.create_bucket(bucket_name)
class BDAGoogleStorageUploadAbstractBase(GoogleStorageBase):
####################
# Private members
__session_id = None
__bucket_name = None
__blob_path = None
####################
# Protected members
_path_to_file = None
_working_dir = None
####################
def __init__(self, path_to_file):
"""
Constructor
:param path_to_file: Absolute path to video file. Tested on MP4 video format.
"""
super().__init__()
self.__initialization(path_to_file)
def __del__(self):
"""
Destructor
"""
self.__clean_up()
@abstractmethod
def upload_file(self):
pass
def _upload_blob(self, path_to_file):
"""
Uploads the audio file to the appropriate bucket on Google Storage.
:return: None
"""
storage_client = storage.Client()
bucket = storage_client.get_bucket(self.__bucket_name)
blob = bucket.blob(self.__blob_path)
print("Beginning Upload on [" + path_to_file + "]")
blob.upload_from_filename(path_to_file)
print("Video Uploaded!")
return self.__bucket_name, self.__blob_path
def __initialization(self, path_to_file):
"""
Validation, clean-up and initialization
:param path_to_file: Absolute path to video file.
:return: None
:raises ValueError in case validation fails.
"""
if not os.path.isfile(path_to_file):
raise ValueError('File not found.')
idx_last_slash = path_to_file.rfind('/')
if idx_last_slash < 0:
raise ValueError('Invalid file path specified: %s' % path_to_file)
self.__bucket_name = g_config.get_value('SpeechRecognition', 'GoogleStorage_BucketName')
if self.__bucket_name is None:
raise ValueError('Key not found in config file: SpeechRecognition::GoogleStorage_BucketName')
self.__clean_up()
self._path_to_file = path_to_file
# Session ID --> MACAddress_TimestampInMS
self.__session_id = str(getnode()) + '_' + str(int(time.time() * 1000))
self.__blob_path = self.__session_id + '/audioFile.flac'
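        # For illustration: getnode() yields the machine's MAC address as an
        # integer and time.time() * 1000 a millisecond timestamp, so a blob
        # path looks like '44883553471107_1522595041002/audioFile.flac' (the
        # same shape as the commented example at the bottom of this file).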
self._working_dir = path_to_file[: idx_last_slash + 1] + self.__session_id + '/'
if not os.path.exists(self._working_dir):
os.makedirs(self._working_dir)
def __clean_up(self):
"""
        Delete the working directory with its contents.
:return: None
"""
if self._working_dir is not None and os.path.isdir(self._working_dir):
shutil.rmtree(self._working_dir)
class BDAGoogleStorageUpload(BDAGoogleStorageUploadAbstractBase):
def __init__(self, path_to_file):
"""
Constructor
:param path_to_file: Absolute path to file.
"""
super().__init__(path_to_file)
def upload_file(self):
"""
Upload file to Google Storage platform, without FLAC file format validation.
:return: (bucket_name, blob_path)
"""
return self._upload_blob(self._path_to_file)
def upload_file_debug(self):
"""
Upload file to Google Storage platform, with FLAC file format validation.
:return: (bucket_name, blob_path)
"""
is_valid, result = BDAVideoToAudio.verify_audio_flac_format(self._path_to_file)
if is_valid is False:
raise ValueError('Input audio file does not meet the FLAC format requirements.')
return self._upload_blob(self._path_to_file)
class BDAGoogleStorageConvertUpload(BDAGoogleStorageUploadAbstractBase):
def __init__(self, path_to_file):
"""
Constructor
:param path_to_file: Absolute path to file.
"""
super().__init__(path_to_file)
def upload_file(self):
"""
Convert media file to FLAC and upload to Google Storage platform.
:return: (bucket_name, blob_path)
"""
path_to_flac_file = BDAVideoToAudio.video_to_audio(self._path_to_file, self._working_dir, 48000, 'flac')
return self._upload_blob(path_to_flac_file)
class BDAGoogleStorageConsume(GoogleStorageBase):
def __init__(self):
"""
Constructor
"""
super().__init__()
def transcribe_file(self, bucket_name, blob_path):
"""
Transcribe file on Google Storage. Deletes file when transcription is complete.
:param bucket_name: Bucket name on Google Storage
:param blob_path: Path to FLAC file on Google Storage
:return: Transcribed string
"""
client = speech.SpeechClient()
audio = types.RecognitionAudio(uri='gs://' + bucket_name + '/' + blob_path)
config = types.RecognitionConfig(
encoding="FLAC",
sample_rate_hertz=48000,
language_code='en-US')
operation = client.long_running_recognize(config, audio)
transcription_results = operation.result()
self.__delete_blob(bucket_name, blob_path)
transcribed_result = ''
for idx in range(len(transcription_results.results)):
if len(transcription_results.results[idx].alternatives) > 0:
transcribed_result += ' '
transcribed_result += transcription_results.results[idx].alternatives[0].transcript
return transcribed_result
def __delete_blob(self, bucket_name, blob_path):
"""
Deletes the FLAC file from the bucket on Google Storage.
:return: None
"""
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(blob_path)
if blob.exists():
blob.delete()
############################################################
# path_to_file_invalid = '/home/niki/Desktop/deleteMe/FFmpy/aaa/JohnOliver.mp4.Stereo.flac'
# path_to_file_valid = '/home/niki/Desktop/deleteMe/FFmpy/aaa/000_split_JohnOliver.mp4.flac'
#
# try:
# x = BDAGoogleStorageUpload(path_to_file_valid)
# bucket_name, path_to_blob = x.upload_file()
# print(bucket_name)
# print(path_to_blob)
#
# except ValueError as e:
# print(str(e))
############################################################
# path_to_video_file = '/home/niki/Desktop/deleteMe/FFmpy/aaa/JohnOliver.mp4'
# try:
# x = BDAGoogleStorageConvertUpload(path_to_video_file)
# bucket_name, path_to_blob = x.upload_file()
# print(bucket_name)
# print(path_to_blob)
#
# except ValueError as e:
# print(str(e))
############################################################
# bucket_name = 'big_data_assignment_bucket'
# blob_path = '44883553471107_1522595041002/audioFile.flac'
# try:
# x = BDAGoogleStorageConsume()
# result = x.transcribe_file(bucket_name, blob_path)
# print(result)
#
# except ValueError as e:
# print(str(e))
############################################################
| null |
recording/src/speech_recognition/BDAGoogleStorage.py
|
BDAGoogleStorage.py
|
py
| 10,566 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "abc.ABC",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "recording.src.coding_framework.BDAConfigParser.g_config.get_value",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "recording.src.coding_framework.BDAConfigParser.g_config",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "os.environ.get",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "recording.src.coding_framework.BDAConfigParser.g_config.get_value",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "recording.src.coding_framework.BDAConfigParser.g_config",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "recording.src.coding_framework.BDAConfigParser.g_config.get_value",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "recording.src.coding_framework.BDAConfigParser.g_config",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "google.cloud.storage.Client",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "google.cloud.storage",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "google.cloud.storage.Client",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "google.cloud.storage",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "recording.src.coding_framework.BDAConfigParser.g_config.get_value",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "recording.src.coding_framework.BDAConfigParser.g_config",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "uuid.getnode",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "recording.src.speech_recognition.BDAVideoAudioUtilities.BDAVideoToAudio.verify_audio_flac_format",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "recording.src.speech_recognition.BDAVideoAudioUtilities.BDAVideoToAudio",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "recording.src.speech_recognition.BDAVideoAudioUtilities.BDAVideoToAudio.video_to_audio",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "recording.src.speech_recognition.BDAVideoAudioUtilities.BDAVideoToAudio",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "google.cloud.speech.SpeechClient",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "google.cloud.speech",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "google.cloud.speech.types.RecognitionAudio",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "google.cloud.speech.types",
"line_number": 217,
"usage_type": "name"
},
{
"api_name": "google.cloud.speech.types.RecognitionConfig",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "google.cloud.speech.types",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "google.cloud.storage.Client",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "google.cloud.storage",
"line_number": 241,
"usage_type": "name"
}
] |
379746640
|
import numpy as np
import torch, torchvision
import torch.nn as nn
from torchvision import datasets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from PIL import Image
from dataset_3 import CatAndDogsDataset
class net(nn.Module):
def __init__(self):
super(net, self).__init__()
self.net = nn.Sequential(nn.Conv2d(3, 10, 3, padding=1), nn.ReLU(),
nn.Conv2d(10, 10, 3, padding=1), nn.ReLU(),
nn.Conv2d(10, 10, 3, padding=1, bias=False), nn.MaxPool2d((2, 2)),
nn.BatchNorm2d(10),
nn.Conv2d(10, 10, 3, padding=1), nn.ReLU(),
nn.Conv2d(10, 10, 3, padding=1), nn.ReLU(),
nn.Conv2d(10, 10, 3, padding=1, bias=False), nn.MaxPool2d((2, 2)),
nn.BatchNorm2d(10),
nn.Conv2d(10, 10, 3, padding=1), nn.ReLU(),
nn.Conv2d(10, 10, 3, padding=1), nn.ReLU(),
nn.Conv2d(10, 10, 3, padding=1, bias=False), nn.MaxPool2d((2, 2)),
nn.BatchNorm2d(10),
nn.Conv2d(10, 10, 3, padding=1), nn.ReLU(),
nn.Conv2d(10, 10, 3, padding=1), nn.ReLU(),
nn.Conv2d(10, 10, 3, padding=1), nn.MaxPool2d((2, 2)),
nn.Flatten(), # out is 10*2*2 dims
nn.Linear(40, 20), nn.LeakyReLU(),
nn.Dropout(),
nn.Linear(20, 10), nn.LeakyReLU(),
nn.Dropout(),
nn.Linear(10, 5), nn.LeakyReLU(),
nn.Linear(5, 2))
def forward(self, x):
return self.net(x)
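# Sanity check on the flatten size (for illustration): 32x32 inputs go through
# four 2x2 max-pools (32 -> 16 -> 8 -> 4 -> 2), so the last conv block emits
# 10 channels x 2 x 2 = 40 features, which is why the first fully connected
# layer is nn.Linear(40, 20).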
transform = transforms.Compose([transforms.Resize((32, 32)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
my_dataset = CatAndDogsDataset('data/PetImages/', transform=transform)
train_weight = 0.7
train_size = int(train_weight * len(my_dataset))
test_size = len(my_dataset) - train_size
train_set, test_set = torch.utils.data.random_split(my_dataset, [train_size, test_size])
train_ldr = torch.utils.data.DataLoader(
train_set, batch_size=64, shuffle=True, num_workers=4, pin_memory=True)
test_ldr = torch.utils.data.DataLoader(
test_set, batch_size=64, shuffle=False, num_workers=4, pin_memory=True)
device = 'cuda:0'
model = net()
# model.load_state_dict(torch.load("net.pth"))
model = model.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
def train(num_epochs):
train_loss = torch.zeros((num_epochs), device=device)
test_loss = torch.zeros((num_epochs), device=device)
test_acc = torch.zeros((num_epochs), device=device)
for i in range(num_epochs):
model.train()
for (x, y) in train_ldr:
outputs = model(x.cuda())
loss = criterion(outputs, y.cuda())
train_loss[i] += loss.detach().sum()
optimizer.zero_grad()
loss.backward()
optimizer.step()
with torch.no_grad():
model.eval()
for inputs, targets in test_ldr:
outputs = model(inputs.to(device))
targets = targets.to(device)
loss = criterion(outputs, targets)
test_loss[i] += loss.sum()
_, predicted = outputs.max(1)
predicted = (predicted > 0.5).float()
test_acc[i] += (predicted == targets).sum().float()
print(f"{i+1}/{num_epochs}")
train_loss /= (train_ldr.batch_size * len(train_ldr))
test_loss /= (test_ldr.batch_size * len(test_ldr))
test_acc /= (test_ldr.batch_size * len(test_ldr))
return train_loss.cpu(), test_loss.cpu(), test_acc.cpu()
if __name__ == "__main__":
num_epochs=30
epochs_train_loss, epochs_test_loss, test_acc = train(num_epochs)
# plt.plot(epochs_train_loss, label='training loss')
# plt.plot(epochs_test_loss, label='testing loss')
# plt.plot(test_acc, label='testing accuracy')
# plt.legend()
# plt.show()
fig, ax1 = plt.subplots()
idx = range(num_epochs)
ax1.set_xlabel('epoch')
ax1.set_ylabel('loss')
ax1.plot(idx, epochs_train_loss, label='training loss')
ax1.plot(idx, epochs_test_loss, label='testing loss')
ax1.legend(loc='upper left')
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
ax2.set_ylabel('accuracy') # we already handled the x-label with ax1
ax2.plot(idx, test_acc, 'k', label='testing accuracy')
ax2.legend(loc='upper right')
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.show()
torch.save(model.state_dict(), "net.pth")
| null |
homeworks/homework2/cnn_4.py
|
cnn_4.py
|
py
| 5,143 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.nn.Module",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.nn.Flatten",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.nn.Linear",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.RandomHorizontalFlip",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "dataset_3.CatAndDogsDataset",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.random_split",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "torch.zeros",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "torch.save",
"line_number": 134,
"usage_type": "call"
}
] |
631303399
|
from serial import Serial
class StepperMotor():
"""Stepper Motor with Serial"""
def __init__(self, serial_port, baud_rate):
self.serial_port = serial_port
self.baud_rate = baud_rate
self.serial = None
self.connected = False
    def connect(self):
        try:
            # Open the serial connection using the configured port and baud rate
            self.serial = Serial(self.serial_port, self.baud_rate)
            self.connected = True
        except Exception:
            print("Failed to connect to serial port", self.serial_port)
    def disconnect(self):
        try:
            if self.connected:
                self.serial.close()
        except Exception:
            print("Failed to disconnect from serial port", self.serial_port)
        self.connected = False
def move(self,angle):
if(self.serial is None or not self.connected):
print("Device is not connected")
return
angle = int(angle)
data = str(angle)+'\r\n'
        self.serial.write(data.encode())
| null |
Python/StepperMotor.py
|
StepperMotor.py
|
py
| 970 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "serial.Serial",
"line_number": 14,
"usage_type": "call"
}
] |
306191441
|
'''
MIT License
Copyright (c) 2019 Arshdeep Bahga and Vijay Madisetti
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import os
import boto.s3
conn = boto.connect_s3(aws_access_key_id='<enter>',
aws_secret_access_key='<enter>')
def percent_cb(complete, total):
print ('.')
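# Upload a local file into the bucket under the key 'path/filename', reporting progress via percent_cb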
def upload_to_s3_bucket_path(bucketname, path, filename):
mybucket = conn.get_bucket(bucketname)
fullkeyname=os.path.join(path,filename)
key = mybucket.new_key(fullkeyname)
key.set_contents_from_filename(filename, cb=percent_cb, num_cb=10)
#key.make_public(recursive=False)
def upload_to_s3_bucket_root(bucketname, filename):
mybucket = conn.get_bucket(bucketname)
key = mybucket.new_key(filename)
key.set_contents_from_filename(filename, cb=percent_cb, num_cb=10)
def getuserfiles(bucketname,username):
mybucket = conn.get_bucket(bucketname)
keys = mybucket.list(username)
totalsize=0.0
userfiles = {}
for key in keys:
value=[]
#value.append(key.name)
filename = key.name
filename=filename.replace(username+'/media/','')
value.append(key.last_modified)
keysize = float(key.size)/1000.0
value.append(str(keysize))
userfiles[filename]=value
totalsize = totalsize + float(key.size)
totalsize = totalsize/1000000.0
return userfiles,totalsize
def delete_from_s3(bucketname, username,filename):
mybucket = conn.get_bucket(bucketname)
mybucket.delete_key(username+'/media/'+filename)
| null |
imagepro/myapp/s3upload.py
|
s3upload.py
|
py
| 2,384 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "boto.s3.connect_s3",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "boto.s3",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "attribute"
}
] |
166454965
|
import sys
from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets(".", one_hot=True, reshape=False)
mnist = input_data.read_data_sets(r'K:\datasets\MNIST', one_hot=True, reshape=False)
# The MNIST dataset is already provided in one-hot encoded form
import tensorflow as tf
save_file = './train_model.ckpt'
saving = False
restored = False
if len(sys.argv) > 1:
if sys.argv[1] == '-s' or sys.argv[1] == '-S':
saving =True
elif sys.argv[1] == '-r' or sys.argv[1] == '-R':
restored=True
else:
pass
else:
pass
# The learning hyperparameters below can be tuned freely
learning_rate = 0.001
training_epochs = 20
batch_size = 128 # Decrease batch size if you don't have enough memory
display_step = 1
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
# Number of hidden units, i.e. the number H of ReLUs discussed in the previous section; this value can also be tuned
n_hidden_layer = 256 # layer number of features
# Two sets of weights and biases are needed: one for w*x+b and another for w1*(ReLU output)+b1
weights = {
'hidden_layer': tf.Variable(tf.random_normal([n_input, n_hidden_layer])),
'hidden_layer2': tf.Variable(tf.random_normal([n_hidden_layer, n_hidden_layer])),
'out': tf.Variable(tf.random_normal([n_hidden_layer, n_classes]))
}
biases = {
'hidden_layer': tf.Variable(tf.random_normal([n_hidden_layer])),
'hidden_layer2': tf.Variable(tf.random_normal([n_hidden_layer])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
# tf Graph input
x = tf.placeholder("float", [None, 28, 28, 1])
y = tf.placeholder("float", [None, n_classes])
# Since the input is a 28*28*1 image, it must be flattened into a 784-element 1D array
x_flat = tf.reshape(x, [-1, n_input])
# Build a two-layer neural network
# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x_flat, weights['hidden_layer']), biases['hidden_layer'])
layer_1 = tf.nn.relu(layer_1)
# layer_1 = tf.nn.sigmoid(layer_1)
# Output layer with linear activation
layer_2= tf.add(tf.matmul(layer_1, weights['hidden_layer2']), biases['hidden_layer2'])
logits = tf.matmul(layer_2, weights['out']) + biases['out']
# GradientDescentOptimizer was covered in the TensorFlow introduction chapter
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
loss_plot=[]
x_axis=[]
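# loss_plot and x_axis record the per-epoch training loss for the matplotlib plot drawn after training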
saver = tf.train.Saver()
# Launch the graph
with tf.Session() as sess:
if not restored:
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
total_batch = int(mnist.train.num_examples/batch_size)
# Loop over all batches
for i in range(total_batch):
                # mnist.train.next_batch() returns a new mini-batch (subset) of the training set on each call
batch_x, batch_y = mnist.train.next_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
# Display logs per epoch step
if epoch % display_step == 0:
c = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
print("Epoch:", '%04d' % (epoch+1), "cost=", \
"{:.9f}".format(c))
loss_plot.append(c)
x_axis.append(epoch)
print("Optimization Finished!")
# Test model
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# Decrease test_size if you don't have enough memory
test_size = 256
total_accuracy=accuracy.eval({x: mnist.test.images[:test_size], y: mnist.test.labels[:test_size]})
print("Accuracy:", total_accuracy)
if not saving:
pass
else:
saver.save(sess, save_file)
print('Trained Model Saved.')
else:
saver.restore(sess, save_file)
# Test model
correct_prediction = tf.equal(tf.argmax(logits, 1),tf.argmax(y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
test_accuracy = sess.run(accuracy,feed_dict={x: mnist.test.images, y: mnist.test.labels})
print('Test Accuracy: {}'.format(test_accuracy))
print("Model loaded!")
import matplotlib.pyplot as plt
import numpy as np
plt.xlabel('epoch')
plt.ylabel('loss')
my_x_ticks = np.arange(0,training_epochs,10)
plt.xticks(my_x_ticks)
plt.title('sigmoid:learning_rate = 0.001,training_epochs = 20,batch_size = 128,3layers')
plt.plot(x_axis,loss_plot,'r')
plt.annotate("accuracy=%s , Loss= %s" %(total_accuracy,loss_plot[-1]), xy=(training_epochs-1,loss_plot[-1]), xytext=(-40, 10), textcoords='offset points')
plt.show()
| null |
Class1/4-02_Save_and_restore_tensorflow_models/multilayer_perceptron.py
|
multilayer_perceptron.py
|
py
| 4,805 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "tensorflow.examples.tutorials.mnist.input_data",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.Variable",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "tensorflow.random_normal",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "tensorflow.Variable",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "tensorflow.random_normal",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "tensorflow.Variable",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "tensorflow.random_normal",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "tensorflow.Variable",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "tensorflow.random_normal",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "tensorflow.Variable",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "tensorflow.random_normal",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "tensorflow.Variable",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "tensorflow.random_normal",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "tensorflow.reshape",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "tensorflow.add",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "tensorflow.matmul",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.relu",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.add",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "tensorflow.matmul",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "tensorflow.matmul",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_mean",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.softmax_cross_entropy_with_logits",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.GradientDescentOptimizer",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.global_variables_initializer",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.Saver",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.Session",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "tensorflow.equal",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "tensorflow.argmax",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_mean",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "tensorflow.cast",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "tensorflow.equal",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "tensorflow.argmax",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_mean",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "tensorflow.cast",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.annotate",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 130,
"usage_type": "name"
}
] |
58415856
|
#!/usr/bin/env python
'''viewStructures.py
Simple wrapper functions that uses ipywidgets and py3Dmol to view a list of
protein structures.
'''
__author__ = "Mars (Shih-Cheng) Huang"
__maintainer__ = "Mars (Shih-Cheng) Huang"
__email__ = "[email protected]"
__version__ = "0.2.0"
__status__ = "Done"
from ipywidgets import interact, IntSlider, Dropdown
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import py3Dmol
def view_structure(pdbIds, bioAssembly = False, style='cartoon', color='spectrum'):
'''A wrapper function that simply displays a list of protein structures using
ipywidgets and py3Dmol
Parameters
----------
pdbIds : list
A list of PDBIDs to display
bioAssembly : bool
display bioAssembly
style : str, optional
Style of 3D structure (stick line cross sphere cartoon VDW MS)
color : str, optional
Color of 3D structure
'''
if type(pdbIds) == str:
pdbIds = [pdbIds]
def view3d(i=0):
'''Simple structure viewer that uses py3Dmol to view PDB structure by
indexing the list of PDBids
Parameters
----------
i (int): index of the protein if a list of PDBids
'''
print(f"PdbID: {pdbIds[i]}, Style: {style}")
if '.' not in pdbIds[i]:
viewer = py3Dmol.view(query='pdb:' + pdbIds[i], options={'doAssembly': bioAssembly})
viewer.setStyle({style: {'color': color}})
viewer.setStyle({'hetflag': True},{'stick':{'singleBond':False}})
else:
pdbid,chainid = pdbIds[i].split('.')
viewer = py3Dmol.view(query='pdb:' + pdbid, options={'doAssembly': bioAssembly})
viewer.setStyle({})
viewer.setStyle({'chain': chainid}, {style: {'color': color}})
viewer.setStyle({'chain': chainid, 'hetflag': True},{'stick':{'singleBond':False}})
viewer.zoomTo({'chain': chainid})
return viewer.show()
s_widget = IntSlider(min=0, max=len(pdbIds)-1, description='Structure', continuous_update=False)
return interact(view3d, i=s_widget)
def view_group_interaction(pdbIds, interacting_group='None', style='cartoon', color='spectrum'):
'''A wrapper function that simply displays a list of protein structures using
ipywidgets and py3Dmol and highlight specified interacting groups
Parameters
----------
pdbIds : list
A list of PDBIDs to display
interacting_atom : str, optional
The interacting atom to highlight
style : str, optional
Style of 3D structure (stick line cross sphere cartoon VDW MS)
color : str, optional
Color of 3D structure
'''
if type(pdbIds) == str:
pdbIds = [pdbIds]
def view3d(i=0):
'''Simple structure viewer that uses py3Dmol to view PDB structure by
indexing the list of PDBids
Parameters
----------
i (int): index of the protein if a list of PDBids
'''
print(
f"PdbID: {pdbIds[i]}, Interactions: {interacting_group}, Style: {style}")
viewer = py3Dmol.view(query='pdb:' + pdbIds[i])
viewer.setStyle({style: {'color': color}})
if interacting_group != "None":
viewer.setStyle({'resn': interacting_group}, {
'sphere': {}})
return viewer.show()
s_widget = IntSlider(min=0, max=len(pdbIds)-1, description='Structure', continuous_update=False)
return interact(view3d, i=s_widget)
def view_binding_site(pdbIds=None, groups=None, chains=None, distance=3.0):
'''A wrapper function that zooms in to a group of a protein structure and highlight
its neighbors within a certain distance.
Parameters
----------
pdbIds : list, optional
A list of PDBIDs to display
groups : list, optional
A list of groups to center at for each protein structure
chains : list, optional
A list of chains specified for each protein structure.
If no chains is specified, chain 'A' will be default to
all structures.
cutoffDistance : float, optional
The cutoff distance use the find the neighbors of specified group
'''
if pdbIds is None or groups is None:
raise ValueError("PdbIds and groups need to be specified")
if len(pdbIds) != len(groups):
raise ValueError(
"Number of structures should match with number of groups")
    if type(pdbIds) == str and type(groups) == str:
pdbIds, groups = [pdbIds], [groups]
if chains is None:
chains = ['A'] * len(pdbIds)
def view3d(i=0):
'''Simple structure viewer that zooms into a specified group and highlight
its neighbors
Parameters
----------
i (int): index of the protein if a list of PDBids
'''
print(
f"PDB: {pdbIds[i]}, group: {groups[i]}, chain: {chains[i]}, cutoffDistance: {distance}")
if type(groups[i]) == int:
center = {'resi': groups[i], 'chain': chains[i]}
neighbors = {'resi': groups[i], 'chain': chains[i],
'byres': 'true', 'expand': distance}
else:
center = {'resn': groups[i], 'chain': chains[i]}
neighbors = {'resn': groups[i], 'chain': chains[i],
'byres': 'true', 'expand': distance}
viewer = py3Dmol.view(query='pdb:' + pdbIds[i])
viewer.setStyle(neighbors, {'stick': {}});
viewer.setStyle(center, {'sphere': {'colorscheme': 'orangeCarbon'}})
viewer.zoomTo(neighbors)
return viewer.show()
s_widget = IntSlider(min=0, max=len(pdbIds)-1, description='Structure', continuous_update=False)
return interact(view3d, i=s_widget)
def group_interaction_viewer(df, sortBy, metal=None):
'''A wrapper function that zooms in to a group in a protein structure and
highlight its interacting atoms. The input dataframe should be generated
from the GroupInteractionExtractor class.
References
----------
GroupInteractionExtractor: https://github.com/sbl-sdsc/mmtf-pyspark/blob/master/mmtfPyspark/interactions/groupInteractionExtractor.py
Parameters
----------
df : dataframe
the dataframe generated from GroupIneteractionExtractor
sort_by : str
the q value to sort by ['q4','q5','q6']
'''
# Filter by metal
if metal is not None:
df = df[df["element0"] == metal]
# Sort dataframe based on sortBy parameter (q4-6 values)
df = df.sort_values([sortBy], ascending = False).dropna(subset=[sortBy])
if sortBy in ['q4','q5']:
q = 'q' + str(int(sortBy[-1]) + 1)
        df = df[df[q].notna()]  # '!= np.nan' is always True; use notna() to actually drop rows missing the next q value
i_widget = IntSlider(
min=0, max=df.shape[0] - 1, description='Structure', continuous_update=False)
def get_neighbors_chain(i):
return [df[f'chain{j}'].iloc[i] for j in range(1, 7) if df[f'element{j}'] is not None]
def get_neighbors_group(i):
return [df[f'groupNum{j}'].iloc[i] for j in range(1, 7) if df[f'element{j}'] is not None]
def get_neighbors_elements(i):
elements = [df[f'element{j}'].iloc[i]
for j in range(1, 7) if df[f'element{j}'] is not None]
return [str(e).upper() for e in elements]
def view3d(i=0):
'''Simple structure viewer that uses py3Dmol to view PDB structure by
indexing the list of PDBids
Parameters
----------
i : int
index of the protein if a list of PDBids
'''
structures = df['pdbId'].iloc
groups = df['groupNum0'].iloc
chains = df['chain0'].iloc
elements = df['element0'].iloc
ori = str(df[sortBy].iloc[i])[:5]
print(f"PDBId: {structures[i]} chain: {chains[i]} element: {elements[i]}")
print(f"{sortBy}: {ori}")
viewer = py3Dmol.view(query='pdb:' + structures[i], width=700, height=700)
neighbors = {'resi': get_neighbors_group(i), 'chain': get_neighbors_chain(i)}
metal = {'resi': groups[i], 'atom': str(elements[i]).upper(), 'chain': chains[i]}
viewer.setStyle(neighbors, {'stick': {'colorscheme': 'orangeCarbon'}})
viewer.setStyle(metal, {'sphere': {'radius': 0.5, 'color': 'gray'}})
viewer.zoomTo(neighbors)
return viewer.show()
return interact(view3d, i=i_widget)
def metal_distance_widget(df_concat):
'''Plot an violinplot of metal-element distances with ipywidgets
Parameters
----------
df_concat : Dataframe
dataframe of metal-elements distances
'''
metals = df_concat['Metal'].unique().tolist()
m_widget = Dropdown(options = metals, description = "Metals")
def metal_distance_violinplot(metal):
df_metal = df_concat[df_concat["Metal"] == metal].copy()
df_metal['Element'] = df_metal['Element'].apply(lambda x: metal+"-"+x)
# Set fonts
fig, ax = plt.subplots()
fig.set_size_inches(15,6)
subplot = sns.violinplot(x="Element", y="Distance", palette="muted", data=df_metal, ax=ax)
subplot.set(xlabel="Metal Interactions", ylabel="Distance", title=f"{metal} to Elements Distances Violin Plot")
return interact(metal_distance_violinplot, metal=m_widget);
| null |
mmtfPyspark/structureViewer.py
|
structureViewer.py
|
py
| 9,318 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "py3Dmol.view",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "py3Dmol.view",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "ipywidgets.IntSlider",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "ipywidgets.interact",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "py3Dmol.view",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "ipywidgets.IntSlider",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "ipywidgets.interact",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "py3Dmol.view",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "ipywidgets.IntSlider",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "ipywidgets.interact",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 206,
"usage_type": "attribute"
},
{
"api_name": "ipywidgets.IntSlider",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "py3Dmol.view",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "ipywidgets.interact",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "ipywidgets.Dropdown",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 271,
"usage_type": "name"
},
{
"api_name": "seaborn.violinplot",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "ipywidgets.interact",
"line_number": 276,
"usage_type": "call"
}
] |
72923834
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request
from urllib import parse
class SteamSpider(scrapy.Spider):
name = 'steam'
# allowed_domains = ["lab.scrapyd.cn"]
start_urls = ['http://lab.scrapyd.cn/']
def parse(self, response):
        # 1. Collect the URLs of all article entries on the current page and parse each article
        # 2. Get the URL of the next page
        post_urls = response.css("#main .quote.post span:nth-child(2) a::attr(href)").extract()
        for post_url in post_urls:
print(post_url)
yield Request(url=parse.urljoin(response.url, post_url), callback=self.parse_detail)
# main > ol > li.next > a
next_url = response.css("#main .page-navigator .next a::attr(href)").extract_first("")
if next_url:
yield Request(url=parse.urljoin(response.url, next_url), callback=self.parse)
def parse_detail(self, response):
        # Extract the article's fields using CSS selectors
# main > article > h1 > a
title = response.css("#main .post .post-title a::text").extract()[0]
# main > article > div > p:nth-child(1)
content = response.css("#main .post .post-content p::text").extract()[0]
# main > article > p > a:nth-child(1)
tag_list = response.css("#main .post .tags a::text").extract()
print(title)
print(content)
print(tag_list[0], tag_list[1])
| null |
demo1/spiders/steam.py
|
steam.py
|
py
| 1,401 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "scrapy.Spider",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "urllib.parse.urljoin",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "scrapy.Request",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "urllib.parse.urljoin",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 22,
"usage_type": "name"
}
] |
178609921
|
import argparse
import json
import os
import requests
from google.oauth2 import service_account
from google.cloud.iot import DeviceManagerClient, types, enums
from google.api_core.exceptions import NotFound
WOTT_ENDPOINT = os.getenv('WOTT_ENDPOINT', 'https://api.wott.io')
PROJECT = 'wott-244904'
LOCATION = 'europe-west1'
REGISTRY = 'wott_registry'
PUBSUB = 'wott-pubsub'
def create_client():
# iot.DeviceManagerClient doesn't do this himself, unlike other Google libs.
if 'GOOGLE_APPLICATION_CREDENTIALS' in os.environ:
with open(os.environ['GOOGLE_APPLICATION_CREDENTIALS']) as key:
key_json = json.load(key)
credentials = service_account.Credentials.from_service_account_info(key_json)
else:
credentials = None
client = DeviceManagerClient(credentials=credentials)
return client
def create_registry(client, project_name, location, registry_name, pubsub_topic, ca_cert):
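    # Look up the device registry; if it does not exist, create it with MQTT enabled, HTTP disabled, and the WoTT CA certificate attached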
location_path = client.location_path(project_name, location)
registry_path = client.registry_path(project_name, location, registry_name)
try:
registry = client.get_device_registry(registry_path)
except NotFound:
print(f'Registry "{registry_path}" not found, creating.')
registry = types.DeviceRegistry(
id = registry_name,
# name should be empty
mqtt_config = types.MqttConfig(
mqtt_enabled_state = enums.MqttState.MQTT_ENABLED
),
state_notification_config = types.StateNotificationConfig(
pubsub_topic_name = f"projects/{project_name}/topics/{pubsub_topic}"
),
credentials = [types.RegistryCredential(
public_key_certificate=types.PublicKeyCertificate(
format=enums.PublicKeyCertificateFormat.X509_CERTIFICATE_PEM,
certificate=ca_cert
)
)],
http_config = types.HttpConfig(
http_enabled_state=enums.HttpState.HTTP_DISABLED
)
)
registry = client.create_device_registry(location_path, registry)
return registry
def create_or_update_device(client, project_name, location_name, registry_name, device_id, device_cert):
device_name = client.device_path(project_name, location_name, registry_name, 'wott-' + device_id)
try:
device = client.get_device(device_name)
except NotFound:
print(f'Creating new device {device_name}')
device = types.Device(
id='wott-' + device_id,
# name should be empty
credentials=[
types.DeviceCredential(
public_key=types.PublicKeyCredential(
format=enums.PublicKeyFormat.ES256_X509_PEM,
key=device_cert
)
)
]
)
registry_path = client.registry_path(project_name, location_name, registry_name)
device = client.create_device(registry_path, device)
else:
print(f'Updating device {device_name}')
device = types.Device(
name = device_name,
credentials=[
types.DeviceCredential(
public_key=types.PublicKeyCredential(
format=enums.PublicKeyFormat.ES256_X509_PEM,
key=device_cert
)
)
]
)
client.update_device(device, types.FieldMask(paths=['credentials']))
return device
def get_ca_cert(debug):
ca = requests.get(f'{WOTT_ENDPOINT}/v0.2/ca-bundle')
if debug:
print("[RECEIVED] Get CA Cert: {}".format(ca.status_code))
print("[RECEIVED] Get CA Cert: {}".format(ca.content))
if not ca.ok:
print('Failed to get CA...')
print(ca.status_code)
print(ca.content)
return
return ca.json()['ca_bundle']
def get_device_cert(device_id, debug):
cert_request = requests.get(f'{WOTT_ENDPOINT}/v0.2/device-cert/{device_id}')
if debug:
print("[RECEIVED] Get CA Cert: {}".format(cert_request.status_code))
print("[RECEIVED] Get CA Cert: {}".format(cert_request.content))
if not cert_request.ok:
        print('Failed to get device cert...')
print(cert_request.status_code)
print(cert_request.content)
return
return cert_request.content
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--project',
required=True,
help="IoT Project name.")
parser.add_argument(
'--location',
required=True,
help="IoT location region.")
parser.add_argument(
'--registry',
required=False,
default=REGISTRY,
help="IoT Registry name.")
parser.add_argument(
'--pubsub',
required=False,
default=PUBSUB,
help="pubsub name.")
parser.add_argument(
'--device',
required=False,
default='',
help="device id.")
parser.add_argument(
'--debug',
action="store_true",
help="debug mode.")
args = parser.parse_args()
ca_cert = get_ca_cert(args.debug)
client = create_client()
registry = create_registry(client, args.project, args.location, args.registry, args.pubsub, ca_cert)
device_cert = get_device_cert(args.device, args.debug)
device = create_or_update_device(client, args.project, args.location, args.registry, args.device, device_cert)
| null |
wott.py
|
wott.py
|
py
| 5,549 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.getenv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "google.oauth2.service_account.Credentials.from_service_account_info",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "google.oauth2.service_account.Credentials",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "google.oauth2.service_account",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "google.cloud.iot.DeviceManagerClient",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "google.api_core.exceptions.NotFound",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "google.cloud.iot.types.DeviceRegistry",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "google.cloud.iot.types",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "google.cloud.iot.types.MqttConfig",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "google.cloud.iot.types",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "google.cloud.iot.enums.MqttState",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.iot.enums",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "google.cloud.iot.types.StateNotificationConfig",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "google.cloud.iot.types",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "google.cloud.iot.types.RegistryCredential",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "google.cloud.iot.types",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "google.cloud.iot.types.PublicKeyCertificate",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "google.cloud.iot.types",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "google.cloud.iot.enums.PublicKeyCertificateFormat",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.iot.enums",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "google.cloud.iot.types.HttpConfig",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "google.cloud.iot.types",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "google.cloud.iot.enums.HttpState",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.iot.enums",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "google.api_core.exceptions.NotFound",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "google.cloud.iot.types.Device",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "google.cloud.iot.types",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "google.cloud.iot.types.DeviceCredential",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "google.cloud.iot.types",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "google.cloud.iot.types.PublicKeyCredential",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "google.cloud.iot.types",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "google.cloud.iot.enums.PublicKeyFormat",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.iot.enums",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "google.cloud.iot.types.Device",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "google.cloud.iot.types",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "google.cloud.iot.types.DeviceCredential",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "google.cloud.iot.types",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "google.cloud.iot.types.PublicKeyCredential",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "google.cloud.iot.types",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "google.cloud.iot.enums.PublicKeyFormat",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.iot.enums",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "google.cloud.iot.types.FieldMask",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "google.cloud.iot.types",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 133,
"usage_type": "call"
}
] |
165654780
|
# -*- coding: utf-8 -*-
# @Time : 2018-04-05 16:26
# @Author : Dingzh.tobest
# File description: algorithms for computing index-related data
from scada.db_data.index_db_data import *
from scada.util import global_var as gv
import pandas as pd
import json
# Compute the index PE (TTM) value for a given trade date
def compute_index_pe_ttm(index_code, trade_date):
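    # Average pe_ttm over constituents with positive PE; also return the counts of profitable (pe_ttm > 0) and loss-making constituents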
df = get_index_cons(index_code, trade_date)
symbols = ",".join(df['symbol'])
df_data = get_daily_stock_info(symbols, trade_date)
df_data.set_index('symbol', inplace=True)
index_pe_ttm = df_data[df_data.pe_ttm > 0]['pe_ttm'].sum() / len(df_data[df_data.pe_ttm > 0])
return index_pe_ttm, len(df_data[df_data.pe_ttm > 0]), len(df_data[df_data.pe_ttm <= 0])
# Get index PE data from the start date up to today
def get_index_pe_ttm(index_code, start_date):
import time
today = time.strftime('%Y%m%d', time.localtime(time.time()))
trade_date_df = get_trade_cal(start_date, today)
ittm_pe_df = pd.DataFrame(columns=['code', 'date', 'pe_ttm', 'profit', 'loss'])
for index, row in trade_date_df.iterrows():
index_pe_ttm, profit, loss = compute_index_pe_ttm(index_code, row['trade_date'])
trade_date = row['trade_date'][0:4] + "-" + row['trade_date'][4:6] + "-" + row['trade_date'][6:8]
if index_code.endswith('.SH'):
index_code_change = index_code[:6] + ".XSHG"
else:
index_code_change = index_code[:6] + ".XSHE"
ittm_pe_df = ittm_pe_df.append(
{"code": index_code_change, "date": trade_date, "pe_ttm": index_pe_ttm, "profit": profit,"loss": loss}, ignore_index=True)
print(index_code_change + "==>" + trade_date + " : " + str(index_pe_ttm))
gv.getMongoConnection().indexdata.pe_ttm.insert(json.loads(ittm_pe_df.to_json(orient='records')))
| null |
scada/algo_module/index_data_algo.py
|
index_data_algo.py
|
py
| 1,786 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "time.strftime",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "scada.util.global_var.getMongoConnection",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "scada.util.global_var",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 40,
"usage_type": "call"
}
] |
499899601
|
"""
Loader for SDSS individual spectrum files: spec_ files.
.. _spec: https://data.sdss.org/datamodel/files/BOSS_SPECTRO_REDUX/RUN2D/spectra/PLATE4/spec.html
"""
import os
import re
import _io
from astropy.io import fits
from astropy.table import Table
from astropy.wcs import WCS
from astropy.units import Unit, def_unit
from astropy.nddata import StdDevUncertainty, InverseVariance
import numpy as np
from ...spectra import Spectrum1D
from ..registers import data_loader, custom_writer
from ..parsing_utils import read_fileobj_or_hdulist
__all__ = ['spec_identify', 'spSpec_identify',
'spec_loader', 'spSpec_loader']
_spSpec_pattern = re.compile(r'spSpec-\d{5}-\d{4}-\d{3}\.fit')
_spec_pattern = re.compile(r'spec-\d{4,5}-\d{5}-\d{4}\.fits')
def spec_identify(origin, *args, **kwargs):
"""
Check whether given input is FITS and has SDSS-III/IV spec type
BINTABLE in first extension. This is used for Astropy I/O Registry.
"""
# Test if fits has extension of type BinTable and check for spec-specific keys
with read_fileobj_or_hdulist(*args, **kwargs) as hdulist:
return (hdulist[0].header.get('TELESCOP') == 'SDSS 2.5-M' and
hdulist[0].header.get('FIBERID', 0) > 0 and
len(hdulist) > 1 and
(isinstance(hdulist[1], fits.BinTableHDU) and
hdulist[1].header.get('TTYPE3') == 'ivar'))
def spSpec_identify(origin, *args, **kwargs):
"""
    Check whether given input is FITS with SDSS-I/II spSpec type data.
This is used for Astropy I/O Registry.
"""
# Test telescope keyword and check if primary HDU contains data
# consistent with spSpec format
with read_fileobj_or_hdulist(*args, **kwargs) as hdulist:
return (hdulist[0].header.get('TELESCOP') == 'SDSS 2.5-M' and
hdulist[0].header.get('FIBERID', 0) > 0 and
isinstance(hdulist[0].data, np.ndarray) and
hdulist[0].data.shape[0] == 5)
def spPlate_identify(origin, *args, **kwargs):
"""
Check whether given input is FITS with SDSS spPlate fibre spectral data.
This is used for Astropy I/O Registry.
"""
# Test telescope keyword and check if primary HDU contains data
# consistent with spSpec format
with read_fileobj_or_hdulist(*args, **kwargs) as hdulist:
return (hdulist[0].header.get('TELESCOP') == 'SDSS 2.5-M' and
hdulist[0].header.get('FIBERID', 0) <= 0 and
isinstance(hdulist[0].data, np.ndarray) and
hdulist[0].data.shape[0] > 5)
@data_loader(label="SDSS-III/IV spec", identifier=spec_identify, extensions=['fits'])
def spec_loader(file_obj, **kwargs):
"""
Loader for SDSS-III/IV optical spectrum "spec" files.
Parameters
----------
file_obj: str, file-like, or HDUList
FITS file name, object (provided from name by Astropy I/O Registry),
or HDUList (as resulting from astropy.io.fits.open()).
Returns
-------
data: Spectrum1D
The spectrum that is represented by the 'loglam' (wavelength) and 'flux'
data columns in the BINTABLE extension of the FITS `file_obj`.
"""
with read_fileobj_or_hdulist(file_obj, **kwargs) as hdulist:
header = hdulist[0].header
name = header.get('NAME')
meta = {'header': header}
bunit = header.get('BUNIT', '1e-17 erg / (Angstrom cm2 s)')
if 'Ang' in bunit and 'strom' not in bunit:
bunit = bunit.replace('Ang', 'Angstrom')
flux_unit = Unit(bunit)
# spectrum is in HDU 1
flux = hdulist[1].data['flux'] * flux_unit
uncertainty = InverseVariance(hdulist[1].data['ivar'] / flux_unit**2)
dispersion = 10**hdulist[1].data['loglam']
dispersion_unit = Unit('Angstrom')
mask = hdulist[1].data['and_mask'] != 0
return Spectrum1D(flux=flux, spectral_axis=dispersion * dispersion_unit,
uncertainty=uncertainty, meta=meta, mask=mask)
@data_loader(label="SDSS-I/II spSpec", identifier=spSpec_identify, extensions=['fit', 'fits'])
def spSpec_loader(file_obj, **kwargs):
"""
Loader for SDSS-I/II spSpec files.
Parameters
----------
file_obj: str, file-like, or HDUList
FITS file name, object (provided from name by Astropy I/O Registry),
or HDUList (as resulting from astropy.io.fits.open()).
Returns
-------
data: Spectrum1D
The spectrum that is represented by the wavelength solution from the
header WCS and data array of the primary HDU.
"""
with read_fileobj_or_hdulist(file_obj, **kwargs) as hdulist:
header = hdulist[0].header
# name = header.get('NAME')
meta = {'header': header}
wcs = WCS(header).dropaxis(1)
bunit = header.get('BUNIT', '1e-17 erg / (Angstrom cm2 s)')
# fix mutilated flux unit
bunit = bunit.replace('/cm/s/Ang', '/ (Angstrom cm2 s)')
if 'Ang' in bunit and 'strom' not in bunit:
bunit = bunit.replace('Ang', 'Angstrom')
flux_unit = Unit(bunit)
flux = hdulist[0].data[0, :] * flux_unit
uncertainty = StdDevUncertainty(hdulist[0].data[2, :] * flux_unit)
# dispersion along NAXIS1 from the WCS
dispersion = wcs.pixel_to_world(np.arange(flux.shape[0]))
# convert out of logspace (default for spSpec/spPlate spectra)?
if header.get('DC-Flag', 1) == 1:
dispersion = 10**dispersion
dispersion_unit = Unit('Angstrom')
mask = hdulist[0].data[3, :] != 0
return Spectrum1D(flux=flux, spectral_axis=dispersion * dispersion_unit,
uncertainty=uncertainty, meta=meta, mask=mask)
@data_loader(label="SDSS spPlate", identifier=spPlate_identify, extensions=['fits'])
def spPlate_loader(file_obj, limit=None, **kwargs):
"""
Loader for SDSS spPlate files, reading flux spectra from all fibres into single array.
Parameters
----------
file_obj: str, file-like, or HDUList
FITS file name, object (provided from name by Astropy I/O Registry),
or HDUList (as resulting from astropy.io.fits.open()).
limit : :class:`int`, optional
If set, only return the first `limit` spectra in `flux` array.
Returns
-------
Spectrum1D
The spectra represented by the wavelength solution from the header WCS
and the data array of the primary HDU (typically 640 along dimension 1).
"""
with read_fileobj_or_hdulist(file_obj, **kwargs) as hdulist:
header = hdulist[0].header
meta = {'header': header}
wcs = WCS(header).dropaxis(1)
if limit is None:
limit = header['NAXIS2']
bunit = header.get('BUNIT', '1e-17 erg / (Angstrom cm2 s)')
if 'Ang' in bunit and 'strom' not in bunit:
bunit = bunit.replace('Ang', 'Angstrom')
flux_unit = Unit(bunit)
flux = hdulist[0].data[0:limit, :] * flux_unit
uncertainty = InverseVariance(hdulist[1].data[0:limit, :] / flux_unit**2)
# dispersion along NAXIS1 from the WCS
wcs = WCS(header).dropaxis(1)
dispersion = wcs.pixel_to_world(np.arange(flux.shape[-1]))
# convert out of logspace (default for spSpec/spPlate spectra)?
if header.get('DC-Flag', 1) == 1:
dispersion = 10**dispersion
dispersion_unit = Unit('Angstrom')
mask = hdulist[2].data[0:limit, :] != 0
meta['plugmap'] = Table.read(hdulist[5])[0:limit]
return Spectrum1D(flux=flux, spectral_axis=dispersion*dispersion_unit,
uncertainty=uncertainty, meta=meta, mask=mask)
| null |
specutils/io/default_loaders/sdss.py
|
sdss.py
|
py
| 7,686 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "re.compile",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "parsing_utils.read_fileobj_or_hdulist",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits.BinTableHDU",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "astropy.io.fits",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "parsing_utils.read_fileobj_or_hdulist",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "parsing_utils.read_fileobj_or_hdulist",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "parsing_utils.read_fileobj_or_hdulist",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "astropy.units.Unit",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "astropy.nddata.InverseVariance",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "astropy.units.Unit",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "spectra.Spectrum1D",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "registers.data_loader",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "parsing_utils.read_fileobj_or_hdulist",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "astropy.wcs.WCS",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "astropy.units.Unit",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "astropy.nddata.StdDevUncertainty",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "astropy.units.Unit",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "spectra.Spectrum1D",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "registers.data_loader",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "parsing_utils.read_fileobj_or_hdulist",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "astropy.wcs.WCS",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "astropy.units.Unit",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "astropy.nddata.InverseVariance",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "astropy.wcs.WCS",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "astropy.units.Unit",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "astropy.table.Table.read",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "astropy.table.Table",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "spectra.Spectrum1D",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "registers.data_loader",
"line_number": 158,
"usage_type": "call"
}
] |
456618904
|
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from cybox.test import EntityTestCase
from stix.extensions.identity.ciq_identity_3_0 import CIQIdentity3_0Instance
class CIQIdentity3_0InstanceTests(EntityTestCase, unittest.TestCase):
klass = CIQIdentity3_0Instance
_full_dict = {'id': 'example:ciqidentity-1',
'name': 'John Smith',
'roles': ['Programmer', 'Analyst'],
'specification': {'party_name': {'name_lines': [{'value': 'Foo'},
{'value': 'Bar'}],
'organisation_names': [{'name_elements': [{'element_type': 'FullName',
'value': 'Foo Inc.'}],
'subdivision_names': [{'type': 'Department',
'value': 'InfoSec'}]}],
'person_names': [{'name_elements': [{'value': 'John Smith'}]},
{'name_elements': [{'value': 'Jill Smith'}]}]}},
'xsi:type': 'ciqIdentity:CIQIdentity3.0InstanceType'}
if __name__ == "__main__":
unittest.main()
| null |
stix-1.1.1.0/stix/test/extensions/identity/ciq_identity_3_0_test.py
|
ciq_identity_3_0_test.py
|
py
| 1,424 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cybox.test.EntityTestCase",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "unittest.TestCase",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "stix.extensions.identity.ciq_identity_3_0.CIQIdentity3_0Instance",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "unittest.main",
"line_number": 26,
"usage_type": "call"
}
] |
240944477
|
from dotenv import load_dotenv
import os
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.safari.options import Options as SafariOptions
from selenium.webdriver.edge.options import Options as EdgeOptions
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from threading import Thread
import time
# This array 'capabilities' defines the capabilities browser, device and OS combinations where the test will run
load_dotenv()
BUILD_NAME = "browserstack-mypkfitTest"
capabilities = [
{
"browserName": "chrome",
"browserVersion": "103.0",
"os": "Windows",
"osVersion": "11",
"sessionName": "ChromeBrowser", # test name
"buildName": BUILD_NAME, # Your tests will be organized within this build
},
{
"browserName": "firefox",
"browserVersion": "102.0",
"os": "Windows",
"osVersion": "10",
"sessionName": "FirefoxBrowser",
"buildName": BUILD_NAME,
},
]
def get_browser_option(browser):
switcher = {
"chrome": ChromeOptions(),
"firefox": FirefoxOptions(),
"edge": EdgeOptions(),
"safari": SafariOptions(),
}
return switcher.get(browser, ChromeOptions())
# run_session function searches for 'BrowserStack' on duckduckgo.com
def run_session(cap):
cap["userName"] = os.environ.get("BROWSERSTACK_USERNAME") or "balrajbemainboin_ZgOJfw"
cap["accessKey"] = os.environ.get("BROWSERSTACK_ACCESS_KEY") or "dPV5U2AqsKuLbBpvwqfT"
options = get_browser_option(cap["browserName"].lower())
options.set_capability("browserName", cap["browserName"].lower())
options.set_capability("bstack:options", cap)
driver = webdriver.Remote(
command_executor="https://hub.browserstack.com/wd/hub", options=options
)
driver.get("https://sg-prd-hema.mypkfit.com")
if not "myPKFiT" in driver.title:
raise Exception("Unable to load myPKFiT page!")
driver.maximize_window()
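    # Select the country from the landing dialog, confirm it, then dismiss the follow-up popup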
elem = driver.find_element(By.LINK_TEXT, "India (English)")
elem.click()
    driver.find_element(By.CSS_SELECTOR, "button[ng-click='vm.confirm(vm.selectedCountry)']").click()
    time.sleep(20)
    driver.find_element(By.CSS_SELECTOR, "button[ng-click='vm.close()']").click()
try:
WebDriverWait(driver, 5).until(EC.title_contains("myPKFiT"))
driver.execute_script(
'browserstack_executor: {"action": "setSessionStatus", "arguments": {"status":"passed", "reason": "Title matched!"}}'
)
except TimeoutException:
driver.execute_script(
'browserstack_executor: {"action": "setSessionStatus", "arguments": {"status":"failed", "reason": "Title not matched"}}'
)
print(driver.title)
driver.quit()
# The Thread function takes run_session function and each set of capability from the caps array as an argument to run each session parallelly
for cap in capabilities:
Thread(target=run_session, args=(cap,)).start()
| null |
samplebrowsertest.py
|
samplebrowsertest.py
|
py
| 3,262 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "dotenv.load_dotenv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.firefox.options.Options",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.edge.options.Options",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.safari.options.Options",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.Remote",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.LINK_TEXT",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.title_contains",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "selenium.common.exceptions.TimeoutException",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "threading.Thread",
"line_number": 75,
"usage_type": "call"
}
] |
474958326
|
import docx
from diot import Diot
from docx.shared import Inches, Pt
from docx.enum.dml import MSO_COLOR_TYPE, MSO_THEME_COLOR
from docx.enum.text import WD_ALIGN_PARAGRAPH, WD_LINE_SPACING, WD_BREAK_TYPE, WD_COLOR, WD_TAB_ALIGNMENT, WD_TAB_LEADER, WD_UNDERLINE
from docx.enum.style import WD_STYLE, WD_STYLE_TYPE
from docx.enum.section import WD_ORIENT, WD_SECTION
from docx.enum.table import WD_TABLE_ALIGNMENT, WD_TABLE_DIRECTION
from docx.enum.shape import WD_INLINE_SHAPE_TYPE
from os import path
from runpy import run_path
from bioprocs.utils import log2pyppl
infile = {{i.infile | quote}}
outfile = {{o.outfile | quote}}
codefiles = {{i.codes | repr}}
error = {{args.error | repr}}
bcode = {{args.bcode | repr}}
if not isinstance(bcode, list):
bcode = [bcode]
bcode = '\n'.join(bcode) + '\n'
acode = {{args.acode | repr}}
if not isinstance(acode, list):
acode = [acode]
acode = '\n'.join(acode) + '\n'
section = {{args.section | repr}}
if section:
section['type'] = section.get('type', 'NEW_PAGE')
section['orient'] = section.get('orient', 'PORTRAIT')
section['margin'] = section.get('margin', [36] * 4)
if isinstance(section['margin'], int):
section['margin'] = [section['margin']] * 4
elif len(section['margin']) == 2:
section['margin'].extend(section['margin'])
elif len(section['margin']) == 3:
section['margin'].append(section['margin'][1])
def doSection(doc, section = section, new = True):
if not section:
return
sec = doc.add_section() if new else doc.sections[0]
sec.type = getattr(WD_SECTION, section['type'])
sec.orientation = getattr(WD_ORIENT, section['orient'])
_new_width, _new_height = sec.page_height, sec.page_width
sec.page_width, sec.page_height = _new_width, _new_height
sec.top_margin, sec.right_margin, sec.bottom_margin, sec.left_margin = (Pt(x) for x in section['margin'])
return sec
if infile and path.isfile(infile):
doc = docx.Document(infile)
# do section
sec = doSection(doc, new = True)
exec(bcode, globals())
else:
doc = docx.Document()
sec = doSection(doc, new = False)
if infile:
doc.add_heading(infile, 0)
exec(bcode, globals())
for codefile in codefiles:
log2pyppl('Doing: {}'.format(path.basename(codefile)), level = 'Info')
try:
_ = run_path(codefile, globals())
except Exception as ex:
if error == 'exit':
raise
else:
log2pyppl('Failed to run: {}'.format(codefile), level = 'Error')
for line in str(ex).splitlines():
log2pyppl('\t' + line)
exec(acode, globals())
doc.save(outfile)
| null |
bioprocs/scripts/docx/pDocx.py
|
pDocx.py
|
py
| 2,509 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "docx.enum.section.WD_SECTION",
"line_number": 42,
"usage_type": "argument"
},
{
"api_name": "docx.enum.section.WD_ORIENT",
"line_number": 43,
"usage_type": "argument"
},
{
"api_name": "docx.shared.Pt",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "docx.Document",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "docx.Document",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "bioprocs.utils.log2pyppl",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "runpy.run_path",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "bioprocs.utils.log2pyppl",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "bioprocs.utils.log2pyppl",
"line_number": 71,
"usage_type": "call"
}
] |
504289676
|
# coding=UTF8
import requests
URL = "https://www.madailicai.com/p2p/service/"
def count_for_each(product_type, service_type):
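    # A HEAD request is enough here: the endpoint reports the total record count in the 'X-Record-Count' response header.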
params = {"productType": product_type}
result = requests.head(url=URL + service_type, params=params)
return result.headers.get('X-Record-Count')
def list_of_each(start, size, product_type, service_type):
params = {"from": start, "productType": product_type, "size": size}
result = requests.get(url=URL + service_type, params=params)
return result.json()
def count_for_car_loan():
all_size = count_for_each("CAR_LOAN_REQUEST", "carLoans")
sum_of_money = 0
for each_page in range(0, int(all_size), 10):
one_page = list_of_each(each_page, 10, "CAR_LOAN_REQUEST", "carLoans")
for each_item in one_page:
sum_of_money += each_item['currentInvestmentAmount']
return sum_of_money
print("融车宝: ", count_for_car_loan())
def count_for_loans():
all_size = count_for_each("", "products")
sum_of_money = 0
for each_page in range(0, int(all_size), 10):
one_page = list_of_each(each_page, 10, "", "products")
for each_item in one_page:
sum_of_money += each_item['currentInvestmentAmount']
return sum_of_money
print("腾信宝: ", count_for_loans())
def count_for_enterpriseLoans():
all_size = count_for_each("ENTERPRISE_ACCOUNTS_RECEIVABLE", "enterpriseLoans")
sum_of_money = 0
for each_page in range(0, int(all_size), 10):
one_page = list_of_each(each_page, 10, "ENTERPRISE_ACCOUNTS_RECEIVABLE", "enterpriseLoans")
for each_item in one_page:
sum_of_money += each_item['currentInvestmentAmount']
return sum_of_money
print("融企宝: ", count_for_enterpriseLoans())
all_money = count_for_loans() + count_for_car_loan() + count_for_enterpriseLoans()
print("总放款金额: ", all_money)
| null |
week-1/zhangqing.py
|
zhangqing.py
|
py
| 1,982 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.head",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
}
] |
463580579
|
from datetime import datetime
import pytest
from unittest.mock import MagicMock
from pytz import utc
from sccc_contestbot.models import Contest, ContestData
from sccc_contestbot.contest_manager import RenewalFlag
from .conftest import temp_db_data
def test_renewal_contest(contest_manager, db_session, event_loop):
"""
    Test for renewal_contest.
"""
    # Temporarily replace renewal_call_back with a Mock.
temporary_call_back = contest_manager.renewal_call_back
    # Contest creation test
contest_manager.renewal_call_back = MagicMock()
test_contest = ContestData("A", "A", datetime.now(utc), "A")
event_loop.run_until_complete(contest_manager.renewal_contest(test_contest))
contest_manager.renewal_call_back.assert_called_with(
test_contest, RenewalFlag.CREATED
)
assert (
db_session.query(Contest).filter(Contest.contest_id == "A").first() is not None
)
    # Duplicate submission of the same contest
contest_manager.renewal_call_back = MagicMock()
event_loop.run_until_complete(contest_manager.renewal_contest(test_contest))
assert not contest_manager.renewal_call_back.called
    # Contest modification test
new_contest = ContestData("A", "B", datetime.now(utc), "BB")
event_loop.run_until_complete(contest_manager.renewal_contest(new_contest))
contest_manager.renewal_call_back.assert_called_with(
new_contest, RenewalFlag.CHANGED
)
db_session.query(Contest).filter(Contest.contest_id == "A").delete()
db_session.commit()
db_session.close()
contest_manager.renewal_call_back = temporary_call_back
def test_delete_contest(contest_manager, db_session, event_loop):
"""
    Contest deletion test.
"""
test_data = ContestData("A", "A", datetime.now(utc), "A")
    # With no data present, nothing should be deleted and no exception should be raised.
event_loop.run_until_complete(contest_manager.delete_contest(test_data))
with temp_db_data(db_session, (Contest(test_data),)):
event_loop.run_until_complete(contest_manager.delete_contest(test_data))
assert (
db_session.query(Contest).filter(Contest.contest_id == "A").first() is None
)
def test_is_latest(contest_manager, db_session_maker, event_loop):
"""
    Tests the check of whether a given contest is the latest version.
"""
test_contest = ContestData("ID", "A", datetime.now(utc), "B")
assert False == event_loop.run_until_complete(
contest_manager.is_latest(test_contest)
)
    # First session: add the contest, then verify
db_session = db_session_maker()
db_session.add(Contest(test_contest))
db_session.commit()
assert True == event_loop.run_until_complete(
contest_manager.is_latest(test_contest)
)
db_session.close()
    # Second session: modify the contest, then verify
db_session = db_session_maker()
changed_contest = ContestData("ID", "AA", datetime.now(utc), "BB")
db_session.query(Contest).filter(Contest.contest_id == "ID").update(
{
Contest.contest_name: changed_contest.contest_name,
Contest.start_date: changed_contest.start_date,
Contest.URL: changed_contest.URL,
Contest.hash_value: changed_contest.hash_value,
}
)
db_session.commit()
assert test_contest.contest_name != changed_contest.contest_name
assert False == event_loop.run_until_complete(
contest_manager.is_latest(test_contest)
)
db_session.query(Contest).filter(Contest.contest_id == "ID").delete()
db_session.commit()
db_session.close()
| null |
tests/test_contest_manager.py
|
test_contest_manager.py
|
py
| 3,678 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest.mock.MagicMock",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sccc_contestbot.models.ContestData",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pytz.utc",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "datetime.datetime",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "sccc_contestbot.contest_manager.RenewalFlag.CREATED",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "sccc_contestbot.contest_manager.RenewalFlag",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "sccc_contestbot.models.Contest",
"line_number": 29,
"usage_type": "argument"
},
{
"api_name": "sccc_contestbot.models.Contest.contest_id",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "unittest.mock.MagicMock",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sccc_contestbot.models.ContestData",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pytz.utc",
"line_number": 39,
"usage_type": "argument"
},
{
"api_name": "datetime.datetime",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "sccc_contestbot.contest_manager.RenewalFlag.CHANGED",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "sccc_contestbot.contest_manager.RenewalFlag",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "sccc_contestbot.models.Contest",
"line_number": 45,
"usage_type": "argument"
},
{
"api_name": "sccc_contestbot.models.Contest.contest_id",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "sccc_contestbot.models.ContestData",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "pytz.utc",
"line_number": 57,
"usage_type": "argument"
},
{
"api_name": "datetime.datetime",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "conftest.temp_db_data",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "sccc_contestbot.models.Contest",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "sccc_contestbot.models.Contest",
"line_number": 65,
"usage_type": "argument"
},
{
"api_name": "sccc_contestbot.models.Contest.contest_id",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "sccc_contestbot.models.ContestData",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "pytz.utc",
"line_number": 73,
"usage_type": "argument"
},
{
"api_name": "datetime.datetime",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "sccc_contestbot.models.Contest",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "sccc_contestbot.models.ContestData",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "pytz.utc",
"line_number": 92,
"usage_type": "argument"
},
{
"api_name": "datetime.datetime",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "sccc_contestbot.models.Contest",
"line_number": 93,
"usage_type": "argument"
},
{
"api_name": "sccc_contestbot.models.Contest.contest_id",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "sccc_contestbot.models.Contest.contest_name",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "sccc_contestbot.models.Contest",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "sccc_contestbot.models.Contest.start_date",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "sccc_contestbot.models.Contest",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "sccc_contestbot.models.Contest.URL",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "sccc_contestbot.models.Contest",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "sccc_contestbot.models.Contest.hash_value",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "sccc_contestbot.models.Contest",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "sccc_contestbot.models.Contest",
"line_number": 109,
"usage_type": "argument"
},
{
"api_name": "sccc_contestbot.models.Contest.contest_id",
"line_number": 109,
"usage_type": "attribute"
}
] |
543168364
|
# code taken and modified from:
# https://github.com/aaron-xichen/pytorch-playground/blob/master/cifar/model.py
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from collections import OrderedDict
model_urls = {
'cifar10': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar10-d875770b.pth',
}
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
class CIFAR(nn.Module):
def __init__(self, features, n_channel, num_classes):
super(CIFAR, self).__init__()
assert isinstance(features, nn.Sequential), type(features)
self.features = features
self.classifier = nn.Sequential(
nn.Linear(n_channel, num_classes)
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def make_layers(cfg, batch_norm=False):
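    # cfg entries: 'M' adds a 2x2 max-pool; an int is a conv output-channel count; a (channels, padding) tuple also overrides the default padding of 1.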
layers = []
in_channels = 3
for i, v in enumerate(cfg):
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
padding = v[1] if isinstance(v, tuple) else 1
out_channels = v[0] if isinstance(v, tuple) else v
conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=padding)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(out_channels, affine=False), nn.ReLU()]
else:
layers += [conv2d, nn.ReLU()]
in_channels = out_channels
return nn.Sequential(*layers)
def cifar10(dev_id=None):
n_channel = 128
if dev_id is None:
dev_id = "cuda" if torch.cuda.is_available() else "cpu"
cfg = [n_channel, n_channel, 'M', 2*n_channel, 2*n_channel, 'M', 4*n_channel, 4*n_channel, 'M', (8*n_channel, 0), 'M']
layers = make_layers(cfg, batch_norm=True)
model = CIFAR(layers, n_channel=8*n_channel, num_classes=10)
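    # Download the pretrained CIFAR-10 weights (cached in the current directory) and load them into the model.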
m = model_zoo.load_url(model_urls['cifar10'], model_dir='.', map_location=dev_id)
model.load_state_dict(m,)
return model
| null |
oppel/demos/cifar10model.py
|
cifar10model.py
|
py
| 2,076 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.nn.Module",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.model_zoo.load_url",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.utils.model_zoo",
"line_number": 63,
"usage_type": "name"
}
] |
556508327
|
import logging
import numpy as np
from sklearn.linear_model import LinearRegression, ElasticNet, Ridge
from sklearn.utils import check_random_state
from csrank.objectranking.object_ranker import ObjectRanker
from csrank.tunable import Tunable
from csrank.util import normalize, print_dictionary
from ..dataset_reader.objectranking.util import \
complete_linear_regression_dataset
__all__ = ['ExpectedRankRegression']
class ExpectedRankRegression(ObjectRanker, Tunable):
_tunable = None
def __init__(self, n_features, alpha=0.0, l1_ratio=0.5, tol=1e-4,
normalize=True, fit_intercept=True, random_state=None,
**kwargs):
"""Create an expected rank regression model.
This model normalizes the ranks to [0, 1] and treats them as regression
target. For α = 0 we employ simple linear regression. For α > 0 the
model becomes ridge regression (when l1_ratio = 0) or elastic net
(when l1_ratio > 0).
Parameters
----------
n_features : int
Number of features of the object space
alpha : float, optional
Regularization strength
l1_ratio : float, optional
Ratio between pure L2 (=0) or pure L1 (=1) regularization.
tol : float, optional
Optimization tolerance
normalize : bool, optional
If True, the regressors will be normalized before fitting.
fit_intercept : bool, optional
If True, the linear model will also fit an intercept.
random_state : int, RandomState instance or None, optional
Seed of the pseudorandom generator or a RandomState instance
**kwargs
Keyword arguments for the algorithms
References
----------
.. [1] Kamishima, T., Kazawa, H., & Akaho, S. (2005, November).
"Supervised ordering-an empirical survey.",
Fifth IEEE International Conference on Data Mining.
"""
self.normalize = normalize
self.n_features = n_features
self.alpha = alpha
self.l1_ratio = l1_ratio
self.tol = tol
self.logger = logging.getLogger('ERR')
self.fit_intercept = fit_intercept
self.random_state = check_random_state(random_state)
def fit(self, X, Y, **kwargs):
self.logger.debug('Creating the Dataset')
X_train, Y_train = complete_linear_regression_dataset(X, Y)
assert X_train.shape[1] == self.n_features
self.logger.debug('Finished the Dataset')
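        # Pick the estimator from the regularization settings: ordinary least squares when alpha == 0, elastic net when l1_ratio >= 0.01, ridge otherwise.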
if self.alpha == 0:
self.model = LinearRegression(normalize=self.normalize,
fit_intercept=self.fit_intercept)
else:
if self.l1_ratio >= 0.01:
self.model = ElasticNet(alpha=self.alpha,
l1_ratio=self.l1_ratio,
normalize=self.normalize,
tol=self.tol,
fit_intercept=self.fit_intercept,
random_state=self.random_state)
else:
self.model = Ridge(alpha=self.alpha, normalize=self.normalize,
tol=self.tol,
fit_intercept=self.fit_intercept,
random_state=self.random_state)
self.logger.debug('Finished Creating the model, now fitting started')
self.model.fit(X_train, Y_train)
self.weights = self.model.coef_.flatten()
if (self.fit_intercept):
self.weights = np.append(self.weights, self.model.intercept_)
self.logger.debug('Fitting Complete')
def _predict_scores_fixed(self, X, **kwargs):
n_instances, n_objects, n_features = X.shape
self.logger.info(
"For Test instances {} objects {} features {}".format(n_instances,
n_objects,
n_features))
scores = np.empty([n_instances, n_objects])
for i, data_test in enumerate(X):
assert data_test.shape[1] == self.n_features
score = self.model.predict(data_test) * -1
normalize(np.array(score))
scores[i] = score
self.logger.info("Done predicting scores")
return np.array(scores)
def predict_scores(self, X, **kwargs):
return super().predict_scores(X, **kwargs)
def predict(self, X, **kwargs):
return super().predict(X, **kwargs)
def predict_pair(self, a, b, **kwargs):
score_a = self.model.predict(a, **kwargs) * -1
score_b = self.model.predict(b, **kwargs) * -1
return [score_a / (score_a + score_b), score_b / (score_a + score_b)]
def set_tunable_parameters(self, alpha=1.0, l1_ratio=0.5, tol=1e-4, **point):
self.tol = tol
self.alpha = alpha
self.l1_ratio = l1_ratio
if len(point) > 0:
self.logger.warning('This ranking algorithm does not support'
' tunable parameters'
' called: {}'.format(print_dictionary(point)))
| null |
csrank/objectranking/expected_rank_regression.py
|
expected_rank_regression.py
|
py
| 5,315 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "csrank.objectranking.object_ranker.ObjectRanker",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "csrank.tunable.Tunable",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "csrank.util.normalize",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "sklearn.utils.check_random_state",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "dataset_reader.objectranking.util.complete_linear_regression_dataset",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.ElasticNet",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.Ridge",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "csrank.util.normalize",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "csrank.util.print_dictionary",
"line_number": 125,
"usage_type": "call"
}
] |
512908971
|
import json
class Hotel:
def __init__(self, a_name, a_adress, a_description, a_services, a_apartments):
self.name = a_name
self.adress = a_adress
self.description = a_description
self.services = a_services
self.apartments = a_apartments
def to_JSON(self):
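        # Serialize every instance attribute; ensure_ascii=False keeps non-ASCII characters readable in the output.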
return json.dumps(self, default=lambda o: o.__dict__, ensure_ascii=False)
| null |
Model/hotel.py
|
hotel.py
|
py
| 389 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.dumps",
"line_number": 13,
"usage_type": "call"
}
] |
132326671
|
"""solve_problem6.py:
Assignment 1, Problem 6.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2016, Dilawar Singh "
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "[email protected]"
__status__ = "Development"
import numpy as np
import scipy as sp
import scipy.integrate as spi
import pylab
import sys
from collections import defaultdict
pylab.style.use('seaborn-whitegrid')
def prob( t, p):
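    # Right-hand side of the ODE dx/dt = 1 - exp(x), integrated below with scipy.integrate.ode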
return 1.0 - np.exp(p)
def system( init ):
# Build system using prob with initial conditions.
r = spi.ode( prob )
# y at value t
print('\tSetting x(%s) = %s' % (init[1], init[0] ))
    r.set_integrator('lsoda')  # 'lsoda' is the valid SciPy integrator name; 'lsode' is not recognized
r.set_initial_value(init[0], init[1])
return r
def main():
t1 = 10
dt = 0.01
# pylab.figure( figsize = (15, 10) )
for j, init in enumerate([ (-1,0), (1, 0), (4, 0) ] ):
r = system(init)
result, tvec = [], []
while r.successful() and r.t < t1:
result.append( r.integrate( r.t + dt ) )
tvec.append( r.t )
pylab.subplot(3, 1, j+1)
pylab.plot( tvec, result )
# pylab.yscale( 'symlog' )
title = '(x,t)=(%s, %s)' % init
pylab.title( title )
# pylab.tight_layout( )
# pylab.show()
pylab.savefig( '%s.png' % sys.argv[0].replace('.', '_') )
if __name__ == '__main__':
main()
| null |
DifferentialEquations2016/Assignment1/solve_problem6.py
|
solve_problem6.py
|
py
| 1,503 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pylab.style.use",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pylab.style",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "numpy.exp",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.ode",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "scipy.integrate",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "pylab.subplot",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pylab.plot",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pylab.title",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pylab.savefig",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 53,
"usage_type": "attribute"
}
] |
241579253
|
#!/usr/bin/env python
# ------------------------------------------------------------
# lexer.py
#
# ------------------------------------------------------------
import ply.lex as lex
from ply.lex import TOKEN
import sys
from table import Table
# List of token names.
tokens = (
# KEYWORDS
'IMPORT',
'CLASS',
'INHERITS',
'PUBLIC',
'PRIVATE',
'SEMICOLON',
'COLON',
'ID',
'LSQRBRACKET',
'RSQRBRACKET',
'LPAREN',
'RPAREN',
'LBRACE',
'RBRACE',
'PERIOD',
'MAIN',
'OBJECT',
# # DATA TYPES
'CLASS_TYPE',
'INTEGER',
'INTEGER_TYPE',
'BOOL_TYPE',
'STRING_TYPE',
'SELF_TYPE',
'STRING',
'TRUE',
'FALSE',
'MOD',
'TILDA',
'GT',
'LT',
'EQUAL',
'LTEQ',
'GTEQ',
'GETS',
'OR',
'AND',
'NOT',
# # LOOP AND CONDITIONAL
'IF',
'THEN',
'ELSE',
'FI',
'WHILE',
'LOOP',
'POOL',
'FOR',
'LET',
'IN',
'SELF',
'BREAK',
'CONTINUE',
'ISVOID',
'NEW',
'PLUS',
'MINUS',
'TIMES',
'DIVIDE',
'COMMA',
# At
'AT'
)
# reserved Keyword
reserved = {
'import' : 'IMPORT',
'class' : 'CLASS',
'inherits' : 'INHERITS',
'public' : 'PUBLIC',
'private' : 'PRIVATE',
'main' : 'MAIN',
'Object' : 'OBJECT',
'while' : 'WHILE',
'loop' : 'LOOP',
'pool' : 'POOL',
'for' : 'FOR',
'let' : 'LET',
'in' : 'IN',
'self' : 'SELF',
'break' : 'BREAK',
'continue' : 'CONTINUE',
'isvoid' : 'ISVOID',
'new' : 'NEW',
'if' : 'IF',
'then' : 'THEN',
'else' : 'ELSE',
'fi' : 'FI',
'and' : 'AND',
'not' : 'NOT',
'or' : 'OR',
'Int' : 'INTEGER_TYPE',
'Bool' : 'BOOL_TYPE',
'String' : 'STRING_TYPE',
'TRUE' : 'TRUE',
'FALSE' : 'FALSE',
'SELF_TYPE' : 'SELF_TYPE',
'CLASS_TYPE' : 'CLASS_TYPE'
}
# Regular expression rules for simple tokens
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MOD = r'%'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_LSQRBRACKET = r'\['
t_RSQRBRACKET = r'\]'
t_SEMICOLON = r';'
t_COLON = r':'
t_TILDA = r'~'
t_GT = r'>'
t_LT = r'<'
t_EQUAL = r'='
t_LTEQ = r'<='
t_GTEQ = r'>='
t_GETS = r'<-'
t_PERIOD = r'\.'
t_COMMA = r','
t_AT = r'@'
# A regular expression rule with some action code
# Regular expression for identifiers
def t_ID(t):
r'[a-z_][a-zA-Z_0-9]*'
t.type = reserved.get(t.value,'ID') # Check for reserved words
return t
# Regex for Class Name
def t_CLASS_TYPE(t):
r'[A-Z][a-zA-Z_0-9]*'
t.type = reserved.get(t.value, 'CLASS_TYPE')
return t
# Regular expression for integers
def t_INTEGER(t):
r'\d+'
t.value = int(t.value)
return t
# Define a rule so we can track line numbers
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
# Error handling rule
def t_error(t):
print("Illegal character '%s' at line %d" % (t.value[0],t.lineno))
t.lexer.skip(1)
print("Exiting due to lexing error.....Abort!")
exit()
states = (
('STRING','exclusive'),
('COMMENT','exclusive'),
)
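# While an exclusive state (STRING or COMMENT) is active, only rules prefixed with that state name apply.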
def t_start_string(t): # try removing start -> begin
r'\"'
t.lexer.push_state("STRING")
t.lexer.string_backslashed = False
t.lexer.stringbuffer=''
def t_start_comment(t):
r'\(\*'
t.lexer.push_state("COMMENT")
# t.lexer.no_comment = t.lexer.no_comment + 1
def t_COMMENT_end(t):
r'\*\)'
t.lexer.pop_state()
def t_COMMENT_anything(t):
r'.|\n'
# r'[^(\*\))]'
t_COMMENT_ignore=""
def t_COMMENT_error(t):
print("Illegal COMMENT in line no. {0}, character {1}".format(t.lineno,t.value[0]))
def t_STRING_end(t):
r'\"'
if t.lexer.string_backslashed :
t.lexer.stringbuffer += '"'
t.lexer.string_backslashed = False
else:
t.lexer.pop_state()
t.lexer.stringbuffer += ''
t.value = t.lexer.stringbuffer
t.type = "STRING"
return t
def t_STRING_anything(t):
r'.'
    if t.value == '\n':  # compare the token's value, not the LexToken object itself
t.lexer.lineno += 1
if not t.lexer.string_backslashed:
dummy=0
else:
t.lexer.string_backslashed = False
else:
if t.lexer.string_backslashed:
if t.value == 'b':
t.lexer.stringbuffer += '\b'
elif t.value == 'n':
t.lexer.stringbuffer += '\n'
elif t.value == 't':
t.lexer.stringbuffer += '\t'
elif t.value == '\\':
t.lexer.stringbuffer += '\\'
elif t.value == 'f':
t.lexer.stringbuffer += '\f'
else:
t.lexer.stringbuffer += t.value
t.lexer.string_backslashed = False
else:
if t.value != '\\':
t.lexer.stringbuffer += t.value
else:
t.lexer.string_backslashed = True
t_STRING_ignore = ''
def t_STRING_error(t):
print("Illegal character in line no. {0}, character {1}".format(t.lineno,t.value[0]))
lexer = lex.lex()
input_file = sys.argv[1]
with open(input_file) as file:
data = file.read()
# Give the lexer some input
lexer.input(data)
collect = []
# Tokenize
while True:
tok = lexer.token()
if not tok:
break
# print(tok)
#TOKEN HAS TYPE AND VALUE
collect.append(tok)
print_list = []
for single_token in tokens:
print_list_element = []
token_list = []
token_str = ""
token_count = 0
for lex_tokens in collect:
if(single_token == lex_tokens.type):
token_count = token_count + 1
if(lex_tokens.value not in token_list):
token_list.append(lex_tokens.value)
if(token_count > 1):
token_str = token_str + ' '
token_str = token_str + str(lex_tokens.value)
#print(token_str)
if(token_count == 0):
continue
if(single_token in reserved.values()):
print_list.append(["Keyword_"+single_token, str(token_count), token_str])
else:
print_list.append([single_token, str(token_count), token_str])
# print(single_token + ':' + str(token_count))
# print(str(token_list))
# print (single_token in reserved.values())
# print("--------------------------------------------")
#print(print_list)
# print("------------------------------------------------------")
header = Table([["Tokens", "Occurences", "Lexemes"]],20,True)
print(header)
print("------------------------------------------------------")
table = Table(print_list, 20, True)
print(table)
| null |
asgn1/src/lexer.py
|
lexer.py
|
py
| 6,289 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "ply.lex.lex",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "ply.lex",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 262,
"usage_type": "attribute"
},
{
"api_name": "table.Table",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "table.Table",
"line_number": 315,
"usage_type": "call"
}
] |
353080772
|
import logging
def init(options):
logging.addLevelName(5, 'TRACE')
logging.addLevelName(3, 'MICROTRACE')
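    # TRACE (5) and MICROTRACE (3) are custom levels below logging.DEBUG (10) for extra-verbose output.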
log_format = '%(asctime)s - [%(module)s] %(levelname)s - %(message)s'
logging.basicConfig(level=logging.getLevelName(options.log_level),
format=log_format, datefmt='%H:%M:%S')
if options.log_file:
log('INFO', 'Writing log to file: %s at level %s', options.log_file, options.log_file_level)
hdlr = logging.FileHandler(options.log_file, mode='w')
hdlr.setLevel(logging.getLevelName(options.log_file_level))
hdlr.setFormatter(logging.Formatter(log_format, datefmt='%H:%M:%S'))
logging.root.addHandler(hdlr)
else:
log('INFO', 'No log file specified.')
def loglevel(as_int=False):
level = logging.root.level
if as_int:
return level
return logging.getLevelName(level)
def is_lower(than, le=True):
if le:
return loglevel(as_int=True) <= logging.getLevelName(than)
return loglevel(as_int=True) < logging.getLevelName(than)
def log(lvl, msg, *args, **kwargs):
logging.log(logging.getLevelName(lvl), msg, *args, **kwargs)
| null |
old/Pipeline/logger.py
|
logger.py
|
py
| 1,162 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.addLevelName",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "logging.addLevelName",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.getLevelName",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.FileHandler",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.getLevelName",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.root.addHandler",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.root",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "logging.root",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "logging.getLevelName",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "logging.getLevelName",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "logging.getLevelName",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "logging.log",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "logging.getLevelName",
"line_number": 35,
"usage_type": "call"
}
] |
473785587
|
__author__ = "Nikhil Bharadwaj Gosala"
__version__ = 'Python 2.7'
import os
from PIL import Image, ExifTags
def preprocessImages(image_path, wm_path):
"Open the images and resize the watermark to the required size"
is_landscape = False
is_potrait = False
wm = {}
#Open the images
try:
image = Image.open(image_path)
except:
print("Please check the path of the image to be watermarked")
raw_input("Press any key to continue.\n")
quit()
for i in range(len(wm_path)):
try:
wm[i] = Image.open(wm_path[i])
except:
print("Please check the path of the watermark")
raw_input("Press any key to continue.\n")
quit()
    #Rotate the image if it was in portrait mode. Use the EXIF tags stored by the camera in the JPEG file
if hasattr(image, '_getexif'): # only present in JPEGs
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation]=='Orientation': #Find where the Orientation header is
break
e = image._getexif() # returns None if no EXIF data
if e is not None:
exif=dict(e.items())
if(orientation in exif):
orientation = exif[orientation]
if orientation == 3: image = image.transpose(Image.ROTATE_180)
elif orientation == 6: image = image.transpose(Image.ROTATE_270)
elif orientation == 8: image = image.transpose(Image.ROTATE_90)
wm_width = {}
wm_height = {}
#Get the dimensions of the images
image_width, image_height = image.size
for i in range(len(wm)):
wm_width[i], wm_height[i] = wm[i].size
    #Check if the image is a landscape or a portrait image
if image_width >= image_height:
is_landscape = True
else:
is_potrait = True
wm_height_new = {}
wm_width_new = {}
    #Scale the watermark to 8% of the image height (landscape) or 10% of the image width (portrait)
for i in range(len(wm)):
if(is_landscape):
wm_height_new[i] = 0.08*image_height
wm_width_new[i] = (wm_height_new[i]/wm_height[i])*wm_width[i]
wm_height[i], wm_width[i] = wm_height_new[i], wm_width_new[i]
elif(is_potrait):
wm_height_new[i] = 0.1*image_width
wm_width_new[i] = (wm_height_new[i]/wm_height[i])*wm_width[i]
wm_height[i], wm_width[i] = wm_height_new[i], wm_width_new[i]
#Resize the watermark
wm[i].thumbnail((wm_width[i], wm_height[i]), Image.ANTIALIAS)
return image, wm
def overlay(image, wm, pos):
"Overlay the watermark over the image"
wm_width = {}
wm_height = {}
overlay = image
image_width, image_height = image.size
for i in range(len(wm)):
wm_width[i], wm_height[i] = wm[i].size
if pos[i] == 'TL':
overlay.paste(wm[i], (40, 40), wm[i])
elif pos[i] == 'TR':
overlay.paste(wm[i], (image_width-wm_width[i]-40, 40), wm[i])
elif pos[i] == 'BL':
overlay.paste(wm[i], (40, image_height-wm_height[i]-40), wm[i])
elif pos[i] == 'BR':
overlay.paste(wm[i],(image_width-wm_width[i]-40, image_height-wm_height[i]-40), wm[i])
#overlay.show()
return overlay
#overlay.save(imagename + "_edit.jpg", "JPEG")
if __name__ == '__main__':
num = int(input("Enter the number of watermarks\n"))
watermark_path = {}
watermark_pos = {}
for i in range(num):
path = raw_input("Enter the path of the watermark\n")
pos = raw_input("Enter location of watermark(TL, TR, BL, BR)\n").upper()
watermark_path[i] = path
watermark_pos[i] = pos
path = os.getcwd()
images = os.listdir(path)
if not os.path.isdir("Watermarked"):
os.mkdir("Watermarked")
for imagename in images:
#if(imagename.endswith('.jpg') or imagename.endswith('.JPG') and not imagename.startswith('w_')):
if not imagename.startswith('w_') and not imagename.endswith('.py') and not imagename.endswith('.exe') and not os.path.isdir(imagename):
print(imagename)
image_path = imagename
image, wm = preprocessImages(os.path.join(path,image_path), watermark_path)
overlay_image = overlay(image,wm,watermark_pos)
save_path = path + '\\Watermarked'
overlay_image.save(os.path.join(save_path,imagename.split('.')[0]) + '_wm.jpg', 'JPEG')
#image_path = input("Enter the path of the image to be watermarked\n")
#watermark_path = input("Enter the path of the watermark\n")
#pos = input("Enter location of watermark(TL, TR, BL, BR)\n").upper()
raw_input("Press any key to continue.\n")
| null |
auto_watermark_multiple_27.py
|
auto_watermark_multiple_27.py
|
py
| 4,825 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "PIL.Image.open",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "PIL.ExifTags.TAGS.keys",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "PIL.ExifTags.TAGS",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "PIL.ExifTags",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "PIL.ExifTags.TAGS",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "PIL.ExifTags",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "PIL.Image.ROTATE_180",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "PIL.Image.ROTATE_270",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "PIL.Image.ROTATE_90",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "PIL.Image.ANTIALIAS",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "os.getcwd",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 121,
"usage_type": "attribute"
}
] |
344098503
|
import os
import functools
from .utils import printProgressBar
from randomness_testsuite.ApproximateEntropy import ApproximateEntropy as aet
from randomness_testsuite.Complexity import ComplexityTest as ct
from randomness_testsuite.CumulativeSum import CumulativeSums as cst
from randomness_testsuite.FrequencyTest import FrequencyTest as ft
from randomness_testsuite.BinaryMatrix import BinaryMatrix as bm
from randomness_testsuite.Matrix import Matrix as mt
from randomness_testsuite.RandomExcursions import RandomExcursions as ret
from randomness_testsuite.RunTest import RunTest as rt
from randomness_testsuite.Serial import Serial as serial
from randomness_testsuite.Spectral import SpectralTest as st
from randomness_testsuite.TemplateMatching import TemplateMatching as tm
from randomness_testsuite.Universal import Universal as ut
def test_all():
path_to_input = './maximos/Binarizados'
test_function = {
0:ft.monobit_test,
1:ft.block_frequency,
2:rt.run_test,
3:rt.longest_one_block_test,
4:mt.binary_matrix_rank_text,
5:st.sepctral_test,
6:tm.non_overlapping_test,
7:tm.overlapping_patterns,
8:ut.statistical_test,
9:ct.linear_complexity_test,
10:serial.serial_test,
11:aet.approximate_entropy_test,
12:cst.cumulative_sums_test,
13:functools.partial(cst.cumulative_sums_test, mode=1),
14:ret.random_excursions_test,
15:ret.variant_test
}
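    # Indices 12 and 13 both run the cumulative-sums test; the second call passes mode=1 for the reverse direction (see the CSV header below).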
all_results = {}
    for dirpath,dirname, files in os.walk(path_to_input):  # directory containing the binarized files
printProgressBar(0, len(files), prefix = 'Progress:', suffix = 'Complete', length = 50)
for i,archivo in enumerate(files):
#print(archivo)
            input_file = open(os.path.join(path_to_input,archivo), 'r')  # open one binarized input file
data = input_file.read()
results = []
for test in test_function.values():
results.append(test(data))
all_results[f'{archivo}']= results
printProgressBar(i+1, len(files), prefix = 'Progress:', suffix = 'Complete', length = 50)
output = "Archivo;01. Frequency Test (Monobit);02. Frequency Test within a Block;03. Run Test;04. Longest Run of Ones in a Block;05. Binary Matrix Rank Test;06. Discrete Fourier Transform (Spectral) Test;07. Non-Overlapping Template Matching Test;08. Overlapping Template Matching Test;09. Maurer\s Universal Statistical test;10. Linear Complexity Test;11. Serial test;12. Approximate Entropy Test;13. Cummulative Sums (Forward) Test;14. Cummulative Sums (Reverse) Test;15. Random Excursions Test;16. Random Excursions Variant Test\n"
for result in all_results.keys():
resultados = f"{result}"
for rr in all_results[result]:
resultados += f";{rr}"
output += resultados + "\n"
output_file = open(f'resultados_{archivo}.csv', 'w')
output_file.write(output)
output_file.close()
| null |
scripts/test_all.py
|
test_all.py
|
py
| 3,025 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "randomness_testsuite.FrequencyTest.FrequencyTest.monobit_test",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "randomness_testsuite.FrequencyTest.FrequencyTest",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "randomness_testsuite.FrequencyTest.FrequencyTest.block_frequency",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "randomness_testsuite.FrequencyTest.FrequencyTest",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "randomness_testsuite.RunTest.RunTest.run_test",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "randomness_testsuite.RunTest.RunTest",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "randomness_testsuite.RunTest.RunTest.longest_one_block_test",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "randomness_testsuite.RunTest.RunTest",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "randomness_testsuite.Matrix.Matrix.binary_matrix_rank_text",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "randomness_testsuite.Matrix.Matrix",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "randomness_testsuite.Spectral.SpectralTest.sepctral_test",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "randomness_testsuite.Spectral.SpectralTest",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "randomness_testsuite.TemplateMatching.TemplateMatching.non_overlapping_test",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "randomness_testsuite.TemplateMatching.TemplateMatching",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "randomness_testsuite.TemplateMatching.TemplateMatching.overlapping_patterns",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "randomness_testsuite.TemplateMatching.TemplateMatching",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "randomness_testsuite.Universal.Universal.statistical_test",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "randomness_testsuite.Universal.Universal",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "randomness_testsuite.Complexity.ComplexityTest.linear_complexity_test",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "randomness_testsuite.Complexity.ComplexityTest",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "randomness_testsuite.Serial.Serial.serial_test",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "randomness_testsuite.Serial.Serial",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "randomness_testsuite.ApproximateEntropy.ApproximateEntropy.approximate_entropy_test",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "randomness_testsuite.ApproximateEntropy.ApproximateEntropy",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "randomness_testsuite.CumulativeSum.CumulativeSums.cumulative_sums_test",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "randomness_testsuite.CumulativeSum.CumulativeSums",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "functools.partial",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "randomness_testsuite.CumulativeSum.CumulativeSums.cumulative_sums_test",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "randomness_testsuite.CumulativeSum.CumulativeSums",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "randomness_testsuite.RandomExcursions.RandomExcursions.random_excursions_test",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "randomness_testsuite.RandomExcursions.RandomExcursions",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "randomness_testsuite.RandomExcursions.RandomExcursions.variant_test",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "randomness_testsuite.RandomExcursions.RandomExcursions",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "os.walk",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "utils.printProgressBar",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "utils.printProgressBar",
"line_number": 57,
"usage_type": "call"
}
] |
602877261
|
# ========================================
# [] File Name : network.py
#
# [] Creation Date : Februray 2018
#
# [] Created By : Ali Gholami ([email protected])
# ========================================
"""
Kaggle competition, predicting the survivals of the Titanic!
"""
# coding: utf-8
# Import Pandas and Numpy libraries. These are the main libraries for array computations.
# In[1]:
import pandas as pd
import numpy as np
get_ipython().run_line_magic('matplotlib', 'inline')
# Define the data root to be take train data from.
# In[2]:
DATA_ROOT = './data/'
# Load the train and test data using read_csv function in the Pandas library.
# In[3]:
train_data = pd.read_csv(DATA_ROOT + 'train.csv')
test_data = pd.read_csv(DATA_ROOT + 'test.csv')
# A function to separate the different outputs of each section. This will be used to display the data in the terminal in a readable way.
# In[4]:
def separate_output(str):
'''
    Displays a string argument in a clear, separated form
'''
SHARP_COUNT = 100
print('\n')
for i in range(SHARP_COUNT):
if(i == SHARP_COUNT-1):
print("#")
else:
print("#", end="")
# Display info at the center
for i in range(int((SHARP_COUNT/2-len(str)/2))):
print("",end=" ")
print(str)
for i in range(SHARP_COUNT):
print("#", end="")
print('\n')
# Find out what the data looks like. This helps us get an intuition of the features inside the datasets. This is done using the shape attribute of a Pandas dataframe.
# In[5]:
separate_output("Train/Test Shapes")
print(train_data.shape)
print(test_data.shape)
# This will provide some statistical knowledge about the data. We can observe the mean, variance, max and minimum of data for each feature. This can be used for data normalization and preprocessing. We have used the describe method from the Pandas dataframe class.
# In[6]:
separate_output("General Data Knowledge")
train_data.describe()
# Some features like PassengerId, Name, Ticket, Cabin and Embarked can be removed from the dataset.
# In[7]:
# These columns will be dropped
DROPPED_COLS = ['PassengerId',
'Ticket',
'Cabin',
'Embarked']
# Drop the unneeded columns
train_data.drop(DROPPED_COLS, axis=1, inplace=True)
# In[8]:
# Get the shape of data
separate_output("Train/Test Shapes -- Dropped 5 Columns")
print(train_data.shape)
print(test_data.shape)
# Let's plot pie charts of survival by gender. We want to analyze whether gender affects survival. At first glance, gender seems like it could be a good feature for prediction, and these plots confirm this idea.
# In[9]:
import matplotlib.pyplot as plt
# Check whether gender affects survival
# Plot the figures for male and female
fig = plt.figure(figsize=(8, 4), dpi=120, facecolor='w', edgecolor='k')
fig.canvas.set_window_title("Analysis of Gender Effect on Survivals")
male_survival = fig.add_subplot(121)
train_data.Survived[train_data['Sex'] == 'male'].value_counts().plot(kind='pie')
male_survival.set_title("Male Survivals")
female_survival = fig.add_subplot(122)
train_data.Survived[train_data['Sex'] == 'female'].value_counts().plot(kind='pie')
female_survival.set_title("Female Survivals")
plt.show()
# Let's check the datatypes to make sure there are no more object columns left in the dataset. Object columns usually represent text (categorical data). We obtain this using select_dtypes from the Pandas DataFrame class.
# In[10]:
# Let's see if there are any more categorical data left
separate_output("Datatypes")
print(train_data.select_dtypes(include=[object]))
# We use LabelEncoder to convert the categorical data into the numerical form. To do this, simply create an object of the LabelEncoder class and call the fit_transform function on the desired data column in the dataset.
# In[11]:
from sklearn.preprocessing import LabelEncoder, StandardScaler
# Convert the categorical data into numerical form
train_data['Sex'] = LabelEncoder().fit_transform(train_data['Sex'])
# Split the titles from the passenger names; the title is itself a feature and also helps in calculating the missing median age values.
# In[12]:
train_data['Name'] = train_data['Name'].map(lambda x: x.split(',')[1].split('.')[0].strip())
titles = train_data['Name'].unique()
titles
# Sneaking into the Age column, we can see there are some NaN numbers. These are called missing values. In order not to lose these samples, we need to fill the NaN values with appropriate values. Fill the NaN values of Age using the median value associated with each title.
# In[13]:
train_data['Age'].fillna(-1, inplace=True)
medians = dict()
for title in titles:
median = train_data.Age[(train_data["Age"] != -1) & (train_data['Name'] == title)].median()
medians[title] = median
for index, row in train_data.iterrows():
if row['Age'] == -1:
train_data.loc[index, 'Age'] = medians[row['Name']]
train_data.head()
# Before transforming the Name column into numerical form, we'll examine the distribution of our training data with respect to the titles. We will assign numbers to these titles according to the survival distributions shown below.
# In[14]:
fig = plt.figure(figsize=(15,6))
i=1
for title in train_data['Name'].unique():
fig.add_subplot(3, 6, i)
plt.title('Title : {}'.format(title))
train_data.Survived[train_data['Name'] == title].value_counts().plot(kind='pie')
i += 1
# In[15]:
REPLACEMENTS = {
'Don': 0,
'Rev': 0,
'Jonkheer': 0,
'Capt': 0,
'Mr': 1,
'Dr': 2,
'Col': 3,
'Major': 3,
'Master': 4,
'Miss': 5,
'Mrs': 6,
'Mme': 7,
'Ms': 7,
'Mlle': 7,
'Sir': 7,
'Lady': 7,
'the Countess': 7
}
train_data['Name'] = train_data['Name'].apply(lambda x: REPLACEMENTS.get(x))
# We can also fill the NaN values of Fare using its correlation with the ticket class (Pclass).
# In[16]:
train_data['Fare'].fillna(-1, inplace=True)
medians = dict()
for pclass in train_data['Pclass'].unique():
median = train_data.Fare[(train_data["Fare"] != -1) & (train_data['Pclass'] == pclass)].median()
medians[pclass] = median
for index, row in train_data.iterrows():
if row['Fare'] == -1:
train_data.loc[index, 'Fare'] = medians[row['Pclass']]
# Plot the distribution of our data with respect to each class of tickets.
# In[17]:
fig = plt.figure(figsize=(15,4))
i=1
for pclass in train_data['Pclass'].unique():
fig.add_subplot(1, 3, i)
plt.title('Class : {}'.format(pclass))
train_data.Survived[train_data['Pclass'] == pclass].value_counts().plot(kind='pie')
i += 1
# The classes are numeric already. Let's analyze the next feature.
# In[18]:
fig = plt.figure(figsize=(15,8))
i = 0
for parch in train_data['Parch'].unique():
fig.add_subplot(2, 4, i+1)
plt.title('Parents / Child : {}'.format(parch))
train_data.Survived[train_data['Parch'] == parch].value_counts().plot(kind='pie')
i += 1
# In[19]:
CP_REPLACEMENTS = {
6: 0,
4: 0,
5: 1,
0: 2,
2: 3,
1: 4,
3: 5
}
train_data['Parch'] = train_data['Parch'].apply(lambda x: CP_REPLACEMENTS.get(x))
# In[20]:
train_data.head()
# Now the data is almost ready for training. We can start training using the predefined models in the sklearn library. The following models are used in this example.
# In[21]:
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# An array containing models
MODELS = [
MLPClassifier(),
AdaBoostClassifier(),
SVC(),
QuadraticDiscriminantAnalysis(),
GaussianProcessClassifier()
]
# Since the labels for the test_data are not available, we use train_data for both training and testing. We use the train_test_split function to split off 20% of the data for testing and keep 80% for training. The actual labels for the training set are first extracted, the Survived column is dropped, and finally train_test_split is called on the training data with the respective labels.
# In[22]:
from sklearn.model_selection import train_test_split
# Split the train and test data
train_labels = train_data['Survived']
train_data.drop('Survived', axis=1, inplace=True)
X_train, X_test, y_train, y_test = train_test_split(train_data, train_labels, test_size=0.2, random_state=42)
# We have used the recall score and the f1 score to represent the performance of each of these classifiers. The fit function can be called on each classifier object; it is used to train that specific classifier on the data. The predict function returns the predicted labels.
# In[23]:
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
for model in MODELS:
model.fit(X_train, y_train)
prediction = model.predict(X_test)
rscore = recall_score(y_test, prediction)
f1score = f1_score(y_test, prediction)
score = model.score(X_test, y_test)
print(score)
print("Recall: ", rscore)
print("F-1 Score: ", f1score)
| null |
docs/assignment-1/src/network.py
|
network.py
|
py
| 9,256 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_csv",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 245,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 250,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 264,
"usage_type": "name"
},
{
"api_name": "sklearn.neural_network.MLPClassifier",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.AdaBoostClassifier",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "sklearn.gaussian_process.GaussianProcessClassifier",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.recall_score",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.f1_score",
"line_number": 336,
"usage_type": "call"
}
] |
363043962
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 21 14:48:16 2019
@author: zoe
"""
"""
#import matplotlib
from tkinter import *
main=Tk()
main.title('RoboTeam_RRT_GUI') #renames the title of the main window
mainFrame= Frame(main)
mainFrame.pack()
"""
import numpy as np
import random, math
import pygame.locals
import pygame.draw
#import matplotlib.pyplot as plt
#from matplotlib.path import Path
#import matplotlib.patches as patches
"""
class Shortest_Path():
def __init__(self, vertices, start, end):
self.start = start
self.end = end
self.vertices = vertices
self.sptSet = [self.end]
self.dist_from_end = [float("inf")]*vertices.len()
self.dist_from_end =
"""
class Node():
def __init__(self,location,parent):
self.location = location
self.parent= parent
class Boundaries():
def __init__(self,x1,y1,x2,y2):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
class RRT():
def __init__(self,start,end,min_length,max_length,node_number,boundaries):
self.start= start
self.startNode = Node(start,None)
self.end=end
self.min_length = min_length
self.max_length=max_length
self.node_number = node_number
self.screen_size=(800,800)
self.color = (255,255,0)
self.color1 = (0,0,255)
self.colorRect= (255,0,255)
self.radius = 20
self.nodes = []
self.nodes.append(start)
#self.node_pairs=[]
#self.node_dict = {}
#self.node_dict[self.start] = []
self.boundaries = boundaries
def dist(self,p1,p2):
return math.sqrt((p2[0]-p1[0])**2 + (p2[1]-p1[1])**2)
def find_closest(self, p1):
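# linear scan over all existing tree nodes to find the nearest neighbour of p1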
closest = self.nodes[0]
for node in self.nodes:
if self.dist(node,p1) < self.dist(closest,p1):
closest = node
return closest
def isValid(self,p1):
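# reject points that fall inside any rectangular obstacle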
for i in self.boundaries:
if (p1[0] > i.x1 and p1[0] < i.x2) and (p1[1] > i.y1 and p1[1] < i.y2):
return False
return True
def new_node(self):
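# sample a random point, then steer it so its distance to the closest tree node
# stays within [min_length, max_length]; resample if it lands inside an obstacle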
x = int(random.randint(0,self.screen_size[0]+1))
y= int(random.randint(0,self.screen_size[1]+1))
nn=(x,y)
closest = self.find_closest(nn)
if self.dist(closest,nn) > self.max_length:
length = self.dist(closest,nn)
x = closest[0]+int(self.max_length*(x-closest[0])/length)
y = closest[1]+int(self.max_length*(y-closest[1])/length)
nn=(x,y)
closest = self.find_closest(nn)
if self.dist(closest,nn) < self.min_length:
length = self.dist(closest,nn)
while length == 0:
closest, nn = self.new_node()
length = self.dist(closest, nn)
x = closest[0]+int(self.min_length*(x-closest[0])/length)
y = closest[1]+int(self.min_length*(y-closest[1])/length)
if not self.isValid(nn):
closest,nn = self.new_node()
return closest,nn
def main(self):
pygame.init()
#sets screen size
screen = pygame.display.set_mode(self.screen_size)
#sets the title of the screen
pygame.display.set_caption("RRT Algorithm")
#fills the screen with the color white
white = (255,255,255)
screen.fill(white)
#draws a circle around the startpoint
pygame.draw.circle(screen,self.color1,self.start,self.radius)
#draws a circle around the endpoint
pygame.draw.circle(screen, self.color, self.end, self.radius)
for i in self.boundaries:
pygame.draw.rect(screen,self.colorRect,pygame.Rect(i.x1,i.y1,abs(i.x2-i.x1),abs(i.y2-i.y1)))
#pygame.draw.rect(screen,self.colorRect,)
for n in range(self.node_number):
cl, nn = self.new_node()
"""
x = int(random.randint(0,480+1))
y= int(random.randint(0,640+1))
nn=(x,y)
closest = self.find_closest(nn)
if self.dist(closest,nn) > self.max_length:
angle = math.atan((nn[1]-closest[1])/(nn[0]-closest[0]))
x=int(closest[0]+self.max_length*math.cos(angle))
y = int(closest[1]+self.max_length*math.sin(angle))
nn=(x,y)
closest = self.find_closest(nn)
"""
self.nodes.append(nn)
#self.node_pairs.append((cl,nn))
#self.node_dict[cl].append(nn)
#self.node_dict[nn]=[]
red = (255,0,0)
pygame.draw.circle(screen,red,nn,3)
pygame.draw.line(screen,red,cl,nn,2)
#updates the screen when you add a new node
pygame.display.update()
if (self.dist(nn,self.end) <= self.radius):
break
if (pygame.event.poll().type == pygame.QUIT):
break
while True:
# gets a single event from the event queue
event = pygame.event.poll()
# if the 'close' button of the window is pressed
if event.type == pygame.QUIT:
# stops the application
#print(self.node_dict)
break
if __name__ == '__main__':
boundaries = [Boundaries(100,100,200,200), Boundaries(101,202,303,404)]
RRT((375,375),(500,500),5,20,500,boundaries).main()
| null |
RoboTeam_RRT_Gui.py
|
RoboTeam_RRT_Gui.py
|
py
| 5,473 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "math.sqrt",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "pygame.locals.init",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "pygame.locals",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "pygame.locals.display.set_mode",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "pygame.locals.display",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "pygame.locals",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "pygame.locals.display.set_caption",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "pygame.locals.display",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "pygame.locals",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "pygame.locals.draw.circle",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "pygame.locals.draw",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "pygame.locals",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "pygame.locals.draw.circle",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "pygame.locals.draw",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "pygame.locals",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "pygame.locals.draw.rect",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "pygame.locals.draw",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "pygame.locals",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "pygame.locals.Rect",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "pygame.locals.draw.circle",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "pygame.locals.draw",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "pygame.locals",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "pygame.locals.draw.line",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "pygame.locals.draw",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "pygame.locals",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "pygame.locals.display.update",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "pygame.locals.display",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "pygame.locals",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "pygame.locals.event.poll",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "pygame.locals.event",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "pygame.locals",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "pygame.locals.QUIT",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "pygame.locals.event.poll",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "pygame.locals.event",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "pygame.locals",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "pygame.locals.QUIT",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "pygame.locals",
"line_number": 181,
"usage_type": "name"
}
] |
99877906
|
"""
MIT License
Copyright (c) 2020 Airbyte
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
from pathlib import Path
import pytest
from source_file.client import Client
HERE = Path(__file__).parent.absolute()
def check_read(config, expected_columns=10, expected_rows=42):
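# helper: build a Client from the given provider config and assert the expected shape of the streamed rows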
client = Client(**config)
rows = list(client.read())
assert len(rows) == expected_rows
assert len(rows[0]) == expected_columns
@pytest.mark.parametrize(
"provider_name,file_path,file_format",
[
("ssh", "files/test.csv", "csv"),
("scp", "files/test.csv", "csv"),
("sftp", "files/test.csv", "csv"),
("ssh", "files/test.csv.gz", "csv"), # text in binary
("ssh", "files/test.pkl", "pickle"), # binary
("sftp", "files/test.pkl.gz", "pickle"), # binary in binary
],
)
def test__read_from_private_ssh(provider_config, provider_name, file_path, file_format):
client = Client(dataset_name="output", format=file_format, url=file_path, provider=provider_config(provider_name))
result = next(client.read())
assert result == {"header1": "text", "header2": 1, "header3": 0.2}
@pytest.mark.parametrize(
"provider_name,file_path,file_format",
[
("ssh", "files/file_does_not_exist.csv", "csv"),
("gcs", "gs://gcp-public-data-landsat/file_does_not_exist.csv", "csv"),
],
)
def test__read_file_not_found(provider_config, provider_name, file_path, file_format):
client = Client(dataset_name="output", format=file_format, url=file_path, provider=provider_config(provider_name))
with pytest.raises(FileNotFoundError):
next(client.read())
@pytest.mark.parametrize(
"provider_name, file_path, file_format",
[
("ssh", "files/test.csv", "csv"),
("ssh", "files/test.pkl", "pickle"),
("sftp", "files/test.pkl.gz", "pickle"),
],
)
def test__streams_from_ssh_providers(provider_config, provider_name, file_path, file_format):
client = Client(dataset_name="output", format=file_format, url=file_path, provider=provider_config(provider_name))
streams = list(client.streams)
assert len(streams) == 1
assert streams[0].json_schema["properties"] == {
"header1": {"type": "string"},
"header2": {"type": "number"},
"header3": {"type": "number"},
}
@pytest.mark.parametrize(
"storage_provider, url, columns_nb, separator, has_header",
[
# epidemiology csv
("HTTPS", "https://storage.googleapis.com/covid19-open-data/v2/latest/epidemiology.csv", 10, ",", True),
("HTTPS", "storage.googleapis.com/covid19-open-data/v2/latest/epidemiology.csv", 10, ",", True),
("local", "injected by tests", 10, ",", True),
# landsat compressed csv
("GCS", "gs://gcp-public-data-landsat/index.csv.gz", 18, ",", True),
("GCS", "gs://gcp-public-data-landsat/index.csv.gz", 18, ",", True),
# GDELT csv
("S3", "s3://gdelt-open-data/events/20190914.export.csv", 58, "\\t", False),
("S3", "s3://gdelt-open-data/events/20190914.export.csv", 58, "\\t", False),
],
)
def test__read_from_public_provider(download_gcs_public_data, storage_provider, url, columns_nb, separator, has_header):
# inject temp file path that was downloaded by the test as URL
url = download_gcs_public_data if storage_provider == "local" else url
config = {
"format": "csv",
"dataset_name": "output",
"reader_options": json.dumps({"sep": separator, "nrows": 42}),
"provider": {"storage": storage_provider},
"url": url,
}
check_read(config, expected_columns=columns_nb)
def test__read_from_private_gcs(google_cloud_service_credentials, private_google_cloud_file):
config = {
"dataset_name": "output",
"format": "csv",
"url": private_google_cloud_file,
"reader_options": json.dumps({"sep": ",", "nrows": 42}),
"provider": {
"storage": "GCS",
"service_account_json": json.dumps(google_cloud_service_credentials),
},
}
check_read(config)
def test__read_from_private_aws(aws_credentials, private_aws_file):
config = {
"dataset_name": "output",
"format": "csv",
"url": private_aws_file,
"reader_options": json.dumps({"sep": ",", "nrows": 42}),
"provider": {
"storage": "S3",
"aws_access_key_id": aws_credentials["aws_access_key_id"],
"aws_secret_access_key": aws_credentials["aws_secret_access_key"],
},
}
check_read(config)
| null |
airbyte-integrations/connectors/source-file/integration_tests/client_storage_providers_test.py
|
client_storage_providers_test.py
|
py
| 5,557 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pathlib.Path",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "source_file.client.Client",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "source_file.client.Client",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "source_file.client.Client",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "source_file.client.Client",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 138,
"usage_type": "call"
}
] |
607013936
|
#
# github Repo: https://github.com/clejae
# ------------------------------------------ LOAD PACKAGES ---------------------------------------------------#
import os
import time
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
import numpy as np
# ------------------------------------------ DEFINE FUNCTIONS ------------------------------------------------#
def cm2inch(*tupl):
inch = 2.54
if isinstance(tupl[0], tuple):
return tuple(i/inch for i in tupl[0])
else:
return tuple(i/inch for i in tupl)
def prepareDataFrame(bl_lst, per, sheet, df_descr):
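# read one crop-sequence-type sheet per federal state, convert areas to percentages of total cropland,
# then interleave the states per main type (A-I) with a zero-filled spacer row between groups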
df_lst = []
for b, bl, in enumerate(bl_lst):
df_pth = r"data\tables\crop_sequence_types\{0}\{0}_{1}_{2}.xlsx".format(bl, per, df_descr)
df = pd.read_excel(df_pth, sheet)
t_area = df['SUM'].sum()
for i in range(1, 10):
df[str(i)] = round(df[str(i)] / t_area * 100, 2)
df_lst.append(df)
mt_lst = []
for mt in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']:
for df in df_lst:
df_mt = df[df['MainType'] == mt]
mt_lst.append(df_mt)
cols = ['MainType', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'SUM']
if mt != 'I':
df_dummy = pd.DataFrame(np.zeros((1, 11)), columns=cols)
mt_lst.append(df_dummy)
df_plt = pd.concat(mt_lst)
return df_plt
def plot_grouped_stacked_bars(df_plt, axs, ix, axs_title):
## plot stacked bars
colors = ['#ffd37f','#e69600','#a87000','#d1ff73','#7aab00','#4c7300','#bee8ff','#73b2ff','#004da8']
df_plt[[str(i)for i in range(1,10)]].plot(kind="bar", stacked=True, color=colors, ax = axs[ix], legend=False)
## set x and label them
axs[ix].set_yticks(np.arange(0, 36, step=5))
axs[ix].set_yticklabels(range(0, 36, 5), fontdict={'size': 10})
x_ticks = list(np.arange(0,26,1))
del x_ticks[2::3]
axs[ix].set_xticks(x_ticks)
# axs[ix].set_xticklabels(9 * ["BB","LS"],fontdict={'size': 10})
axs[ix].set_xticklabels(18 * [""],fontdict={'size': 10})
## add y-grid, adjust tick colors, remove frame
axs[ix].grid(b=True, which='major', axis='y', color='grey', linewidth=0.5)
axs[ix].tick_params(axis='x', colors='white', labelcolor ='black')
axs[ix].tick_params(axis='y', colors='grey', labelcolor ='black')
axs[ix].spines['bottom'].set_color('white')
axs[ix].spines['right'].set_visible(False)
axs[ix].spines['left'].set_visible(False)
axs[ix].spines['top'].set_visible(False)
axs[ix].set_axisbelow(True)
## set title of subplot (name of federal state)
axs[ix].set_title(axs_title, loc='center',fontdict={'size':12})# , 'weight':'semibold'})
# ------------------------------------------ START TIME ------------------------------------------------------#
stime = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
print("start: " + stime)
# ------------------------------------------ USER VARIABLES ------------------------------------------------#
wd = r'\\141.20.140.91\SAN_Projects\FORLand\Clemens\\'
out_pth = r"figures\poster\Landscape21 - farm characteristics.png"
# ------------------------------------------ LOAD DATA & PROCESSING ------------------------------------------#
os.chdir(wd)
## define lists to access data and datasheets
## and to provide federal state names for annotation
bl_lst = ['BB','LS']
strata_cattle = [[0, 1], [1, 50000]]
strata_oeko = [[0, 1], [7, 8]]
per = '2012-2018'
name_lst = [ "Conventional", "Organic"] #"Without cattle","With cattle",
## set plotting parameters
plt.rcParams['legend.handlelength'] = 1
plt.rcParams['legend.handleheight'] = 1.125
plt.rcParams['legend.title_fontsize'] = '10'
plt.rcParams["font.family"] = "Calibri"
## Create plot
ncol = 2
nrow = 1
fig, axs = plt.subplots(nrows=nrow, ncols=ncol, sharey=True, figsize=cm2inch(15, 9))
s = 0 # index for subplot (axs) title
# ## Plot cattle vs. no cattle husbandry
# for strat in strata_cattle:
# sheet = 'Collapsed>={}<{}'.format(strat[0], strat[1])
# ix = np.unravel_index(s, axs.shape)
#
# ## Prepare data frame
# df_descr = 'CSTArea-CattleNumbers_binary'
# df_plt = prepareDataFrame(bl_lst, per, sheet, df_descr)
#
# ## Plot stacked bars
# axs_title = name_lst[s]
# plot_grouped_stacked_bars(df_plt, axs, ix, axs_title)
#
# s += 1
## Plot conventional vs. organic
for strat in strata_oeko:
sheet = 'Collapsed>={}<{}'.format(strat[0], strat[1])
ix = np.unravel_index(s, axs.shape)
## Prepare data frame
df_descr = 'CSTArea-Oeko_AFSD'
df_plt = prepareDataFrame(bl_lst, per, sheet, df_descr)
## Plot stacked bars
axs_title = name_lst[s]
plot_grouped_stacked_bars(df_plt, axs, ix, axs_title)
s += 1
# for ix in [(0,0),(0,1)]:
for s in range(2):
ix = np.unravel_index(s, axs.shape)
## set y ticks and label them
axs[ix].set_yticks(np.arange(-5, 36, step=5))
y_labels = [str(i) for i in range(0, 36, 5)]
y_labels.insert(0,'')
axs[ix].set_yticklabels(y_labels, fontdict={'size': 10})
## annotate main types in top subplot
bbox = dict(facecolor='white', edgecolor='black', boxstyle='round')
x = .5
# for mt in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']:
# axs[ix].annotate(mt, xy=(x, -2.2), fontsize=8) # 35.5 -
# x += 3
for mt in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']:
axs[ix].annotate(mt, xy=(x, 35.5), fontsize=10)
x += 4
## annotate federal state labels (BB/LS) below the grouped bars
bbox = dict(facecolor='white', edgecolor='black', boxstyle='round')
xs = list(np.arange(0, 26, 1))
del xs[2::3]
for i in range(0,len(xs),2):
x = xs[i]
label = 'BB'
axs[ix].annotate(label, xy=(x - .4, -2), fontsize=8, rotation=90) # 35.5 -
for j in range(1,len(xs),2):
x = xs[j]
label = 'LS'
axs[ix].annotate(label, xy=(x - .4, -2), fontsize=8, rotation=90)
## Label x-axis in lower axis
axs[0].set_xlabel('Structural diversity', fontdict={'size': 10})
axs[1].set_xlabel('Structural diversity', fontdict={'size': 10})
## label y axis in lower axes
axs[0].set_ylabel('Share of cropland [%]', fontdict={'size': 10})
# axs[(1,0)].set_ylabel('Share of cropland [%]', fontdict={'size': 10})
# pad = 5
# for ax, row in zip(axs[:,0], ['a\n','b\n']):
# ax.annotate(row, xy=(2.0, 1.55), xytext=(-ax.yaxis.labelpad - pad, 0),
# xycoords=ax.yaxis.label, textcoords='offset points',
# size='large', ha='center', va='center', fontsize=12, fontweight = 'semibold')
# create custom legend
legend_elements = [Patch(facecolor='#ffd37f', edgecolor='#ffd37f',
label='1'),
Patch(facecolor='#e69600', edgecolor='#e69600',
label='2'),
Patch(facecolor='#a87000', edgecolor='#a87000',
label='3'),
Patch(facecolor='#d1ff73', edgecolor='#d1ff73',
label='4'),
Patch(facecolor='#7aab00', edgecolor='#7aab00',
label='5'),
Patch(facecolor='#4c7300', edgecolor='#4c7300',
label='6'),
Patch(facecolor='#bee8ff', edgecolor='#bee8ff',
label='7'),
Patch(facecolor='#73b2ff', edgecolor='#73b2ff',
label='8'),
Patch(facecolor='#004da8', edgecolor='#004da8',
label='9')]
fig.legend(handles=legend_elements, loc='lower center', ncol=9, title='Functional diversity', fontsize=8, frameon=False)# bbox_to_anchor= (0.00, 0.00, 0.1, 0.1))
fig.tight_layout()
fig.subplots_adjust(bottom=0.17)
plt.savefig(out_pth, dpi=300)
# ------------------------------------------ END TIME --------------------------------------------------------#
etime = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
print("start: " + stime)
print("end: " + etime)
# ------------------------------------------ UNUSED BUT USEFUL CODE SNIPPETS ---------------------------------#
| null |
figures_in_paper/Landscape21_grouped_stacked_bars_farm_characteristics.py
|
Landscape21_grouped_stacked_bars_farm_characteristics.py
|
py
| 8,095 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_excel",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "numpy.unravel_index",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "numpy.unravel_index",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches.Patch",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches.Patch",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches.Patch",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches.Patch",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches.Patch",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches.Patch",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches.Patch",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches.Patch",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches.Patch",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "time.strftime",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 205,
"usage_type": "call"
}
] |
116665284
|
from setuptools import setup, find_packages
import sys, os
version = '0.1'
setup(name='updater',
version=version,
description="Consistent update for OpenFlow Networks",
long_description="""\
This package provides update commands to satisfy requirement.""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='OpenFlow',
author='Satoshi Yamazaki',
author_email='',
url='',
license='',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
[paste.paster_command]
updater = updater.commands:Updater
""",
)
| null |
updater/setup.py
|
setup.py
|
py
| 820 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "setuptools.setup",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 17,
"usage_type": "call"
}
] |
483445797
|
#!python3
"""
Hi there.
This file doesn't contain any code.
It's just here to give an example of the file naming scheme.
Cheers!
"""
import validators
def isurl(link):
value=validators.url(link)
if(value==True):
print("It is a URL")
else:
print("It is not a URL")
def main():
string=str(input("ENTER THE URL TO CHECK:"))
isurl(string)
if __name__ == '__main__':
main()
| null |
December-09/python_Raahul46_IS_THIS_URL.py
|
python_Raahul46_IS_THIS_URL.py
|
py
| 409 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "validators.url",
"line_number": 12,
"usage_type": "call"
}
] |
696818
|
#!/usr/bin/env python
"""
lib/reykjavik/deps.py
A simple dependency manager that does NOT implement atomicity and concurrency.
What we do not implement here is the textbook semaphore behaviour:
* atomic behaviour
* decrementing and blocking
* incrementing and unblocking one waiting thread
* notification of schedulers that threads are blocked
Instead we just mimic half of the behaviour of a classic semaphore that we need here.
We do this to achieve a barebone distributed locking mechanism for our installation containers.
"""
import os
import time
import memcache
class DependencyManager(object):
"""
Uses a central memcached to implement distributed waiting in containers.
"""
def __init__(self):
return
@classmethod
def get_key(cls, server, service):
"""
Returns the memcache key
"""
return "server-{0}-service-{1}".format(server, service)
def unblock(self,
server=os.environ["SERVER"],
service=os.environ["SERVICE"],
barrier=os.environ["BARRIER"]):
"""
Unblocks a barrier for a given server, service and timestamp.
"""
key = self.get_key(server, service)
print("Unblocking [{0}:{1}]".format(key, barrier))
memcache.Client(['memcache:11211'], debug=0).set(key, barrier)
return 0
def block(self,
server=os.environ["SERVER"],
service="220-install",
barrier=os.environ["BARRIER"]):
"""
Blocks until a service becomes available.
"""
key = self.get_key(server, service)
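# poll memcache until another container publishes the expected barrier value for this key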
while True:
print("Blocking on [{0}:{1}]".format(key, barrier))
mcx = memcache.Client(['memcache:11211'], debug=0)
value = mcx.get(key)
if value:
if value == barrier:
print("Barrier [{0}:{1}] has been unlocked.".format(key, barrier))
return 0
print("Sleeping.")
time.sleep(10)
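# Minimal usage sketch (hypothetical server/service/barrier values; the real ones
# come from the SERVER, SERVICE and BARRIER environment variables):
#   DependencyManager().block(server="web01", service="220-install", barrier="build-42")
#   ... run the dependent installation step ...
#   DependencyManager().unblock(server="web01", service="220-install", barrier="build-42")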
| null |
docker/reykjavik/110-library/opt/reykjavik/lib/reykjavik/deps.py
|
deps.py
|
py
| 2,047 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.environ",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "memcache.Client",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "memcache.Client",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 69,
"usage_type": "call"
}
] |
628385208
|
from django.conf import settings
import numpy as np
import time
def get_arrangement_permutation(
dist,
mode,
model=None,
clusters=None,
init_perm=None):
start_time = time.time()
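# every mode below produces a permutation of the row indices of the distance matrix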
if mode == "none":
return [i for i in range(dist.shape[0])]
if mode == "hamilton":
from .hamilton_path import HamiltonPath
hp = HamiltonPath(dist, caller=model)
hp.solve()
perm = hp.path
elif mode == "hamilton_annealing":
from .hamilton_path import HamiltonPath
hp = HamiltonPath(dist, caller=model)
hp.solve_annealing()
perm = hp.path
elif mode == "tsne":
from sklearn.manifold import TSNE
tsne_model = TSNE(n_components=1, random_state=0, metric="precomputed")
tsne_result = tsne_model.fit_transform(dist).reshape(-1)
perm = np.argsort(tsne_result)
elif mode == "mds":
from sklearn.manifold import MDS
mds = MDS(
n_components=1,
max_iter=3000,
eps=1e-9,
random_state=0,
dissimilarity="precomputed",
n_jobs=4)
result = mds.fit_transform(dist).reshape(-1)
perm = np.argsort(result)
elif mode == "dendro":
from algo.arranging.dendro_arranger import DendroArranger
da = DendroArranger(dist)
perm = da.arrange()
else:
raise ValueError("Unknown mode: %s" % mode)
if model:
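# log arrangement quality metrics (NDS, MNR) and elapsed time on the calling model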
from .quality import NDS, MNR
model.NDS = NDS(dist, perm)
model.log("NDS=%f" % model.NDS)
model.log("MNR=%f" % MNR(dist, perm))
model.log("Time=%f" % (time.time() - start_time))
return perm
| null |
algo/arranging/base.py
|
base.py
|
py
| 1,702 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "time.time",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "hamilton_path.HamiltonPath",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "hamilton_path.HamiltonPath",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sklearn.manifold.TSNE",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sklearn.manifold.MDS",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "algo.arranging.dendro_arranger.DendroArranger",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "quality.NDS",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "quality.MNR",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 54,
"usage_type": "call"
}
] |