date_collected | repo_name | file_name | file_contents | prompts |
---|---|---|---|---|
2024-01-10 | opendreambox/python-coherence | coherence~extern~louie.py | """
Wrapper module for the louie implementation
"""
import warnings
from coherence.dispatcher import Dispatcher
warnings.warn("extern.louie will soon be deprecated in favor of coherence.dispatcher.")
class Any(object): pass
class All(object): pass
class Anonymous(object): pass
# fake the API
class Dummy(object): pass
signal = Dummy()
sender = Dummy()
#senders
sender.Anonymous = Anonymous
sender.Any = Any
#signals
signal.All = All
# a slightly more forgiving implementation, as louie was not so picky either
class GlobalDispatcher(Dispatcher):
def connect(self, signal, callback, *args, **kw):
if not signal in self.receivers:
# ugly hack
self.receivers[signal] = []
return Dispatcher.connect(self, signal, callback, *args, **kw)
def _get_receivers(self, signal):
try:
return self.receivers[signal]
except KeyError:
return []
global _global_dispatcher
_global_dispatcher = GlobalDispatcher()
_global_receivers_pool = {}
def connect(receiver, signal=All, sender=Any, weak=True):
callback = receiver
if signal in (Any, All):
raise NotImplementedError("This is not allowed. Signal HAS to be something")
receiver = _global_dispatcher.connect(signal, callback)
_global_receivers_pool["%s%s" %(callback, signal)] = receiver
return receiver
def disconnect(receiver, signal=All, sender=Any, weak=True):
callback = receiver
if signal in (Any, All):
raise NotImplementedError("This is not allowed. Signal HAS to be something")
key = "%s%s" %(callback, signal)
if key in _global_receivers_pool:
receiver = _global_receivers_pool.pop(key)
return _global_dispatcher.disconnect(receiver)
else:
warnings.warn("louie - cannot disconnect %s" % (key,))
return
def send(signal=All, sender=Anonymous, *arguments, **named):
if signal in (Any, All):
raise NotImplementedError("This is not allowed. Signal HAS to be something")
# the first value of the callback shall always be the signal:
return _global_dispatcher.save_emit(signal, *arguments, **named)
def send_minimal(signal=All, sender=Anonymous, *arguments, **named):
return send(signal, sender, *arguments, **named)
def send_exact(signal=All, sender=Anonymous, *arguments, **named):
return send(signal, sender, *arguments, **named)
def send_robust(signal=All, sender=Anonymous, *arguments, **named):
return send(signal, sender, *arguments, **named)
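# --- illustrative usage sketch (added; not part of the original module) ---
# A minimal, hypothetical example of driving this louie-compatible facade.
# The signal name mirrors the one used elsewhere in Coherence; the callback is
# made up and accepts any arguments, since how the dispatcher invokes it is
# delegated to coherence.dispatcher.
if __name__ == '__main__':
    def _on_ready(*args, **kwargs):
        print "init_completed received:", args, kwargs
    connect(_on_ready, signal='Coherence.UPnP.Backend.init_completed')
    send('Coherence.UPnP.Backend.init_completed', None, backend='fake-backend')
    disconnect(_on_ready, signal='Coherence.UPnP.Backend.init_completed')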
| [] |
2024-01-10 | opendreambox/python-coherence | coherence~upnp~devices~binary_light.py | # -*- coding: utf-8 -*-
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Frank Scholz <[email protected]>
import os
from twisted.internet import task
from twisted.internet import reactor
from twisted.web import resource, static
from coherence import __version__
from coherence.extern.et import ET, indent
from coherence.upnp.services.servers.switch_power_server import SwitchPowerServer
from coherence.upnp.devices.basics import RootDeviceXML, DeviceHttpRoot, BasicDeviceMixin
import coherence.extern.louie as louie
from coherence import log
class HttpRoot(DeviceHttpRoot):
logCategory = 'binarylight'
class BinaryLight(log.Loggable,BasicDeviceMixin):
logCategory = 'binarylight'
device_type = 'BinaryLight'
version = 1
def fire(self,backend,**kwargs):
if kwargs.get('no_thread_needed',False) == False:
""" this could take some time, put it in a thread to be sure it doesn't block
as we can't tell for sure that every backend is implemented properly """
from twisted.internet import threads
d = threads.deferToThread(backend, self, **kwargs)
def backend_ready(backend):
self.backend = backend
def backend_failure(x):
self.warning('backend not installed, %s activation aborted' % self.device_type)
self.debug(x)
d.addCallback(backend_ready)
d.addErrback(backend_failure)
# FIXME: we need a timeout here so that if the signal we are waiting for
# never arrives we can still shut down this device
else:
self.backend = backend(self, **kwargs)
def init_complete(self, backend):
if self.backend != backend:
return
self._services = []
self._devices = []
try:
self.switch_power_server = SwitchPowerServer(self)
self._services.append(self.switch_power_server)
except LookupError,msg:
self.warning( 'SwitchPowerServer', msg)
raise LookupError(msg)
upnp_init = getattr(self.backend, "upnp_init", None)
if upnp_init:
upnp_init()
self.web_resource = HttpRoot(self)
self.coherence.add_web_resource( str(self.uuid)[5:], self.web_resource)
version = self.version
while version > 0:
self.web_resource.putChild( 'description-%d.xml' % version,
RootDeviceXML( self.coherence.hostname,
str(self.uuid),
self.coherence.urlbase,
device_type=self.device_type, version=version,
friendly_name=self.backend.name,
model_description='Coherence UPnP %s' % self.device_type,
model_name='Coherence UPnP %s' % self.device_type,
services=self._services,
devices=self._devices,
icons=self.icons))
version -= 1
self.web_resource.putChild('SwitchPower', self.switch_power_server)
for icon in self.icons:
if icon.has_key('url'):
if icon['url'].startswith('file://'):
self.web_resource.putChild(os.path.basename(icon['url']),
static.File(icon['url'][7:]))
self.register()
self.warning("%s %s (%s) activated with %s" % (self.backend.name, self.device_type, self.backend, str(self.uuid)[5:]))
| [] |
2024-01-10 | opendreambox/python-coherence | coherence~upnp~core~DIDLLite.py | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2005, Tim Potter <[email protected]>
# Copyright 2006, Frank Scholz <[email protected]>
"""
TODO:
- use more XPath expressions in fromElement() methods
"""
import os
import string
import urllib
from datetime import datetime
DC_NS = 'http://purl.org/dc/elements/1.1/'
UPNP_NS = 'urn:schemas-upnp-org:metadata-1-0/upnp/'
DIDL_NS = 'urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/'
DLNA_NS = 'urn:schemas-dlna-org:metadata-1-0'
my_namespaces = { DC_NS: 'dc',
UPNP_NS: 'upnp'
}
from coherence.extern.et import ET, namespace_map_update, ElementInterface
namespace_map_update(my_namespaces)
from coherence.upnp.core import utils
from coherence.upnp.core import dlna
from coherence import log
def qname(tag,ns=''):
if len(ns) == 0:
return tag
return "{%s}%s" % (ns,tag)
def is_audio(mimetype):
""" checks for type audio,
expects a mimetype or a UPnP
protocolInfo
"""
test = mimetype.split(':')
if len(test) == 4:
mimetype = test[2]
if mimetype == 'application/ogg':
return True
if mimetype.startswith('audio/'):
return True
return False
def is_video(mimetype):
""" checks for type video,
expects a mimetype or a UPnP
protocolInfo
"""
test = mimetype.split(':')
if len(test) == 4:
mimetype = test[2]
if mimetype.startswith('video/'):
return True
return False
class Resources(list):
""" a list of resources, always sorted after an append """
def __init__(self, *args, **kwargs):
list.__init__(self, *args, **kwargs)
self.sort(cmp=self.p_sort)
def append(self, value):
list.append(self,value)
self.sort(cmp=self.p_sort)
def p_sort(self,x,y):
""" we want the following order
http-get is always at the beginning
rtsp-rtp-udp the second
anything else after that
"""
if x.protocolInfo == None:
return 1
if y.protocolInfo == None:
return -1
x_protocol = x.protocolInfo.split(':')[0]
y_protocol = y.protocolInfo.split(':')[0]
x_protocol = x_protocol.lower()
y_protocol = y_protocol.lower()
if( x_protocol == y_protocol):
return 0
if(x_protocol == 'http-get'):
return -1
if(x_protocol == 'rtsp-rtp-udp' and y_protocol == 'http-get'):
return 1
if(x_protocol == 'rtsp-rtp-udp' and y_protocol != 'http-get'):
return -1
return 1
def get_matching(self, local_protocol_infos, protocol_type = None):
result = []
if not isinstance(local_protocol_infos, list):
local_protocol_infos = [local_protocol_infos]
for res in self:
if res.importUri != None:
continue
#print "res", res.protocolInfo, res.data
remote_protocol,remote_network,remote_content_format,_ = res.protocolInfo.split(':')
#print "remote", remote_protocol,remote_network,remote_content_format
if(protocol_type is not None and
remote_protocol.lower() != protocol_type.lower()):
continue
for protocol_info in local_protocol_infos:
local_protocol,local_network,local_content_format,_ = protocol_info.split(':')
#print "local", local_protocol,local_network,local_content_format
if((remote_protocol == local_protocol or
remote_protocol == '*' or
local_protocol == '*') and
(remote_network == local_network or
remote_network == '*' or
local_network == '*') and
(remote_content_format.startswith(local_content_format) or
remote_content_format == '*' or
local_content_format == '*')):
#print result, res
result.append(res)
return result
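# --- illustrative sketch (added): typical use of Resources.get_matching ---
# A hypothetical client advertising mp3-over-http support matched against a
# resource list; the URL below is made up for illustration.
#
#   res = Resources([Resource('http://host/track.mp3', 'http-get:*:audio/mpeg:*')])
#   matches = res.get_matching(['http-get:*:audio/mpeg:*'], protocol_type='http-get')
#   # -> [<the http-get resource above>]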
def classChooser(mimetype, sub=None):
if mimetype == 'root':
return Container
if mimetype == 'item':
return Item
if mimetype == 'directory':
if sub == 'music':
return MusicAlbum
return Container
else:
if string.find (mimetype,'image/') == 0:
return Photo
if string.find (mimetype,'audio/') == 0:
if sub == 'music': # FIXME: this is stupid
return MusicTrack
return AudioItem
if string.find (mimetype,'video/') == 0:
return VideoItem
if mimetype == 'application/ogg':
if sub == 'music': # FIXME: this is stupid
return MusicTrack
return AudioItem
if mimetype == 'application/x-flac':
if sub == 'music': # FIXME: this is stupid
return MusicTrack
return AudioItem
return None
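# Illustrative note (added): classChooser maps a backend's mimetype hints to the
# DIDLLite classes defined below, e.g. (derived from the branches above)
#   classChooser('directory')                -> Container
#   classChooser('audio/mpeg', sub='music')  -> MusicTrack
#   classChooser('image/jpeg')               -> Photo
#   classChooser('application/unknown')      -> None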
simple_dlna_tags = ['DLNA.ORG_OP=01', # operations parameter
'DLNA.ORG_PS=1', # play speed parameter
'DLNA.ORG_CI=0', # transcoded parameter
'DLNA.ORG_FLAGS=01100000000000000000000000000000']
def build_dlna_additional_info(content_format,does_playcontainer=False):
additional_info = ['*']
if content_format == 'audio/mpeg':
additional_info = ['DLNA.ORG_PN=MP3']+simple_dlna_tags
if content_format == 'audio/ms-wma':
additional_info = ['DLNA.ORG_PN=WMABASE']+simple_dlna_tags
if content_format == 'image/jpeg':
dlna_tags = simple_dlna_tags[:]
dlna_tags[3] = 'DLNA.ORG_FLAGS=00900000000000000000000000000000'
additional_info = ['DLNA.ORG_PN=JPEG_LRG']+dlna_tags
if content_format == 'image/png':
dlna_tags = simple_dlna_tags[:]
dlna_tags[3] = 'DLNA.ORG_FLAGS=00900000000000000000000000000000'
additional_info = ['DLNA.ORG_PN=PNG_LRG']+dlna_tags
if content_format == 'video/mpeg':
additional_info = ['DLNA.ORG_PN=MPEG_PS_PAL']+simple_dlna_tags
if content_format == 'video/mpegts':
additional_info = ['DLNA.ORG_PN=MPEG_TS_PAL']+simple_dlna_tags
content_format = 'video/mpeg'
if content_format in ['video/mp4','video/x-m4a']:
additional_info = ['DLNA.ORG_PN=AVC_TS_BL_CIF15_AAC']+simple_dlna_tags
if content_format in ['video/x-msvideo','video/avi','video/divx']:
#additional_info = ';'.join(['DLNA.ORG_PN=MPEG4_P2_MP4_SP_AAC']+simple_dlna_tags)
additional_info = ['*']
if content_format == 'video/x-ms-wmv':
additional_info = ['DLNA.ORG_PN=WMV_BASE']+simple_dlna_tags
if content_format == '*':
additional_info = simple_dlna_tags
if does_playcontainer == True:
i = 0
for part in additional_info:
if part.startswith('DLNA.ORG_FLAGS'):
_,bits = part.split('=')
bits = int(bits,16)
bits |= 0x10000000000000000000000000000000
additional_info[i] = 'DLNA.ORG_FLAGS=%.32x' % bits
break
i += 1
return ';'.join(additional_info)
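# Illustrative example (added): for 'audio/mpeg' the function above yields the
# DLNA.ORG_PN profile joined with simple_dlna_tags, i.e. roughly
#   'DLNA.ORG_PN=MP3;DLNA.ORG_OP=01;DLNA.ORG_PS=1;DLNA.ORG_CI=0;DLNA.ORG_FLAGS=01100000000000000000000000000000'
# which becomes the fourth field of a res element's protocolInfo.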
class Resource(object):
"""An object representing a resource."""
def __init__(self, data=None, protocolInfo=None):
self.data = data
self.protocolInfo = protocolInfo
self.bitrate = None
self.size = None
self.duration = None
self.nrAudioChannels = None
self.resolution = None
self.importUri = None
if self.protocolInfo is not None:
protocol,network,content_format,additional_info = self.protocolInfo.split(':')
if additional_info == '*':
self.protocolInfo = ':'.join((protocol,network,content_format,build_dlna_additional_info(content_format)))
elif additional_info == '#':
self.protocolInfo = ':'.join((protocol,network,content_format,'*'))
def get_additional_info(self,upnp_client=''):
protocol,network,content_format,additional_info = self.protocolInfo.split(':')
if upnp_client in ('XBox','Philips-TV',):
""" we don't need the DLNA tags there,
and maybe they irritate these poor things anyway
"""
additional_info = '*'
elif upnp_client == 'PLAYSTATION3':
if content_format.startswith('video/'):
additional_info = '*'
elif upnp_client.startswith("Samsung"): #can be 'Samsung' or 'SamsungDMC10'
if content_format == "video/x-matroska": #Samsung uses a wrong mimetype for mkv
content_format = "video/x-mkv"
a_list = additional_info.split(';')
for part in a_list:
if part == 'DLNA.ORG_PS=1':
a_list.remove(part)
break
additional_info = ';'.join(a_list)
return additional_info
def toElement(self,**kwargs):
root = ET.Element('res')
if kwargs.get('upnp_client','') in ('XBox',):
protocol,network,content_format,additional_info = self.protocolInfo.split(':')
if content_format in ['video/divx','video/x-msvideo']:
content_format = 'video/avi'
if content_format == 'audio/x-wav':
content_format = 'audio/wav'
additional_info = self.get_additional_info(upnp_client=kwargs.get('upnp_client',''))
root.attrib['protocolInfo'] = ':'.join((protocol,network,content_format,additional_info))
else:
protocol,network,content_format,additional_info = self.protocolInfo.split(':')
if content_format == 'video/x-msvideo':
content_format = 'video/divx'
additional_info = self.get_additional_info(upnp_client=kwargs.get('upnp_client',''))
root.attrib['protocolInfo'] = ':'.join((protocol,network,content_format,additional_info))
root.text = self.data
if self.bitrate is not None:
root.attrib['bitrate'] = str(self.bitrate)
if self.size is not None:
root.attrib['size'] = str(self.size)
if self.duration is not None:
root.attrib['duration'] = self.duration
if self.nrAudioChannels is not None:
root.attrib['nrAudioChannels'] = self.nrAudioChannels
if self.resolution is not None:
root.attrib['resolution'] = self.resolution
if self.importUri is not None:
root.attrib['importUri'] = self.importUri
return root
def fromElement(self, elt):
self.protocolInfo = elt.attrib['protocolInfo']
self.data = elt.text
self.bitrate = elt.attrib.get('bitrate')
self.size = elt.attrib.get('size')
self.duration = elt.attrib.get('duration',None)
self.resolution = elt.attrib.get('resolution',None)
self.importUri = elt.attrib.get('importUri',None)
def toString(self,**kwargs):
return ET.tostring(self.toElement(**kwargs),encoding='utf-8')
@classmethod
def fromString(cls, aString):
instance = cls()
elt = utils.parse_xml(aString)
#elt = ElementTree(elt)
instance.fromElement(elt.getroot())
return instance
def transcoded(self,format):
protocol,network,content_format,additional_info = self.protocolInfo.split(':')
dlna_tags = simple_dlna_tags[:]
#dlna_tags[1] = 'DLNA.ORG_OP=00'
dlna_tags[2] = 'DLNA.ORG_CI=1'
if format == 'mp3':
if content_format == 'audio/mpeg':
return None
content_format='audio/mpeg'
dlna_pn = 'DLNA.ORG_PN=MP3'
elif format == 'lpcm':
dlna_pn = 'DLNA.ORG_PN=LPCM'
content_format='audio/L16;rate=44100;channels=2'
elif format == 'mpegts':
if content_format == 'video/mpeg':
return None
dlna_pn = 'DLNA.ORG_PN=MPEG_PS_PAL' # 'DLNA.ORG_PN=MPEG_TS_SD_EU' # FIXME - don't forget HD
content_format='video/mpeg'
else:
return None
additional_info = ';'.join([dlna_pn]+dlna_tags)
new_protocol_info = ':'.join((protocol,network,content_format,additional_info))
new_res = Resource(self.data+'/transcoded/%s' % format,
new_protocol_info)
new_res.size = None
new_res.duration = self.duration
new_res.resolution = self.resolution
return new_res
class PlayContainerResource(Resource):
"""An object representing a DLNA playcontainer resource."""
def __init__(self, udn, sid='urn:upnp-org:serviceId:ContentDirectory',
cid=None,
fid=None,
fii=0,
sc='',md=0,
protocolInfo=None):
Resource.__init__(self)
if cid == None:
raise AttributeError('missing Container Id')
if fid == None:
raise AttributeError('missing first Child Id')
self.protocolInfo = protocolInfo
args = ['sid=' + urllib.quote(sid),
'cid=' + urllib.quote(str(cid)),
'fid=' + urllib.quote(str(fid)),
'fii=' + urllib.quote(str(fii)),
'sc=' + urllib.quote(''),
'md=' + urllib.quote(str(0))]
self.data = 'dlna-playcontainer://' + urllib.quote(str(udn)) \
+ '?' + '&'.join(args)
if self.protocolInfo == None:
self.protocolInfo = 'http-get:*:*:*'
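# Illustrative example (added): with hypothetical values udn='uuid:1234',
# cid=17 and fid=18, the data attribute built above looks roughly like
#   dlna-playcontainer://uuid%3A1234?sid=urn%3Aupnp-org%3AserviceId%3AContentDirectory&cid=17&fid=18&fii=0&sc=&md=0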
class Object(log.Loggable):
"""The root class of the entire content directory class heirachy."""
logCategory = 'didllite'
upnp_class = 'object'
creator = None
res = None
writeStatus = None
date = None
albumArtURI = None
artist = None
genre = None
genres = None
album = None
originalTrackNumber=None
description = None
longDescription = None
refID = None
server_uuid = None
def __init__(self, id=None, parentID=None, title=None, restricted=False,
creator=None):
self.id = id
self.parentID = parentID
self.title = title
self.creator = creator
self.restricted = restricted
self.res = Resources()
def checkUpdate(self):
return self
def toElement(self,**kwargs):
root = ET.Element(self.elementName)
#if self.id == 1000:
# root.attrib['id'] = '0'
# ET.SubElement(root, 'dc:title').text = 'root'
#else:
# root.attrib['id'] = str(self.id)
# ET.SubElement(root, 'dc:title').text = self.title
root.attrib['id'] = str(self.id)
ET.SubElement(root, qname('title',DC_NS)).text = self.title
#if self.title != None:
# ET.SubElement(root, 'dc:title').text = self.title
#else:
# ET.SubElement(root, 'dc:title').text = 'root'
root.attrib['parentID'] = str(self.parentID)
if(kwargs.get('upnp_client','') != 'XBox'):
if self.refID:
root.attrib['refID'] = str(self.refID)
if kwargs.get('requested_id',None):
if kwargs.get('requested_id') == '0':
t = root.find(qname('title',DC_NS))
t.text = 'root'
#if kwargs.get('requested_id') != '0' and kwargs.get('requested_id') != root.attrib['id']:
if kwargs.get('requested_id') != root.attrib['id']:
if(kwargs.get('upnp_client','') != 'XBox'):
root.attrib['refID'] = root.attrib['id']
r_id = kwargs.get('requested_id')
root.attrib['id'] = r_id
r_id = r_id.split('@',1)
try:
root.attrib['parentID'] = r_id[1]
except IndexError:
pass
if(kwargs.get('upnp_client','') != 'XBox'):
self.info("Changing ID from %r to %r, with parentID %r", root.attrib['refID'], root.attrib['id'], root.attrib['parentID'])
else:
self.info("Changing ID from %r to %r, with parentID %r", self.id, root.attrib['id'], root.attrib['parentID'])
elif kwargs.get('parent_container',None):
if(kwargs.get('parent_container') != '0' and
kwargs.get('parent_container') != root.attrib['parentID']):
if(kwargs.get('upnp_client','') != 'XBox'):
root.attrib['refID'] = root.attrib['id']
root.attrib['id'] = '@'.join((root.attrib['id'],kwargs.get('parent_container')))
root.attrib['parentID'] = kwargs.get('parent_container')
if(kwargs.get('upnp_client','') != 'XBox'):
self.info("Changing ID from %r to %r, with parentID from %r to %r", root.attrib['refID'], root.attrib['id'], self.parentID, root.attrib['parentID'])
else:
self.info("Changing ID from %r to %r, with parentID from %r to %r", self.id, root.attrib['id'], self.parentID, root.attrib['parentID'])
ET.SubElement(root, qname('class',UPNP_NS)).text = self.upnp_class
upnp_client = kwargs.get('upnp_client','')
if upnp_client == 'XBox':
u = root.find(qname('class',UPNP_NS))
if(kwargs.get('parent_container',None) != None and
u.text.startswith('object.container')):
if kwargs.get('parent_container') in ('14','15','16'):
u.text = 'object.container.storageFolder'
if self.upnp_class == 'object.container':
u.text = 'object.container.storageFolder'
if self.restricted:
root.attrib['restricted'] = '1'
else:
root.attrib['restricted'] = '0'
if self.creator is not None:
ET.SubElement(root, qname('creator',DC_NS)).text = self.creator
if self.writeStatus is not None:
ET.SubElement(root, qname('writeStatus',UPNP_NS)).text = self.writeStatus
if self.date is not None:
if isinstance(self.date, datetime):
ET.SubElement(root, qname('date',DC_NS)).text = self.date.isoformat()
else:
ET.SubElement(root, qname('date',DC_NS)).text = self.date
else:
ET.SubElement(root, qname('date',DC_NS)).text = utils.datefaker().isoformat()
if self.albumArtURI is not None:
e = ET.SubElement(root, qname('albumArtURI',UPNP_NS))
e.text = self.albumArtURI
if not upnp_client.startswith("Samsung"): #can be 'Samsung' or 'SamsungDMC10'
e.attrib['xmlns:dlna'] = DLNA_NS
e.attrib['dlna:profileID'] = 'JPEG_TN'
if self.artist is not None:
ET.SubElement(root, qname('artist',UPNP_NS)).text = self.artist
if self.genre is not None:
ET.SubElement(root, qname('genre',UPNP_NS)).text = self.genre
if self.genres is not None:
for genre in self.genres:
ET.SubElement(root, qname('genre',UPNP_NS)).text = genre
if self.originalTrackNumber is not None:
ET.SubElement(root, qname('originalTrackNumber',UPNP_NS)).text = str(self.originalTrackNumber)
if self.description is not None:
ET.SubElement(root, qname('description',DC_NS)).text = self.description
if self.longDescription is not None:
ET.SubElement(root, qname('longDescription',UPNP_NS)).text = self.longDescription
if self.server_uuid is not None:
ET.SubElement(root, qname('server_uuid',UPNP_NS)).text = self.server_uuid
return root
def toString(self,**kwargs):
return ET.tostring(self.toElement(**kwargs),encoding='utf-8')
def fromElement(self, elt):
"""
TODO:
* creator
* writeStatus
"""
self.elementName = elt.tag
self.id = elt.attrib.get('id',None)
self.parentID = elt.attrib.get('parentID',None)
self.refID = elt.attrib.get('refID',None)
if elt.attrib.get('restricted',None) in [1,'true','True','1','yes','Yes']:
self.restricted = True
else:
self.restricted = False
for child in elt.getchildren():
if child.tag.endswith('title'):
self.title = child.text
elif child.tag.endswith('albumArtURI'):
self.albumArtURI = child.text
elif child.tag.endswith('originalTrackNumber'):
self.originalTrackNumber = int(child.text)
elif child.tag.endswith('description'):
self.description = child.text
elif child.tag.endswith('longDescription'):
self.longDescription = child.text
elif child.tag.endswith('artist'):
self.artist = child.text
elif child.tag.endswith('genre'):
if self.genre != None:
if self.genres == None:
self.genres = [self.genre,]
self.genres.append(child.text)
self.genre = child.text
elif child.tag.endswith('album'):
self.album = child.text
elif child.tag.endswith('class'):
self.upnp_class = child.text
elif child.tag.endswith('server_uuid'):
self.server_uuid = child.text
elif child.tag.endswith('res'):
res = Resource.fromString(ET.tostring(child))
self.res.append(res)
@classmethod
def fromString(cls, data):
instance = cls()
elt = utils.parse_xml(data)
#elt = ElementTree(elt)
instance.fromElement(elt.getroot())
return instance
class Item(Object):
"""A class used to represent atomic (non-container) content
objects."""
upnp_class = Object.upnp_class + '.item'
elementName = 'item'
refID = None
director = None
actors = None
language = None
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
def toElement(self,**kwargs):
root = Object.toElement(self,**kwargs)
if self.director is not None:
ET.SubElement(root, qname('director',UPNP_NS)).text = self.director
if self.refID is not None:
ET.SubElement(root, 'refID').text = self.refID
if self.actors is not None:
for actor in self.actors:
ET.SubElement(root, qname('actor',DC_NS)).text = actor
#if self.language is not None:
# ET.SubElement(root, qname('language',DC_NS)).text = self.language
if kwargs.get('transcoding',False) == True:
res = self.res.get_matching(['*:*:*:*'], protocol_type='http-get')
if len(res) > 0 and is_audio(res[0].protocolInfo):
old_res = res[0]
if(kwargs.get('upnp_client','') == 'XBox'):
transcoded_res = old_res.transcoded('mp3')
if transcoded_res != None:
root.append(transcoded_res.toElement(**kwargs))
else:
root.append(old_res.toElement(**kwargs))
else:
for res in self.res:
root.append(res.toElement(**kwargs))
transcoded_res = old_res.transcoded('lpcm')
if transcoded_res != None:
root.append(transcoded_res.toElement(**kwargs))
elif len(res) > 0 and is_video(res[0].protocolInfo):
old_res = res[0]
for res in self.res:
root.append(res.toElement(**kwargs))
transcoded_res = old_res.transcoded('mpegts')
if transcoded_res != None:
root.append(transcoded_res.toElement(**kwargs))
else:
for res in self.res:
root.append(res.toElement(**kwargs))
else:
for res in self.res:
root.append(res.toElement(**kwargs))
return root
def fromElement(self, elt):
Object.fromElement(self, elt)
for child in elt.getchildren():
if child.tag.endswith('refID'):
self.refID = child.text
elif child.tag.endswith('director'):
self.director = child.text
class ImageItem(Item):
upnp_class = Item.upnp_class + '.imageItem'
rating = None
storageMedium = None
publisher = None
rights = None
def toElement(self,**kwargs):
root = Item.toElement(self,**kwargs)
if self.rating is not None:
ET.SubElement(root, qname('rating',UPNP_NS)).text = str(self.rating)
if self.storageMedium is not None:
ET.SubElement(root, qname('storageMedium',UPNP_NS)).text = self.storageMedium
if self.publisher is not None:
ET.SubElement(root, qname('publisher',DC_NS)).text = self.publisher
if self.rights is not None:
ET.SubElement(root, qname('rights',DC_NS)).text = self.rights
return root
class Photo(ImageItem):
upnp_class = ImageItem.upnp_class + '.photo'
album = None
def toElement(self,**kwargs):
root = ImageItem.toElement(self,**kwargs)
if self.album is not None:
ET.SubElement(root, qname('album',UPNP_NS)).text = self.album
return root
class AudioItem(Item):
"""A piece of content that when rendered generates some audio."""
upnp_class = Item.upnp_class + '.audioItem'
publisher = None
language = None
relation = None
rights = None
valid_keys = ['genre', 'description', 'longDescription', 'publisher',
'language', 'relation', 'rights', 'albumArtURI']
#@dlna.AudioItem
def toElement(self,**kwargs):
root = Item.toElement(self,**kwargs)
if self.publisher is not None:
ET.SubElement(root, qname('publisher',DC_NS)).text = self.publisher
if self.language is not None:
ET.SubElement(root, qname('language',DC_NS)).text = self.language
if self.relation is not None:
ET.SubElement(root, qname('relation',DC_NS)).text = self.relation
if self.rights is not None:
ET.SubElement(root, qname('rights',DC_NS)).text = self.rights
return root
def fromElement(self, elt):
Item.fromElement(self, elt)
for child in elt.getchildren():
tag = child.tag
val = child.text
if tag in self.valid_keys:
setattr(self, tag, val)
class MusicTrack(AudioItem):
"""A discrete piece of audio that should be interpreted as music."""
upnp_class = AudioItem.upnp_class + '.musicTrack'
album = None
playlist = None
storageMedium = None
contributor = None
def toElement(self,**kwargs):
root = AudioItem.toElement(self,**kwargs)
if self.album is not None:
ET.SubElement(root, qname('album',UPNP_NS)).text = self.album
if self.playlist is not None:
ET.SubElement(root, qname('playlist',UPNP_NS)).text = self.playlist
if self.storageMedium is not None:
ET.SubElement(root, qname('storageMedium',UPNP_NS)).text = self.storageMedium
if self.contributor is not None:
ET.SubElement(root, qname('contributor',DC_NS)).text = self.contributor
return root
class AudioBroadcast(AudioItem):
upnp_class = AudioItem.upnp_class + '.audioBroadcast'
class AudioBook(AudioItem):
upnp_class = AudioItem.upnp_class + '.audioBook'
class VideoItem(Item):
upnp_class = Item.upnp_class + '.videoItem'
valid_attrs = dict(genre=UPNP_NS, longDescription=UPNP_NS,
producer=UPNP_NS, rating=UPNP_NS,
actor=UPNP_NS, director=UPNP_NS,
description=DC_NS, publisher=DC_NS, language=DC_NS,
relation=DC_NS)
def toElement(self,**kwargs):
root = Item.toElement(self,**kwargs)
for attr_name, ns in self.valid_attrs.iteritems():
value = getattr(self, attr_name, None)
if value:
ET.SubElement(root, qname(attr_name, ns)).text = value
return root
def fromElement(self, elt):
Item.fromElement(self, elt)
for child in elt.getchildren():
tag = child.tag
val = child.text
if tag in self.valid_attrs.keys():
setattr(self, tag, val)
class Movie(VideoItem):
upnp_class = VideoItem.upnp_class + '.movie'
def __init__(self, *args, **kwargs):
VideoItem.__init__(self, *args, **kwargs)
self.valid_attrs.update(dict(storageMedium=UPNP_NS, DVDRegionCode=UPNP_NS,
channelName=UPNP_NS, scheduledStartTime=UPNP_NS,
scheduledEndTime=UPNP_NS))
class VideoBroadcast(VideoItem):
upnp_class = VideoItem.upnp_class + '.videoBroadcast'
class MusicVideoClip(VideoItem):
upnp_class = VideoItem.upnp_class + '.musicVideoClip'
class PlaylistItem(Item):
upnp_class = Item.upnp_class + '.playlistItem'
class TextItem(Item):
upnp_class = Item.upnp_class + '.textItem'
class Container(Object):
"""An object that can contain other objects."""
upnp_class = Object.upnp_class + '.container'
elementName = 'container'
childCount = None
createClass = None
searchable = None
def __init__(self, id=None, parentID=None, title=None,
restricted = False, creator = None):
Object.__init__(self, id, parentID, title, restricted, creator)
self.searchClass = []
def toElement(self,**kwargs):
root = Object.toElement(self,**kwargs)
if self.childCount is not None:
root.attrib['childCount'] = str(self.childCount)
if self.createClass is not None:
ET.SubElement(root, qname('createclass',UPNP_NS)).text = self.createClass
if not isinstance(self.searchClass, (list, tuple)):
self.searchClass = [self.searchClass]
for i in self.searchClass:
sc = ET.SubElement(root, qname('searchClass',UPNP_NS))
sc.attrib['includeDerived'] = '1'
sc.text = i
if self.searchable is not None:
if self.searchable in (1, '1', True, 'true', 'True'):
root.attrib['searchable'] = '1'
else:
root.attrib['searchable'] = '0'
for res in self.res:
root.append(res.toElement(**kwargs))
return root
def fromElement(self, elt):
Object.fromElement(self, elt)
v = elt.attrib.get('childCount',None)
if v is not None:
self.childCount = int(v)
#self.searchable = int(elt.attrib.get('searchable','0'))
self.searchable = elt.attrib.get('searchable','0') in [1,'True','true','1']
self.searchClass = []
for child in elt.getchildren():
if child.tag.endswith('createclass'):
self.createClass = child.text
elif child.tag.endswith('searchClass'):
self.searchClass.append(child.text)
class Music(Container):
upnp_class = Container.upnp_class + '.music'
class Person(Container):
upnp_class = Container.upnp_class + '.person'
class MusicArtist(Person):
upnp_class = Person.upnp_class + '.musicArtist'
class PlaylistContainer(Container):
upnp_class = Container.upnp_class + '.playlistContainer'
class Album(Container):
upnp_class = Container.upnp_class + '.album'
class MusicAlbum(Album):
upnp_class = Album.upnp_class + '.musicAlbum'
class PhotoAlbum(Album):
upnp_class = Album.upnp_class + '.photoAlbum'
class Genre(Container):
upnp_class = Container.upnp_class + '.genre'
class MusicGenre(Genre):
upnp_class = Genre.upnp_class + '.musicGenre'
class MovieGenre(Genre):
upnp_class = Genre.upnp_class + '.movieGenre'
class StorageSystem(Container):
upnp_class = Container.upnp_class + '.storageSystem'
class StorageVolume(Container):
upnp_class = Container.upnp_class + '.storageVolume'
class StorageFolder(Container):
upnp_class = Container.upnp_class + '.storageFolder'
class DIDLElement(ElementInterface,log.Loggable):
logCategory = 'didllite'
def __init__(self, upnp_client='',
parent_container=None,requested_id=None,
transcoding=False):
ElementInterface.__init__(self, 'DIDL-Lite', {"xmlns" : DIDL_NS})
self._items = []
self.upnp_client = upnp_client
self.parent_container = parent_container
self.requested_id = requested_id
self.transcoding = transcoding
def addContainer(self, id, parentID, title, restricted = False):
e = Container(id, parentID, title, restricted, creator = '')
self.append(e.toElement())
def addItem(self, item):
self.append(item.toElement(upnp_client=self.upnp_client,
parent_container=self.parent_container,
requested_id=self.requested_id,
transcoding=self.transcoding))
self._items.append(item)
def rebuild(self):
self._children = []
for item in self._items:
self.append(item.toElement(upnp_client=self.upnp_client,
parent_container=self.parent_container,
requested_id=self.requested_id,
transcoding=self.transcoding))
def numItems(self):
return len(self)
def getItems(self):
return self._items
def toString(self):
""" sigh - having that optional preamble here
breaks some of the older ContentDirectoryClients
"""
#preamble = """<?xml version="1.0" encoding="utf-8"?>"""
#return preamble + ET.tostring(self,encoding='utf-8')
return ET.tostring(self,encoding='utf-8')
def get_upnp_class(self,name):
try:
return upnp_classes[name]()
except KeyError:
self.warning("upnp_class %r not found, trying fallback", name)
parts = name.split('.')
parts.pop()
while len(parts) > 1:
try:
return upnp_classes['.'.join(parts)]()
except KeyError:
parts.pop()
self.warning("WTF - no fallback for upnp_class %r found ?!?", name)
return None
@classmethod
def fromString(cls, aString):
instance = cls()
elt = utils.parse_xml(aString, 'utf-8')
elt = elt.getroot()
for node in elt.getchildren():
upnp_class_name = node.findtext('{%s}class' % 'urn:schemas-upnp-org:metadata-1-0/upnp/')
if upnp_class_name is None:
#Samsung sends something like <sec:deviceFriendlyName>...</sec:deviceFriendlyName>
#simply ignore that
continue
upnp_class = instance.get_upnp_class(upnp_class_name.strip())
new_node = upnp_class.fromString(ET.tostring(node))
instance.addItem(new_node)
return instance
def element_to_didl(item):
""" a helper method to create a DIDLElement out of one ET element
or XML fragment string
"""
if not isinstance(item,basestring):
item = ET.tostring(item)
didl = """<DIDL-Lite xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:dlna="urn:schemas-dlna-org:metadata-1-0"
xmlns:pv="http://www.pv.com/pvns/"
xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/">""" \
+ item + \
"""</DIDL-Lite>"""
return didl
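# --- illustrative usage sketch (added) ---
# Wrapping a bare item fragment so it can be parsed as a complete DIDL-Lite
# document; the fragment below is made up for illustration.
#
#   fragment = ('<item id="1" parentID="0" restricted="0">'
#               '<upnp:class>object.item</upnp:class></item>')
#   didl = DIDLElement.fromString(element_to_didl(fragment))
#   items = didl.getItems()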
upnp_classes = {'object': Object,
'object.item': Item,
'object.item.imageItem': ImageItem,
'object.item.imageItem.photo': Photo,
'object.item.audioItem': AudioItem,
'object.item.audioItem.musicTrack': MusicTrack,
'object.item.audioItem.audioBroadcast': AudioBroadcast,
'object.item.audioItem.audioBook': AudioBook,
'object.item.videoItem': VideoItem,
'object.item.videoItem.movie': Movie,
'object.item.videoItem.videoBroadcast': VideoBroadcast,
'object.item.videoItem.musicVideoClip': MusicVideoClip,
'object.item.playlistItem': PlaylistItem,
'object.item.textItem': TextItem,
'object.container': Container,
'object.container.music': Music,
'object.container.person': Person,
'object.container.person.musicArtist': MusicArtist,
'object.container.playlistContainer': PlaylistContainer,
'object.container.album': Album,
'object.container.album.musicAlbum': MusicAlbum,
'object.container.album.photoAlbum': PhotoAlbum,
'object.container.genre': Genre,
'object.container.genre.musicGenre': MusicGenre,
'object.container.genre.movieGenre': MovieGenre,
'object.container.storageSystem': StorageSystem,
'object.container.storageVolume': StorageVolume,
'object.container.storageFolder': StorageFolder,
}
if __name__ == '__main__':
res = Resources()
res.append(Resource('1','file:*:*:*'))
res.append(Resource('2','rtsp-rtp-udp:*:*:*'))
res.append(Resource('3',None))
res.append(Resource('4','internal:*:*:*'))
res.append(Resource('5','http-get:*:*:*'))
res.append(Resource('6','something:*:*:*'))
res.append(Resource('7','http-get:*:*:*'))
for r in res:
print r.data, r.protocolInfo
| [] |
2024-01-10 | opendreambox/python-coherence | coherence~upnp~services~servers~av_transport_server.py | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2006, Frank Scholz <[email protected]>
# AVTransport service
from twisted.web import resource
from coherence.upnp.core.soap_service import UPnPPublisher
from coherence.upnp.core import service
class AVTransportControl(service.ServiceControl,UPnPPublisher):
def __init__(self, server):
self.service = server
self.variables = server.get_variables()
self.actions = server.get_actions()
class AVTransportServer(service.ServiceServer, resource.Resource):
def __init__(self, device, backend=None):
self.device = device
if backend == None:
backend = self.device.backend
resource.Resource.__init__(self)
service.ServiceServer.__init__(self, 'AVTransport', self.device.version, backend)
self.control = AVTransportControl(self)
self.putChild(self.scpd_url, service.scpdXML(self))
self.putChild(self.control_url, self.control)
def listchilds(self, uri):
cl = ''
for c in self.children:
cl += '<li><a href=%s/%s>%s</a></li>' % (uri,c,c)
return cl
def render(self,request):
return '<html><p>root of the AVTransport</p><p><ul>%s</ul></p></html>'% self.listchilds(request.uri)
| [] |
2024-01-10 | opendreambox/python-coherence | coherence~backends~elisa_storage.py | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2006, Frank Scholz <[email protected]>
import re
from twisted.spread import pb
from twisted.internet import reactor
from twisted.python import failure
from coherence.upnp.core.DIDLLite import classChooser, Container, Resource, DIDLElement
from coherence.upnp.core.soap_service import errorCode
import coherence.extern.louie as louie
from coherence.extern.simple_plugin import Plugin
class ElisaMediaStore(Plugin):
""" this is a backend to the Elisa Media DB
Elisa needs to expose two methods
get_root_id(media_type)
if media_type == '*'
this returns the root id of the media collection
if media_type == 'audio'
this returns the root id of the audio collection
get_item_by_id(id)
this returns a dict with the following keys:
id = id in the media db
parent_id = parent_id in the media db
name = title, album name or basename
mimetype = 'directory' or real mimetype
children = list of objects for which this item is the parent
location = filesystem path if item is a file
cover = url by which the cover image can be retrieved (OPTIONAL)
size = in bytes (OPTIONAL)
"""
implements = ['MediaServer']
def __init__(self, server, **kwargs):
self.name = kwargs.get('name','Elisa')
self.host = kwargs.get('host','127.0.0.1')
self.urlbase = kwargs.get('urlbase','')
ignore_patterns = kwargs.get('ignore_patterns',[])
if not self.urlbase.endswith('/'):
self.urlbase += '/'
self.server = server
self.update_id = 0
self.root_id = 0
self.get_root_id()
def __repr__(self):
return "Elisa storage"
def get_store(self):
factory = pb.PBClientFactory()
factory.noisy = False
reactor.connectTCP(self.host, 8789, factory)
return factory.getRootObject()
def get_by_id(self,id):
try:
return self.store[int(id)]
except:
return None
def set_root_id( self, id):
self.root_id = id
louie.send('Coherence.UPnP.Backend.init_completed', None, backend=self)
def get_root_id( self, media_type='audio'):
""" ask Elisa to tell us the id of the top item
representing the media_type == 'something' collection """
store = self.get_store()
dfr = store.addCallback(lambda object:
object.callRemote('get_cache_manager'))
dfr.addCallback(lambda cache_mgr:
cache_mgr.callRemote("get_media_root_id", media_type))
dfr.addCallback(self.set_root_id)
def upnp_init(self):
if self.server:
self.server.connection_manager_server.set_variable(0, 'SourceProtocolInfo',
['internal:%s:*:*' % self.host,
'http-get:*:audio/mpeg:*'])
def upnp_Browse(self, *args, **kwargs):
ObjectID = kwargs['ObjectID']
BrowseFlag = kwargs['BrowseFlag']
Filter = kwargs['Filter']
StartingIndex = int(kwargs['StartingIndex'])
RequestedCount = int(kwargs['RequestedCount'])
SortCriteria = kwargs['SortCriteria']
def build_upnp_item(elisa_item):
UPnPClass = classChooser(elisa_item['mimetype'])
upnp_item = None
if UPnPClass:
upnp_item = UPnPClass(elisa_item['id'],
elisa_item['parent_id'],
elisa_item['name'])
if isinstance(upnp_item, Container):
upnp_item.childCount = len(elisa_item.get('children',[]))
if len(Filter) > 0:
upnp_item.searchable = True
upnp_item.searchClass = ('object',)
else:
internal_url = elisa_item['location'].get('internal')
external_url = elisa_item['location'].get('external')
try:
size = elisa_item['size']
except:
size = None
try:
cover = elisa_item['cover']
if cover != '':
upnp_item.albumArtURI = cover
except:
pass
res = Resource(internal_url,
'internal:%s:*:*' %self.host)
res.size = size
upnp_item.res.append(res)
res = Resource(external_url,
'http-get:*:%s:*' % elisa_item['mimetype'])
res.size = size
upnp_item.res.append(res)
return upnp_item
def got_result(elisa_item):
didl = DIDLElement()
children = elisa_item.get('children',[])
if BrowseFlag == 'BrowseDirectChildren':
if RequestedCount == 0:
childs = children[StartingIndex:]
else:
childs = children[StartingIndex:StartingIndex+RequestedCount]
for child in childs:
if child is not None:
item = build_upnp_item(child)
if item:
didl.addItem(item)
total = len(children)
elif elisa_item:
item = build_upnp_item(elisa_item)
if item:
didl.addItem(item)
total = 1
r = { 'Result': didl.toString(), 'TotalMatches': total,
'NumberReturned': didl.numItems()}
if hasattr(elisa_item, 'update_id'):
r['UpdateID'] = item.update_id
else:
r['UpdateID'] = self.update_id
return r
def errback(r):
return failure.Failure(errorCode(701))
id = ObjectID
if id == 0:
id = self.root_id
store = self.get_store()
dfr = store.addCallback(lambda object:
object.callRemote('get_cache_manager'))
dfr.addErrback(errback)
dfr.addCallback(lambda cache_mgr:
cache_mgr.callRemote("get_media_node_with_id", id))
dfr.addCallback(got_result)
return dfr
if __name__ == '__main__':
def main():
p = 'localhost'
def got_result(result):
print result
f = ElisaMediaStore(None, name='my media', host=p, urlbase='http://localhost/')
dfr = f.upnp_Browse(BrowseFlag='BrowseDirectChildren',
RequestedCount=0,
StartingIndex=0,
ObjectID=23,
SortCriteria='*',
Filter='')
dfr.addCallback(got_result)
dfr.addCallback(lambda _: reactor.stop())
reactor.callLater(0.1, main)
reactor.run()
| [] |
2024-01-10 | opendreambox/python-coherence | coherence~upnp~services~servers~content_directory_server.py | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2005, Tim Potter <[email protected]>
# Copyright 2006 John-Mark Gurney <[email protected]>
# Copyright 2006, Frank Scholz <[email protected]>
# Content Directory service
from twisted.python import failure
from twisted.web import resource
from twisted.internet import defer
from coherence.upnp.core.soap_service import UPnPPublisher
from coherence.upnp.core.soap_service import errorCode
from coherence.upnp.core.DIDLLite import DIDLElement
from coherence.upnp.core import service
from coherence import log
class ContentDirectoryControl(service.ServiceControl,UPnPPublisher):
def __init__(self, server):
self.service = server
self.variables = server.get_variables()
self.actions = server.get_actions()
class ContentDirectoryServer(service.ServiceServer, resource.Resource,
log.Loggable):
logCategory = 'content_directory_server'
def __init__(self, device, backend=None,transcoding=False):
self.device = device
self.transcoding=transcoding
if backend == None:
backend = self.device.backend
resource.Resource.__init__(self)
service.ServiceServer.__init__(self, 'ContentDirectory', self.device.version, backend)
self.control = ContentDirectoryControl(self)
self.putChild('scpd.xml', service.scpdXML(self, self.control))
self.putChild('control', self.control)
self.set_variable(0, 'SystemUpdateID', 0)
self.set_variable(0, 'ContainerUpdateIDs', '')
def listchilds(self, uri):
cl = ''
for c in self.children:
cl += '<li><a href=%s/%s>%s</a></li>' % (uri,c,c)
return cl
def render(self,request):
return '<html><p>root of the ContentDirectory</p><p><ul>%s</ul></p></html>'% self.listchilds(request.uri)
def upnp_Search(self, *args, **kwargs):
ContainerID = kwargs['ContainerID']
Filter = kwargs['Filter']
StartingIndex = int(kwargs['StartingIndex'])
RequestedCount = int(kwargs['RequestedCount'])
SortCriteria = kwargs['SortCriteria']
SearchCriteria = kwargs['SearchCriteria']
total = 0
root_id = 0
item = None
items = []
parent_container = str(ContainerID)
didl = DIDLElement(upnp_client=kwargs.get('X_UPnPClient', ''),
parent_container=parent_container,
transcoding=self.transcoding)
def build_response(tm):
r = {'Result': didl.toString(), 'TotalMatches': tm,
'NumberReturned': didl.numItems()}
if hasattr(item, 'update_id'):
r['UpdateID'] = item.update_id
elif hasattr(self.backend, 'update_id'):
r['UpdateID'] = self.backend.update_id # FIXME
else:
r['UpdateID'] = 0
return r
def got_error(r):
return r
def process_result(result,total=None,found_item=None):
if result == None:
result = []
l = []
def process_items(result, tm):
if result == None:
result = []
for i in result:
if i[0] == True:
didl.addItem(i[1])
return build_response(tm)
for i in result:
d = defer.maybeDeferred( i.get_item)
l.append(d)
if found_item != None:
def got_child_count(count):
dl = defer.DeferredList(l)
dl.addCallback(process_items, count)
return dl
d = defer.maybeDeferred(found_item.get_child_count)
d.addCallback(got_child_count)
return d
elif total == None:
total = item.get_child_count()
dl = defer.DeferredList(l)
dl.addCallback(process_items, total)
return dl
def proceed(result):
if(kwargs.get('X_UPnPClient', '') == 'XBox' and
hasattr(result, 'get_artist_all_tracks')):
d = defer.maybeDeferred( result.get_artist_all_tracks, StartingIndex, StartingIndex + RequestedCount)
else:
d = defer.maybeDeferred( result.get_children, StartingIndex, StartingIndex + RequestedCount)
d.addCallback(process_result,found_item=result)
d.addErrback(got_error)
return d
try:
root_id = ContainerID
except:
pass
wmc_mapping = getattr(self.backend, "wmc_mapping", None)
if kwargs.get('X_UPnPClient', '') == 'XBox':
if(wmc_mapping != None and
wmc_mapping.has_key(ContainerID)):
""" fake a Windows Media Connect Server
"""
root_id = wmc_mapping[ContainerID]
if callable(root_id):
item = root_id()
if item is not None:
if isinstance(item, list):
total = len(item)
if int(RequestedCount) == 0:
items = item[StartingIndex:]
else:
items = item[StartingIndex:StartingIndex+RequestedCount]
return process_result(items,total=total)
else:
if isinstance(item,defer.Deferred):
item.addCallback(proceed)
return item
else:
return proceed(item)
item = self.backend.get_by_id(root_id)
if item == None:
return process_result([],total=0)
if isinstance(item,defer.Deferred):
item.addCallback(proceed)
return item
else:
return proceed(item)
item = self.backend.get_by_id(root_id)
if item == None:
return failure.Failure(errorCode(701))
if isinstance(item,defer.Deferred):
item.addCallback(proceed)
return item
else:
return proceed(item)
def upnp_Browse(self, *args, **kwargs):
try:
ObjectID = kwargs['ObjectID']
except:
self.debug("hmm, a Browse action and no ObjectID argument? An XBox maybe?")
try:
ObjectID = kwargs['ContainerID']
except:
ObjectID = 0
BrowseFlag = kwargs['BrowseFlag']
Filter = kwargs['Filter']
StartingIndex = int(kwargs['StartingIndex'])
RequestedCount = int(kwargs['RequestedCount'])
SortCriteria = kwargs['SortCriteria']
parent_container = None
requested_id = None
item = None
total = 0
items = []
if BrowseFlag == 'BrowseDirectChildren':
parent_container = str(ObjectID)
else:
requested_id = str(ObjectID)
self.info("upnp_Browse request %r %r %r %r", ObjectID, BrowseFlag, StartingIndex, RequestedCount)
didl = DIDLElement(upnp_client=kwargs.get('X_UPnPClient', ''),
requested_id=requested_id,
parent_container=parent_container,
transcoding=self.transcoding)
def got_error(r):
return r
def process_result(result,total=None,found_item=None):
if result == None:
result = []
if BrowseFlag == 'BrowseDirectChildren':
l = []
def process_items(result, tm):
if result == None:
result = []
for i in result:
if i[0] == True:
didl.addItem(i[1])
return build_response(tm)
for i in result:
d = defer.maybeDeferred( i.get_item)
l.append(d)
if found_item != None:
def got_child_count(count):
dl = defer.DeferredList(l)
dl.addCallback(process_items, count)
return dl
d = defer.maybeDeferred(found_item.get_child_count)
d.addCallback(got_child_count)
return d
elif total == None:
total = item.get_child_count()
dl = defer.DeferredList(l)
dl.addCallback(process_items, total)
return dl
else:
didl.addItem(result)
total = 1
return build_response(total)
def build_response(tm):
r = {'Result': didl.toString(), 'TotalMatches': tm,
'NumberReturned': didl.numItems()}
if hasattr(item, 'update_id'):
r['UpdateID'] = item.update_id
elif hasattr(self.backend, 'update_id'):
r['UpdateID'] = self.backend.update_id # FIXME
else:
r['UpdateID'] = 0
return r
def proceed(result):
if BrowseFlag == 'BrowseDirectChildren':
d = defer.maybeDeferred( result.get_children, StartingIndex, StartingIndex + RequestedCount)
else:
d = defer.maybeDeferred( result.get_item)
d.addCallback(process_result,found_item=result)
d.addErrback(got_error)
return d
root_id = ObjectID
wmc_mapping = getattr(self.backend, "wmc_mapping", None)
if(kwargs.get('X_UPnPClient', '') == 'XBox' and
wmc_mapping != None and
wmc_mapping.has_key(ObjectID)):
""" fake a Windows Media Connect Server
"""
root_id = wmc_mapping[ObjectID]
if callable(root_id):
item = root_id()
if item is not None:
if isinstance(item, list):
total = len(item)
if int(RequestedCount) == 0:
items = item[StartingIndex:]
else:
items = item[StartingIndex:StartingIndex+RequestedCount]
return process_result(items,total=total)
else:
if isinstance(item,defer.Deferred):
item.addCallback(proceed)
return item
else:
return proceed(item)
item = self.backend.get_by_id(root_id)
if item == None:
return process_result([],total=0)
if isinstance(item,defer.Deferred):
item.addCallback(proceed)
return item
else:
return proceed(item)
item = self.backend.get_by_id(root_id)
if item == None:
return failure.Failure(errorCode(701))
if isinstance(item,defer.Deferred):
item.addCallback(proceed)
return item
else:
return proceed(item)
| [] |
2024-01-10 | opendreambox/python-coherence | coherence~extern~telepathy~tube.py | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2009 Philippe Normand <[email protected]>
from telepathy.interfaces import CHANNEL_INTERFACE, CONNECTION_INTERFACE_REQUESTS, \
CHANNEL_TYPE_DBUS_TUBE, ACCOUNT
from telepathy.constants import CONNECTION_HANDLE_TYPE_ROOM, \
SOCKET_ACCESS_CONTROL_CREDENTIALS
from coherence.extern.telepathy.client import Client
class TubePublisherMixin(object):
def __init__(self, tubes_to_offer):
self._tubes_to_offer = tubes_to_offer
def muc_joined(self):
self.info("muc joined. Offering the tubes")
conn_iface = self.conn[CONNECTION_INTERFACE_REQUESTS]
params = {CHANNEL_INTERFACE + ".ChannelType": CHANNEL_TYPE_DBUS_TUBE,
CHANNEL_INTERFACE + ".TargetHandleType": CONNECTION_HANDLE_TYPE_ROOM,
CHANNEL_INTERFACE + ".TargetID": self.muc_id}
for interface in self._tubes_to_offer.keys():
params[CHANNEL_TYPE_DBUS_TUBE + ".ServiceName"] = interface
conn_iface.CreateChannel(params)
def got_tube(self, tube):
super(TubePublisherMixin, self).got_tube(tube)
initiator_handle = tube.props[CHANNEL_INTERFACE + ".InitiatorHandle"]
if initiator_handle == self.self_handle:
self.finish_tube_offer(tube)
def finish_tube_offer(self, tube):
self.info("offering my tube located at %r", tube.object_path)
service_name = tube.props[CHANNEL_TYPE_DBUS_TUBE + ".ServiceName"]
params = self._tubes_to_offer[service_name]
try:
initiator = self.account["account"]
except TypeError:
params = self.account.Get(ACCOUNT, "Parameters")
initiator = params["account"]
params["initiator"] = initiator
address = tube[CHANNEL_TYPE_DBUS_TUBE].Offer(params,
SOCKET_ACCESS_CONTROL_CREDENTIALS)
tube.local_address = address
self.info("local tube address: %r", address)
def close_tubes(self):
for object_path, channel in self._tubes.iteritems():
channel.Close()
class TubePublisher(TubePublisherMixin, Client):
logCategory = "tube_publisher"
def __init__(self, manager, protocol, account, muc_id, conference_server, tubes_to_offer):
TubePublisherMixin.__init__(self, tubes_to_offer)
Client.__init__(self, manager, protocol, account, muc_id, conference_server)
class TubeConsumerMixin(object):
logCategory = "tube_consumer"
def __init__(self, found_peer_callback=None, disapeared_peer_callback=None):
self.found_peer_callback = found_peer_callback
self.disapeared_peer_callback = disapeared_peer_callback
def got_tube(self, tube):
super(TubeConsumerMixin, self).got_tube(tube)
self.accept_tube(tube)
def accept_tube(self, tube):
if self.pre_accept_tube(tube):
self.info("accepting tube %r", tube.object_path)
tube_iface = tube[CHANNEL_TYPE_DBUS_TUBE]
tube.local_address = tube_iface.Accept(SOCKET_ACCESS_CONTROL_CREDENTIALS)
else:
self.warning("tube %r not allowed", tube)
def pre_accept_tube(self, tube):
return True
def tube_closed(self, tube):
self.disapeared_peer_callback(tube)
super(TubeConsumerMixin, self).tube_closed(tube)
class TubeConsumer(TubeConsumerMixin, Client):
logCategory = "tube_consumer"
def __init__(self, manager, protocol,
account, muc_id, conference_server, found_peer_callback=None,
disapeared_peer_callback=None):
TubeConsumerMixin.__init__(self, found_peer_callback=found_peer_callback,
disapeared_peer_callback=disapeared_peer_callback)
Client.__init__(self, manager, protocol, account, muc_id, conference_server)
| [] |
2024-01-10 | opendreambox/python-coherence | coherence~upnp~services~clients~av_transport_client.py | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2006-2008, Frank Scholz <[email protected]>
from coherence import log
class AVTransportClient(log.Loggable):
logCategory = 'avtransportclient'
def __init__(self, service):
self.service = service
self.namespace = service.get_type()
self.url = service.get_control_url()
self.service.subscribe()
self.service.client = self
#def __del__(self):
# #print "AVTransportClient deleted"
# pass
def remove(self):
self.service.remove()
self.service = None
self.namespace = None
self.url = None
del self
def subscribe_for_variable(self, var_name, callback,signal=False):
self.service.subscribe_for_variable(var_name, instance=0, callback=callback,signal=signal)
def set_av_transport_uri(self, instance_id=0, current_uri='', current_uri_metadata=''):
action = self.service.get_action('SetAVTransportURI')
return action.call( InstanceID=instance_id,
CurrentURI=current_uri,
CurrentURIMetaData=current_uri_metadata)
def set_next_av_transport_uri(self, instance_id=0, next_uri='', next_uri_metadata=''):
action = self.service.get_action('SetNextAVTransportURI')
if action: # optional
return action.call( InstanceID=instance_id,
NextURI=next_uri,
NextURIMetaData=next_uri_metadata)
return None
def get_media_info(self, instance_id=0):
action = self.service.get_action('GetMediaInfo')
return action.call( InstanceID=instance_id)
def get_media_info_ext(self, instance_id=0):
action = self.service.get_action('GetMediaInfo_Ext')
return action.call( InstanceID=instance_id)
def get_transport_info(self, instance_id=0):
action = self.service.get_action('GetTransportInfo')
return action.call( InstanceID=instance_id)
def get_position_info(self, instance_id=0):
action = self.service.get_action('GetPositionInfo')
return action.call( InstanceID=instance_id)
def get_device_capabilities(self, instance_id=0):
action = self.service.get_action('GetDeviceCapabilities')
return action.call( InstanceID=instance_id)
def get_transport_settings(self, instance_id=0):
action = self.service.get_action('GetTransportSettings')
return action.call( InstanceID=instance_id)
def pause(self, instance_id=0):
action = self.service.get_action('Pause')
if action: # optional
return action.call( InstanceID=instance_id)
return None
def play(self, instance_id=0, speed=1):
action = self.service.get_action('Play')
return action.call( InstanceID=instance_id,Speed=speed)
def stop(self, instance_id=0):
action = self.service.get_action('Stop')
return action.call( InstanceID=instance_id)
def record(self, instance_id=0):
action = self.service.get_action('Record')
if action: # optional
return action.call( InstanceID=instance_id)
return None
def seek(self, instance_id=0, unit='', target=0):
action = self.service.get_action('Seek')
return action.call( InstanceID=instance_id,
Unit=unit,
Target=target)
def next(self, instance_id=0):
action = self.service.get_action('Next')
return action.call( InstanceID=instance_id)
def previous(self, instance_id=0):
action = self.service.get_action('Previous')
return action.call( InstanceID=instance_id)
def get_current_transport_actions(self, instance_id=0):
action = self.service.get_action('GetCurrentTransportActions')
return action.call( InstanceID=instance_id)
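# --- illustrative usage sketch (added, hypothetical) ---
# Given an AVTransport service object obtained through Coherence's device
# discovery, playback could be driven roughly like this (each call goes through
# the underlying SOAP action):
#
#   client = AVTransportClient(av_transport_service)
#   d = client.set_av_transport_uri(current_uri='http://server/track.mp3')
#   d.addCallback(lambda _: client.play())
#   d.addCallback(lambda _: client.stop())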
| [] |
2024-01-10 | opendreambox/python-coherence | misc~EOG-Plugin~upnp-coherence.py | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Frank Scholz <[email protected]>
import pygtk
pygtk.require("2.0")
import gtk
from coherence.ui.av_widgets import TreeWidget
from coherence.ui.av_widgets import UDN_COLUMN,UPNP_CLASS_COLUMN,SERVICE_COLUMN
import eog
class UPnPClient(eog.Plugin):
def __init__ (self):
eog.Plugin.__init__(self)
def button_pressed(self, widget, event):
if event.button == 3:
x = int(event.x)
y = int(event.y)
try:
row_path,column,_,_ = self.ui.treeview.get_path_at_pos(x, y)
selection = self.ui.treeview.get_selection()
if not selection.path_is_selected(row_path):
self.ui.treeview.set_cursor(row_path,column,False)
print "button_pressed", row_path, (row_path[0],)
iter = self.ui.store.get_iter((row_path[0],))
udn, = self.ui.store.get(iter,UDN_COLUMN)
iter = self.ui.store.get_iter(row_path)
upnp_class,url = self.ui.store.get(iter,UPNP_CLASS_COLUMN,SERVICE_COLUMN)
print udn, upnp_class, url
if(not upnp_class.startswith('object.container') and
not upnp_class == 'root'):
self.create_item_context(has_delete=self.ui.device_has_action(udn,'ContentDirectory','DestroyObject'))
self.context.popup(None,None,None,event.button,event.time)
return 1
except TypeError:
pass
return 1
def create_item_context(self,has_delete=False):
""" create context menu for right click in treeview item"""
def action(menu, text):
selection = self.ui.treeview.get_selection()
model, selected_rows = selection.get_selected_rows()
if text == 'item.delete':
for row_path in selected_rows:
self.ui.destroy_object(row_path)
return
            if(len(selected_rows) > 0 and
                text == 'item.play'):
row_path = selected_rows.pop(0)
iter = self.ui.store.get_iter(row_path)
url, = self.ui.store.get(iter,SERVICE_COLUMN)
app = eog.eog_application_get_instance()
app.open_uri_list((url,))
for row_path in selected_rows:
iter = self.ui.store.get_iter(row_path)
url, = self.ui.store.get(iter,SERVICE_COLUMN)
app = eog.eog_application_get_instance()
app.open_uri_list((url,))
if not hasattr(self, 'context_no_delete'):
self.context_no_delete = gtk.Menu()
play_menu = gtk.MenuItem("Play")
play_menu.connect("activate", action, 'item.play')
self.context_no_delete.append(play_menu)
self.context_no_delete.show_all()
if not hasattr(self, 'context_with_delete'):
self.context_with_delete = gtk.Menu()
play_menu = gtk.MenuItem("Display")
play_menu.connect("activate", action, 'item.play')
self.context_with_delete.append(play_menu)
self.context_with_delete.append(gtk.SeparatorMenuItem())
menu = gtk.MenuItem("Delete")
menu.connect("activate", action, 'item.delete')
self.context_with_delete.append(menu)
self.context_with_delete.show_all()
if has_delete:
self.context = self.context_with_delete
else:
self.context = self.context_no_delete
def activate (self, window):
self.eog_object = window
print "activate", window
self.ui = TreeWidget()
self.ui.cb_item_right_click = self.button_pressed
self.ui.window.show_all()
selection = self.ui.treeview.get_selection()
selection.set_mode(gtk.SELECTION_MULTIPLE)
sidebar = self.eog_object.get_sidebar()
sidebar.add_page("Coherence DLNA/UPnP Client", self.ui.window)
sidebar.show_all()
def load_and_play(url):
app = eog.eog_application_get_instance()
app.open_uri_list((url,))
self.ui.cb_item_dbl_click = load_and_play
def deactivate (self, window):
#totem_object.remove_sidebar_page ("upnp-coherence")
print "deactivate", window
| [] |
2024-01-10 | opendreambox/python-coherence | misc~upnp-tester.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Frank Scholz <[email protected]>
# upnp-tester.py
#
# very basic atm
#
# provides these functions:
#
# list - display all devices
# extract <uuid> - extract device and service xml files and put them in a
# /tmp/<uuid> directory
# send <uuid> - pack the previously extracted xml files into a tar.gz and
# send them via email to the Coherence googlemail account
#
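# example session (illustrative only, actual device names/uuids will differ):
#
#   >>> list
#   Devices:
#   SomeMediaServer MediaServer:1 [upnp:rootdevice/<uuid>/192.168.1.10]
#   >>> extract <uuid>
#   >>> send <uuid>
#   >>> quit
#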
import os
from sets import Set
from twisted.internet import stdio
from twisted.protocols import basic
from twisted.internet import protocol
try:
from twisted.mail import smtp
from twisted.names import client as namesclient
from twisted.names import dns
import StringIO
class SMTPClient(smtp.ESMTPClient):
""" build an email message and send it to our googlemail account
"""
def __init__(self, mail_from, mail_to, mail_subject, mail_file, *args, **kwargs):
smtp.ESMTPClient.__init__(self, *args, **kwargs)
self.mailFrom = mail_from
self.mailTo = mail_to
self.mailSubject = mail_subject
self.mail_file = mail_file
self.mail_from = mail_from
def getMailFrom(self):
result = self.mailFrom
self.mailFrom = None
return result
def getMailTo(self):
return [self.mailTo]
def getMailData(self):
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
msg = MIMEMultipart()
msg['Subject'] = self.mailSubject
msg['From'] = self.mail_from
msg['To'] = self.mailTo
fp = open(self.mail_file, 'rb')
tar = MIMEApplication(fp.read(),'x-tar')
fp.close()
tar.add_header('Content-Disposition', 'attachment', filename=os.path.basename(self.mail_file))
msg.attach(tar)
return StringIO.StringIO(msg.as_string())
def sentMail(self, code, resp, numOk, addresses, log):
print 'Sent', numOk, 'messages'
class SMTPClientFactory(protocol.ClientFactory):
protocol = SMTPClient
def __init__(self, mail_from, mail_to, mail_subject, mail_file, *args, **kwargs):
self.mail_from = mail_from
self.mail_to = mail_to
self.mail_subject = mail_subject
self.mail_file = mail_file
def buildProtocol(self, addr):
return self.protocol(self.mail_from, self.mail_to,
self.mail_subject, self.mail_file,
secret=None, identity='localhost')
except ImportError:
pass
from twisted.internet import reactor, defer
from twisted.web import client
from coherence.base import Coherence
class UI(basic.LineReceiver):
from os import linesep as delimiter
def connectionMade(self):
self.print_prompt()
def lineReceived(self, line):
args = line.strip().split()
if args:
cmd = args[0].lower()
if hasattr(self, 'cmd_%s' % cmd):
getattr(self, 'cmd_%s' % (cmd))(args[1:])
elif cmd == "?":
self.cmd_help(args[1:])
else:
self.transport.write("""Unknown command '%s'\n"""%(cmd))
self.print_prompt()
def cmd_help(self,args):
"help -- show help"
methods = Set([ getattr(self, x) for x in dir(self) if x[:4] == "cmd_" ])
self.transport.write("Commands:\n")
for method in methods:
if hasattr(method, '__doc__'):
self.transport.write("%s\n"%(method.__doc__))
def cmd_list(self, args):
"list -- list devices"
self.transport.write("Devices:\n")
for d in self.coherence.get_devices():
self.transport.write(str("%s %s [%s/%s/%s]\n" % (d.friendly_name, ':'.join(d.device_type.split(':')[3:5]), d.st, d.usn.split(':')[1], d.host)))
def cmd_extract(self, args):
"extract <uuid> -- download xml files from device"
device = self.coherence.get_device_with_id(args[0])
if device == None:
self.transport.write("device %s not found - aborting\n" % args[0])
else:
self.transport.write(str("extracting from %s @ %s\n" % (device.friendly_name, device.host)))
try:
l = []
def device_extract(workdevice, path):
tmp_dir = os.path.join(path,workdevice.get_uuid())
os.mkdir(tmp_dir)
d = client.downloadPage(workdevice.get_location(),os.path.join(tmp_dir,'device-description.xml'))
l.append(d)
for service in workdevice.services:
d = client.downloadPage(service.get_scpd_url(),os.path.join(tmp_dir,'%s-description.xml'%service.service_type.split(':',3)[3]))
l.append(d)
for ed in workdevice.devices:
device_extract(ed, tmp_dir)
def finished(result):
self.transport.write(str("\nextraction of device %s finished\nfiles have been saved to /tmp/%s\n" %(args[0],args[0])))
self.print_prompt()
device_extract(device,'/tmp')
dl = defer.DeferredList(l)
dl.addCallback(finished)
except Exception, msg:
self.transport.write(str("problem creating download directory %s\n" % msg))
def cmd_send(self, args):
"send <uuid> -- send before extracted xml files to the Coherence home base"
if os.path.isdir(os.path.join('/tmp',args[0])) == 1:
cwd = os.getcwd()
os.chdir('/tmp')
import tarfile
tar = tarfile.open(os.path.join('/tmp',args[0]+'.tgz'), "w:gz")
for file in os.listdir(os.path.join('/tmp',args[0])):
tar.add(os.path.join(args[0],file))
tar.close()
os.chdir(cwd)
def got_mx(result):
mx_list = result[0]
mx_list.sort(lambda x, y: cmp(x.payload.preference, y.payload.preference))
if len(mx_list) > 0:
import posix, pwd
import socket
reactor.connectTCP(str(mx_list[0].payload.name), 25,
SMTPClientFactory('@'.join((pwd.getpwuid(posix.getuid())[0],socket.gethostname())), '[email protected]', 'xml-files', os.path.join('/tmp',args[0]+'.tgz')))
mx = namesclient.lookupMailExchange('googlemail.com')
mx.addCallback(got_mx)
def cmd_quit(self, args):
"quit -- quits this program"
reactor.stop()
cmd_exit = cmd_quit
def print_prompt(self):
self.transport.write('>>> ')
if __name__ == '__main__':
c = Coherence({'logmode':'none'})
ui = UI()
ui.coherence = c
stdio.StandardIO(ui)
reactor.run()
| [] |
2024-01-10 | opendreambox/python-coherence | coherence~backends~tracker_storage.py | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Frank Scholz <[email protected]>
import os.path
from twisted.internet import reactor, defer
from twisted.python import failure, util
from coherence.upnp.core import DIDLLite
from coherence.upnp.core.soap_service import errorCode
from coherence.upnp.core import utils
import dbus
import dbus.service
import coherence.extern.louie as louie
from coherence.backend import BackendItem, BackendStore
ROOT_CONTAINER_ID = 0
AUDIO_CONTAINER_ID = 100
AUDIO_ALL_CONTAINER_ID = 101
AUDIO_ARTIST_CONTAINER_ID = 102
AUDIO_ALBUM_CONTAINER_ID = 103
AUDIO_PLAYLIST_CONTAINER_ID = 104
AUDIO_GENRE_CONTAINER_ID = 105
VIDEO_CONTAINER_ID = 200
VIDEO_ALL_CONTAINER_ID = 201
IMAGE_CONTAINER_ID = 300
IMAGE_ALL_CONTAINER_ID = 301
BUS_NAME = 'org.freedesktop.Tracker'
OBJECT_PATH = '/org/freedesktop/tracker'
tracks_query = """
<rdfq:Condition>\
<rdfq:equals>\
<rdfq:Property name="Audio:Title" />\
<rdf:String>*</rdf:String>\
</rdfq:equals>\
</rdfq:Condition>\
"""
video_query = """
<rdfq:Condition>\
<rdfq:equals>\
<rdfq:Property name="File:Name" />\
<rdf:String>*</rdf:String>\
</rdfq:equals>\
</rdfq:Condition>\
"""
image_query = """
<rdfq:Condition>\
<rdfq:equals>\
<rdfq:Property name="File:Name" />\
<rdf:String>*</rdf:String>\
</rdfq:equals>\
</rdfq:Condition>\
"""
class Container(BackendItem):
logCategory = 'tracker_store'
def __init__(self, id, parent_id, name, store=None, children_callback=None, container_class=DIDLLite.Container):
self.id = id
self.parent_id = parent_id
self.name = name
self.mimetype = 'directory'
self.item = container_class(id, parent_id,self.name)
self.item.childCount = 0
self.update_id = 0
if children_callback != None:
self.children = children_callback
else:
self.children = util.OrderedDict()
self.item.childCount = None #self.get_child_count()
if store!=None:
self.get_url = lambda: store.urlbase + str(self.id)
def add_child(self, child):
id = child.id
if isinstance(child.id, basestring):
_,id = child.id.split('.')
self.children[id] = child
if self.item.childCount != None:
self.item.childCount += 1
def get_children(self,start=0,end=0):
self.info("container.get_children %r %r", start, end)
if callable(self.children):
return self.children(start,end-start)
else:
children = self.children.values()
if end == 0:
return children[start:]
else:
return children[start:end]
def get_child_count(self):
if self.item.childCount != None:
return self.item.childCount
if callable(self.children):
return len(self.children())
else:
return len(self.children)
def get_item(self):
return self.item
def get_name(self):
return self.name
def get_id(self):
return self.id
class Artist(BackendItem):
logCategory = 'tracker_store'
def __init__(self, store, id, name):
self.store = store
self.id = 'artist.%d' % int(id)
self.name = name
self.children = {}
self.sorted_children = None
def add_child(self, child):
_,id = child.id.split('.')
self.children[id] = child
def sort_children(self):
if self.sorted_children == None:
def childs_sort(x,y):
r = cmp(self.children[x].name,self.children[y].name)
return r
self.sorted_children = self.children.keys()
self.sorted_children.sort(cmp=childs_sort)
return self.sorted_children
def get_artist_all_tracks(self,start=0,request_count=0):
children = []
        for key in self.sort_children():
            children += self.children[key].get_children()
if request_count == 0:
return children[start:]
else:
return children[start:request_count]
def get_children(self,start=0,end=0):
children = []
for key in self.sort_children():
children.append(self.children[key])
if end == 0:
return children[start:]
else:
return children[start:end]
def get_child_count(self):
return len(self.children)
def get_item(self, parent_id = AUDIO_ARTIST_CONTAINER_ID):
item = DIDLLite.MusicArtist(self.id, parent_id, self.name)
return item
def get_id(self):
return self.id
def get_name(self):
return self.name
class Album(BackendItem):
logCategory = 'tracker_store'
def __init__(self, store, id, title, artist):
self.store = store
self.id = 'album.%d' % int(id)
self.name = unicode(title)
self.artist = unicode(artist)
self.cover = None
self.children = {}
self.sorted_children = None
def add_child(self, child):
_,id = child.id.split('.')
self.children[id] = child
def get_children(self,start=0,end=0):
children = []
if self.sorted_children != None:
for key in self.sorted_children:
children.append(self.children[key])
else:
def childs_sort(x,y):
r = cmp(self.children[x].track_nr,self.children[y].track_nr)
return r
self.sorted_children = self.children.keys()
self.sorted_children.sort(cmp=childs_sort)
for key in self.sorted_children:
children.append(self.children[key])
if end == 0:
return children[start:]
else:
return children[start:end]
def get_child_count(self):
return len(self.children)
def get_item(self, parent_id = AUDIO_ALBUM_CONTAINER_ID):
item = DIDLLite.MusicAlbum(self.id, parent_id, self.name)
item.childCount = self.get_child_count()
item.artist = self.artist
item.albumArtURI = self.cover
return item
def get_id(self):
return self.id
def get_name(self):
return self.name
def get_cover(self):
return self.cover
class Track(BackendItem):
logCategory = 'tracker_store'
def __init__(self,store,
id,parent_id,
file,title,
artist,album,genre,\
duration,\
track_number,\
size,mimetype):
self.store = store
self.id = 'song.%d' % int(id)
self.parent_id = parent_id
self.path = unicode(file)
duration = str(duration).strip()
duration = duration.split('.')[0]
if len(duration) == 0:
duration = 0
seconds = int(duration)
hours = seconds / 3600
seconds = seconds - hours * 3600
minutes = seconds / 60
seconds = seconds - minutes * 60
self.duration = ("%d:%02d:%02d") % (hours, minutes, seconds)
self.bitrate = 0
self.title = unicode(title)
self.artist = unicode(artist)
self.album = unicode(album)
self.genre = unicode(genre)
track_number = str(track_number).strip()
if len(track_number) == 0:
track_number = 1
self.track_nr = int(track_number)
self.cover = None
self.mimetype = str(mimetype)
self.size = int(size)
self.url = self.store.urlbase + str(self.id)
def get_children(self, start=0, end=0):
return []
def get_child_count(self):
return 0
def get_item(self, parent_id=None):
self.debug("Track get_item %r @ %r" %(self.id,self.parent_id))
# create item
item = DIDLLite.MusicTrack(self.id,self.parent_id)
item.album = self.album
item.artist = self.artist
#item.date =
item.genre = self.genre
item.originalTrackNumber = self.track_nr
item.title = self.title
item.albumArtURI = self.cover
# add http resource
res = DIDLLite.Resource(self.url, 'http-get:*:%s:*' % self.mimetype)
if self.size > 0:
res.size = self.size
if self.duration > 0:
res.duration = str(self.duration)
if self.bitrate > 0:
            res.bitrate = str(self.bitrate)
item.res.append(res)
#if self.store.server.coherence.config.get('transcoding', 'no') == 'yes':
# if self.mimetype in ('audio/mpeg',
# 'application/ogg','audio/ogg',
# 'audio/x-m4a',
# 'application/x-flac'):
# dlna_pn = 'DLNA.ORG_PN=LPCM'
# dlna_tags = DIDLLite.simple_dlna_tags[:]
# dlna_tags[1] = 'DLNA.ORG_CI=1'
# #dlna_tags[2] = 'DLNA.ORG_OP=00'
# new_res = DIDLLite.Resource(self.url+'?transcoded=lpcm',
# 'http-get:*:%s:%s' % ('audio/L16;rate=44100;channels=2', ';'.join([dlna_pn]+dlna_tags)))
# new_res.size = None
# if self.duration > 0:
# new_res.duration = str(self.duration)
# item.res.append(new_res)
# if self.mimetype != 'audio/mpeg':
# new_res = DIDLLite.Resource(self.url+'?transcoded=mp3',
# 'http-get:*:%s:*' % 'audio/mpeg')
# new_res.size = None
# if self.duration > 0:
# new_res.duration = str(self.duration)
# item.res.append(new_res)
return item
def get_id(self):
return self.id
def get_name(self):
return self.title
def get_url(self):
return self.url
def get_path(self):
return self.path
class Video(BackendItem):
logCategory = 'tracker_store'
def __init__(self,store,
id,parent_id,
file,title,
duration,\
size,mimetype):
self.store = store
self.id = 'video.%d' % int(id)
self.parent_id = parent_id
self.path = unicode(file)
duration = str(duration).strip()
duration = duration.split('.')[0]
if len(duration) == 0:
duration = 0
seconds = int(duration)
hours = seconds / 3600
seconds = seconds - hours * 3600
minutes = seconds / 60
seconds = seconds - minutes * 60
self.duration = ("%d:%02d:%02d") % (hours, minutes, seconds)
self.title = unicode(title)
self.cover = None
self.mimetype = str(mimetype)
self.size = int(size)
self.url = self.store.urlbase + str(self.id)
def get_children(self, start=0, end=0):
return []
def get_child_count(self):
return 0
def get_item(self, parent_id=None):
self.debug("Video get_item %r @ %r" %(self.id,self.parent_id))
# create item
item = DIDLLite.VideoItem(self.id,self.parent_id)
#item.date =
item.title = self.title
item.albumArtURI = self.cover
# add http resource
res = DIDLLite.Resource(self.url, 'http-get:*:%s:*' % self.mimetype)
if self.size > 0:
res.size = self.size
if self.duration > 0:
res.duration = str(self.duration)
item.res.append(res)
return item
def get_id(self):
return self.id
def get_name(self):
return self.title
def get_url(self):
return self.url
def get_path(self):
return self.path
class Image(BackendItem):
logCategory = 'tracker_store'
def __init__(self,store,
id,parent_id,
file,title,album,
date,width,height,\
size,mimetype):
self.store = store
self.id = 'image.%d' % int(id)
self.parent_id = parent_id
self.path = unicode(file)
self.title = unicode(title)
self.album = unicode(album.strip())
self.mimetype = str(mimetype)
self.size = int(size)
self.url = self.store.urlbase + str(self.id)
def get_children(self, start=0, end=0):
return []
def get_child_count(self):
return 0
def get_item(self, parent_id=None):
self.debug("Image get_item %r @ %r" %(self.id,self.parent_id))
# create item
item = DIDLLite.ImageItem(self.id,self.parent_id)
#item.date =
item.title = self.title
# add http resource
res = DIDLLite.Resource(self.url, 'http-get:*:%s:*' % self.mimetype)
if self.size > 0:
res.size = self.size
item.res.append(res)
return item
def get_id(self):
return self.id
def get_name(self):
return self.title
def get_url(self):
return self.url
def get_path(self):
return self.path
class TrackerStore(BackendStore):
""" this is a backend to Meta Tracker
http://www.gnome.org/projects/tracker/index.html
"""
implements = ['MediaServer']
logCategory = 'tracker_store'
def __init__(self, server, **kwargs):
if server.coherence.config.get('use_dbus','no') != 'yes':
raise Exception('this backend needs use_dbus enabled in the configuration')
BackendStore.__init__(self,server,**kwargs)
self.config = kwargs
self.name = kwargs.get('name','Tracker')
self.update_id = 0
self.token = None
self.songs = 0
self.albums = 0
self.artists = 0
self.playlists = 0
self.genres = 0
self.videos = 0
self.images = 0
self.bus = dbus.SessionBus()
tracker_object = self.bus.get_object(BUS_NAME,OBJECT_PATH)
self.tracker_interface = dbus.Interface(tracker_object, 'org.freedesktop.Tracker')
self.search_interface = dbus.Interface(tracker_object, 'org.freedesktop.Tracker.Search')
self.keywords_interface = dbus.Interface(tracker_object, 'org.freedesktop.Tracker.Keywords')
self.metadata_interface = dbus.Interface(tracker_object, 'org.freedesktop.Tracker.Metadata')
self.query_id = -1
self.containers = {}
self.tracks = {}
self.containers[ROOT_CONTAINER_ID] = \
Container(ROOT_CONTAINER_ID,-1,self.name,store=self)
def queries_finished(r):
louie.send('Coherence.UPnP.Backend.init_completed', None, backend=self)
def queries_failed(r):
error = ''
louie.send('Coherence.UPnP.Backend.init_failed', None, backend=self, msg=error)
services = kwargs.get('service','Music,Videos,Images')
services = map(lambda x: x.strip().lower(),services.split(','))
l = []
mapping = {'music':self.get_tracks,
'videos':self.get_videos,
'images':self.get_images}
for service in services:
try:
l.append(mapping[service]())
except KeyError:
self.warning('Wrong Tracker service definition - %r' % service)
if len(l)>0:
dl = defer.DeferredList(l)
dl.addCallback(queries_finished)
dl.addErrback(lambda x: louie.send('Coherence.UPnP.Backend.init_failed', None, backend=self, msg='Connection to Tracker service(s) failed!'))
else:
louie.send('Coherence.UPnP.Backend.init_failed', None, backend=self, msg='No Tracker service defined!')
def __repr__(self):
return "TrackerStore"
def get_by_id(self,id):
self.info("looking for id %r", id)
if isinstance(id, basestring):
id = id.split('@',1)
id = id[0]
if isinstance(id, basestring) and id.startswith('artist_all_tracks_'):
try:
return self.containers[id]
except:
return None
item = None
try:
id = int(id)
item = self.containers[id]
except (ValueError,KeyError):
try:
type,id = id.split('.')
if type == 'song':
return self.containers[AUDIO_ALL_CONTAINER_ID].children[id]
if type == 'album':
return self.containers[AUDIO_ALBUM_CONTAINER_ID].children[id]
if type == 'artist':
return self.containers[AUDIO_ARTIST_CONTAINER_ID].children[id]
if type == 'video':
return self.containers[VIDEO_ALL_CONTAINER_ID].children[id]
if type == 'image':
return self.containers[IMAGE_ALL_CONTAINER_ID].children[id]
except (ValueError,KeyError):
return None
return item
def get_videos(self):
def handle_error(error):
print error
return error
def parse_videos_query_result(resultlist):
videos = []
for video in resultlist:
file,_,title,\
duration,\
size,mimetype = video
title = title.strip()
if len(title) == 0:
title = os.path.basename(file)
if mimetype == 'video/x-theora+ogg':
mimetype = u'video/ogg'
video_item = Video(self,
self.videos,VIDEO_ALL_CONTAINER_ID,
file,title,\
duration,\
size,mimetype)
self.videos += 1
videos.append(video_item)
videos.sort(cmp=lambda x,y : cmp(x.get_name().lower(),y.get_name().lower()))
for video_item in videos:
self.containers[VIDEO_ALL_CONTAINER_ID].add_child(video_item)
self.containers[VIDEO_CONTAINER_ID] = \
Container(VIDEO_CONTAINER_ID,ROOT_CONTAINER_ID,'Video',store=self)
self.containers[ROOT_CONTAINER_ID].add_child(self.containers[VIDEO_CONTAINER_ID])
self.containers[VIDEO_ALL_CONTAINER_ID] = \
Container( VIDEO_ALL_CONTAINER_ID,VIDEO_CONTAINER_ID,'All Videos',
store=self,
children_callback=None)
self.containers[VIDEO_CONTAINER_ID].add_child(self.containers[VIDEO_ALL_CONTAINER_ID])
fields=[u'Video:Title',u'Video:Duration',
u'File:Size',u'File:Mime']
d = defer.Deferred()
d.addCallback(parse_videos_query_result)
d.addErrback(handle_error)
self.search_interface.Query(self.query_id,'Videos',fields,'','',video_query,False,0,-1,
reply_handler=lambda x: d.callback(x),error_handler=lambda x: d.errback(x))
return d
def get_images(self):
def handle_error(error):
return error
def parse_images_query_result(resultlist):
print "images", resultlist
images = []
for image in resultlist:
file,_,title,album,\
date,width, height, \
size,mimetype = image
title = title.strip()
if len(title) == 0:
title = os.path.basename(file)
image_item = Image(self,
self.images,IMAGE_ALL_CONTAINER_ID,
file,title,album,\
date,width,height,\
size,mimetype)
self.images += 1
images.append(image_item)
images.sort(cmp=lambda x,y : cmp(x.get_name().lower(),y.get_name().lower()))
for image_item in images:
self.containers[IMAGE_ALL_CONTAINER_ID].add_child(image_item)
self.containers[IMAGE_CONTAINER_ID] = \
Container(IMAGE_CONTAINER_ID,ROOT_CONTAINER_ID,'Images',store=self)
self.containers[ROOT_CONTAINER_ID].add_child(self.containers[IMAGE_CONTAINER_ID])
self.containers[IMAGE_ALL_CONTAINER_ID] = \
Container(IMAGE_ALL_CONTAINER_ID,IMAGE_CONTAINER_ID,'All Images',
store=self,
children_callback=None)
self.containers[IMAGE_CONTAINER_ID].add_child(self.containers[IMAGE_ALL_CONTAINER_ID])
fields=[u'Image:Title',u'Image:Album',
u'Image:Date',u'Image:Width',u'Image:Height',
u'File:Size',u'File:Mime']
d = defer.Deferred()
d.addCallback(parse_images_query_result)
d.addErrback(handle_error)
self.search_interface.Query(self.query_id,'Images',fields,'','',image_query,False,0,-1,
reply_handler=lambda x: d.callback(x),error_handler=lambda x: d.errback(x))
return d
def get_tracks(self):
def handle_error(error):
return error
def parse_tracks_query_result(resultlist):
albums = {}
artists = {}
tracks = []
for track in resultlist:
file,service,title,artist,album,genre,\
duration,album_track_count,\
track_number,codec,\
size,mimetype = track
if mimetype == 'video/x-vorbis+ogg':
mimetype = 'audio/ogg'
track_item = Track(self,
self.songs,AUDIO_ALL_CONTAINER_ID,
file,title,artist,album,genre,\
duration,\
track_number,\
size,mimetype)
self.songs += 1
tracks.append(track_item)
tracks.sort(cmp=lambda x,y : cmp(x.get_name(),y.get_name()))
for track_item in tracks:
self.containers[AUDIO_ALL_CONTAINER_ID].add_child(track_item)
try:
album_item = albums[track_item.album]
album_item.add_child(track_item)
except:
album_item = Album(self, self.albums, track_item.album, track_item.artist)
albums[unicode(track_item.album)] = album_item
self.albums += 1
album_item.add_child(track_item)
try:
artist_item = artists[track_item.artist]
artist_item.add_child(album_item)
except:
artist_item = Artist(self, self.artists, track_item.artist)
artists[unicode(track_item.artist)] = artist_item
self.artists += 1
artist_item.add_child(album_item)
sorted_keys = albums.keys()
sorted_keys.sort()
for key in sorted_keys:
self.containers[AUDIO_ALBUM_CONTAINER_ID].add_child(albums[key])
sorted_keys = artists.keys()
sorted_keys.sort()
for key in sorted_keys:
self.containers[AUDIO_ARTIST_CONTAINER_ID].add_child(artists[key])
self.containers[AUDIO_CONTAINER_ID] = \
Container(AUDIO_CONTAINER_ID,ROOT_CONTAINER_ID,'Audio',store=self)
self.containers[ROOT_CONTAINER_ID].add_child(self.containers[AUDIO_CONTAINER_ID])
self.containers[AUDIO_ALL_CONTAINER_ID] = \
Container( AUDIO_ALL_CONTAINER_ID,AUDIO_CONTAINER_ID,'All Tracks',
store=self,
children_callback=None)
self.containers[AUDIO_CONTAINER_ID].add_child(self.containers[AUDIO_ALL_CONTAINER_ID])
self.containers[AUDIO_ALBUM_CONTAINER_ID] = \
Container( AUDIO_ALBUM_CONTAINER_ID,AUDIO_CONTAINER_ID,'Albums',
store=self,
children_callback=None)
self.containers[AUDIO_CONTAINER_ID].add_child(self.containers[AUDIO_ALBUM_CONTAINER_ID])
self.containers[AUDIO_ARTIST_CONTAINER_ID] = \
Container( AUDIO_ARTIST_CONTAINER_ID,AUDIO_CONTAINER_ID,'Artists',
store=self,
children_callback=None)
self.containers[AUDIO_CONTAINER_ID].add_child(self.containers[AUDIO_ARTIST_CONTAINER_ID])
self.containers[AUDIO_PLAYLIST_CONTAINER_ID] = \
Container( AUDIO_PLAYLIST_CONTAINER_ID,AUDIO_CONTAINER_ID,'Playlists',
store=self,
children_callback=None,
container_class=DIDLLite.PlaylistContainer)
self.containers[AUDIO_CONTAINER_ID].add_child(self.containers[AUDIO_PLAYLIST_CONTAINER_ID])
self.containers[AUDIO_GENRE_CONTAINER_ID] = \
Container( AUDIO_GENRE_CONTAINER_ID,AUDIO_CONTAINER_ID,'Genres',
store=self,
children_callback=None)
self.containers[AUDIO_CONTAINER_ID].add_child(self.containers[AUDIO_GENRE_CONTAINER_ID])
self.wmc_mapping.update({'4': lambda : self.get_by_id(AUDIO_ALL_CONTAINER_ID), # all tracks
'5': lambda : self.get_by_id(AUDIO_GENRE_CONTAINER_ID), # all genres
'6': lambda : self.get_by_id(AUDIO_ARTIST_CONTAINER_ID), # all artists
'7': lambda : self.get_by_id(AUDIO_ALBUM_CONTAINER_ID), # all albums
'13': lambda : self.get_by_id(AUDIO_PLAYLIST_CONTAINER_ID), # all playlists
})
fields=[u'Audio:Title',u'Audio:Artist',
u'Audio:Album',u'Audio:Genre',
u'Audio:Duration',u'Audio:AlbumTrackCount',
u'Audio:TrackNo',u'Audio:Codec',
u'File:Size', u'File:Mime']
d = defer.Deferred()
d.addCallback(parse_tracks_query_result)
d.addErrback(handle_error)
self.search_interface.Query(self.query_id,'Music',fields,'','',tracks_query,False,0,-1,
reply_handler=lambda x: d.callback(x),error_handler=lambda x: d.errback(x))
return d
def upnp_init(self):
if self.server:
self.server.connection_manager_server.set_variable(0, 'SourceProtocolInfo',
['http-get:*:audio/mpeg:*',
'internal:%s:audio/mpeg:*' % self.server.coherence.hostname,
'http-get:*:application/ogg:*',
'internal:%s:application/ogg:*' % self.server.coherence.hostname,
'http-get:*:audio/ogg:*',
'internal:%s:audio/ogg:*' % self.server.coherence.hostname,
'http-get:*:video/ogg:*',
'internal:%s:video/ogg:*' % self.server.coherence.hostname,
'http-get:*:video/mpeg:*',
'internal:%s:video/mpeg:*' % self.server.coherence.hostname,
'http-get:*:video/x-msvideo:*',
'internal:%s:video/x-msvideo:*' % self.server.coherence.hostname,
'http-get:*:video/avi:*',
'internal:%s:video/avi:*' % self.server.coherence.hostname,
'http-get:*:video/mp4:*',
'internal:%s:video/mp4:*' % self.server.coherence.hostname,
'http-get:*:video/quicktime:*',
'internal:%s:video/quicktime:*' % self.server.coherence.hostname,
'http-get:*:image/jpg:*',
'internal:%s:image/jpg:*' % self.server.coherence.hostname,
'http-get:*:image/png:*',
'internal:%s:image/png:*' % self.server.coherence.hostname,
'http-get:*:image/gif:*',
'internal:%s:image/gif:*' % self.server.coherence.hostname,])
| [] |
2024-01-10 | fuegoplatforms/VerityChain-AutoGPT | autogpts~autogpt~autogpt~core~runner~client_lib~logging.py | import logging
import sys
from colorama import Fore, Style
from openai.util import logger as openai_logger
SIMPLE_LOG_FORMAT = "%(asctime)s %(levelname)s %(message)s"
DEBUG_LOG_FORMAT = (
"%(asctime)s.%(msecs)03d %(levelname)s %(filename)s:%(lineno)d %(message)s"
)
def configure_root_logger():
console_formatter = FancyConsoleFormatter(SIMPLE_LOG_FORMAT)
stdout = logging.StreamHandler(stream=sys.stdout)
stdout.setLevel(logging.DEBUG)
stdout.addFilter(BelowLevelFilter(logging.WARNING))
stdout.setFormatter(console_formatter)
stderr = logging.StreamHandler()
stderr.setLevel(logging.WARNING)
stderr.setFormatter(console_formatter)
logging.basicConfig(level=logging.DEBUG, handlers=[stdout, stderr])
# Disable debug logging from OpenAI library
openai_logger.setLevel(logging.INFO)
def get_client_logger():
# Configure logging before we do anything else.
# Application logs need a place to live.
client_logger = logging.getLogger("autogpt_client_application")
client_logger.setLevel(logging.DEBUG)
return client_logger
class FancyConsoleFormatter(logging.Formatter):
"""
A custom logging formatter designed for console output.
This formatter enhances the standard logging output with color coding. The color
coding is based on the level of the log message, making it easier to distinguish
between different types of messages in the console output.
The color for each level is defined in the LEVEL_COLOR_MAP class attribute.
"""
# level -> (level & text color, title color)
LEVEL_COLOR_MAP = {
logging.DEBUG: Fore.LIGHTBLACK_EX,
logging.INFO: Fore.BLUE,
logging.WARNING: Fore.YELLOW,
logging.ERROR: Fore.RED,
logging.CRITICAL: Fore.RED + Style.BRIGHT,
}
def format(self, record: logging.LogRecord) -> str:
# Make sure `msg` is a string
if not hasattr(record, "msg"):
record.msg = ""
elif not type(record.msg) == str:
record.msg = str(record.msg)
# Determine default color based on error level
level_color = ""
if record.levelno in self.LEVEL_COLOR_MAP:
level_color = self.LEVEL_COLOR_MAP[record.levelno]
record.levelname = f"{level_color}{record.levelname}{Style.RESET_ALL}"
# Determine color for message
color = getattr(record, "color", level_color)
color_is_specified = hasattr(record, "color")
# Don't color INFO messages unless the color is explicitly specified.
if color and (record.levelno != logging.INFO or color_is_specified):
record.msg = f"{color}{record.msg}{Style.RESET_ALL}"
return super().format(record)
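# Minimal usage sketch (illustrative): the formatter colors records by level and
# honors an optional per-record `color` attribute passed via logging's `extra`:
#
#   handler = logging.StreamHandler()
#   handler.setFormatter(FancyConsoleFormatter(SIMPLE_LOG_FORMAT))
#   demo_logger = logging.getLogger("demo")
#   demo_logger.addHandler(handler)
#   demo_logger.warning("rendered in yellow")
#   demo_logger.info("rendered in green", extra={"color": Fore.GREEN})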
class BelowLevelFilter(logging.Filter):
"""Filter for logging levels below a certain threshold."""
def __init__(self, below_level: int):
super().__init__()
self.below_level = below_level
def filter(self, record: logging.LogRecord):
return record.levelno < self.below_level
| [] |
2024-01-10 | RachithP/RL | atari~breakout-deterministic~wrappers.py | """
Adapted from OpenAI Baselines
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
"""
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few frames
            # so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.width = 84
self.height = 84
self.observation_space = spaces.Box(low=0, high=255,
shape=(self.height, self.width, 1), dtype=np.uint8)
def observation(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
return frame[:, :, None]
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * k), dtype=np.uint8)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=2)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
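# Note (illustrative): observations coming out of FrameStack are LazyFrames and
# share storage with the frames kept in the replay buffer, so convert them only
# when feeding a model, e.g. state = np.array(obs) materializes the stacked
# (84, 84, 4) array; converting earlier defeats the memory optimization above.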
def make_atari(env_id):
env = gym.make(env_id)
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
return env
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
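# Typical composition (illustrative sketch, the env id is an assumption):
#   env = make_atari('BreakoutNoFrameskip-v4')
#   env = wrap_deepmind(env, frame_stack=True, scale=False)
#   env = wrap_pytorch(env)   # defined below, gives channel-first uint8 frames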
class ImageToPyTorch(gym.ObservationWrapper):
"""
    Image shape to num_channels x width x height
"""
def __init__(self, env):
super(ImageToPyTorch, self).__init__(env)
old_shape = self.observation_space.shape
self.observation_space = gym.spaces.Box(low=0.0, high=1.0, shape=(old_shape[-1], old_shape[0], old_shape[1]), dtype=np.uint8)
def observation(self, observation):
return np.swapaxes(observation, 2, 0)
def wrap_pytorch(env):
return ImageToPyTorch(env) | [] |
2024-01-10 | superoreoooooo/S231 | ProjectN~GPT3Connect.py | import openai
def getAns(condition, Q) :
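    """Send one chat completion request to gpt-3.5-turbo: `condition` is used as
    the system message and `Q` as the user message; the OpenAI API key is read
    from ProjectN/Data/ApiKey.txt; returns the assistant's reply text."""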
f = open("ProjectN/Data/ApiKey.txt", "r", encoding="UTF-8")
openai.api_key = f.readline()
f.close()
result = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages = [
{"role" : "system", "content" : condition},
{"role" : "user", "content" : Q}
]
)
#print(result['choices'][0]['message']['content'])
return (result['choices'][0]['message']['content'])
#print(getAns("넌 지금부터 내가 알려주는 배경과 역할에 따라 게임 NPC의 이름과 스토리, 그리고 역할에 따른 퀘스트 (배경, 역할과 관련있는 물품(물품 이름도 배경에 맞게 지어줘, 가져와야 하는 물품 개수와 그 물품을 어떻게 구할 수 있는지도)을 모아오기 또는 배경, 역할과 관련있는 무언가(몬스터나 적대적인 생명체도 괜찮고, 동물 사냥도 괜찮아. 다만 그걸 배경에 따라 이름또는 종족 등을 명시해서 명확하게 작성해줘. 그리고 배경에 따라 어디서 그 무언가를 잡을 수 있는지도 작성해 줘.)를 몇마리 잡아오기)를 작성해 주면 돼. 퀘스트를 완료했을때 그에 따른 보상도 작성해줘. 보상은 NPC의 직업과 퀘스트의 내용과 관련이 있어야 하고, 어떤 아이템을 몇개, 그리고 그 아이템의 종류(사용 아이템, 장비(갑옷 등), 무기, 기타 아이템 등..)도 작성해 줘야 해. 또한, 퀘스트 보상에는 경험치또한 아이템과 같이 주어야만 해. 그리고 퀘스트 보상에는 돈이 포함될 수 있어. 예상되는 퀘스트의 난이도에 따른 적당한 양의 재화를 추가해 줬으면 좋겠어. 형식은 NPC 이름 : 이름, 스토리 : 스토리, 퀘스트 : 퀘스트1: 이름 : 내용 : 태스크 : 보상 / 퀘스트2: 이름 : 내용 : 태스크 : 보상 .. 의 형식으로 작성해주면 될 것 같아. 이름과 내용, 태스크과 보상 사이에는 하나의 줄띄움을 해줘. 한번의 대답에는 한명의 NPC만 작성해야만 하고, 퀘스트는 4개가 되어야 해. 스토리에 잘 어울리고 퀘스트와 역할 사이가 자연스러우면 고마울 것 같아.", "네가 아까전에 작성해준 리아나 처럼 형식을 맞춰줬으면 좋겠어."))
"""
@deprecated
c = "넌 지금부터 내가 알려주는 배경과 역할에 따라 게임 NPC의 이름과 스토리, 그리고 역할에 따른 퀘스트 (배경, 역할과 관련있는 물품(물품 이름도 배경에 맞게 지어줘, 가져와야 하는 물품 개수와 그 물품을 어떻게 구할 수 있는지도)을 모아오기 또는 배경, 역할과 관련있는 무언가(몬스터나 적대적인 생명체도 괜찮고, 동물 사냥도 좋아. 다만 그걸 배경에 따라 이름을 명시해서 정확하게 작성해줘. 그리고 배경에 따라 어디서 그 무언가를 잡을 수 있는지도 작성해 줘야 해.)를 몇마리 잡아오기)를 작성해 주면 돼. 퀘스트를 완료했을때 그에 따른 보상도 작성해줘. 보상은 NPC의 직업과 퀘스트의 내용과 관련이 있어야 하고, 어떤 아이템을 몇개, 그리고 그 아이템의 종류(사용 아이템, 장비(갑옷 등), 무기, 기타 아이템 등..)도 작성해 줘야 해. 또한, 퀘스트 보상에는 경험치또한 아이템과 같이 주어야만 해. 그리고 퀘스트 보상에는 돈이 포함될 수 있어. 예상되는 퀘스트의 난이도에 따른 적당한 양의 재화를 추가해 줬으면 좋겠어. 형식은 NPC 이름 : 이름, 스토리 : 스토리, 퀘스트 : \n 퀘스트1이름 : 내용 : 태스크 : 보상 / \n 퀘스트2이름 : 내용 : 태스크 : 보상 .. 의 형식으로 작성해주면 될 것 같아. 한번의 대답에는 한명의 NPC만 작성해야만 하고, 퀘스트는 4개가 되어야 해. 스토리에 잘 어울리고 퀘스트와 역할 사이가 자연스러우면 고마울 것 같아."
bg = "판타지"
role = "사냥꾼"
q = "배경은 " + bg + " 배경이고, 역할은 " + role + "으로 해줘."
getAns(c, q)
""" | [] |
2024-01-10 | jzbjyb/knn-transformers | models~templates.py | from typing import List, Dict, Any, Tuple, Union
from operator import itemgetter
import copy
from collections import namedtuple
import spacy
import stanza
from nltk.tokenize.punkt import PunktSentenceTokenizer
import tiktoken
import openai
from .utils import openai_api_call, Utils
class CtxPrompt:
ctx_position: str = 'begin'
ret_instruction: "RetrievalInstruction" = None
instruction: str = None
format_reference_method: str = 'default'
clean_reference: bool = False
add_ref_suffix: str = None
add_ref_prefix: str = None
def __init__(
self,
demo: List["CtxPrompt"] = [],
ctx: str = None,
ctxs: List[Tuple[str, str]] = [],
case: str = None,
question: str = None,
qid: str = None,
gold_output: str = None,
):
assert self.ctx_position in {'before_case', 'begin'}
self.demo = demo
self.did = None
self.ctx = ctx
self.ctxs = ctxs
self._ctxs = [] # used for ctx alwayed being used
self.ctxs_idx = 0
self.case = case
self.question = question or case
self.qid = qid
self.gold_output = gold_output
self.ind = 1 # ctx index
self.gen_len = 0
self.gold_used_len = 0
@staticmethod
def get_append_retrieval(ret_to_append: str, index: int = None):
if index is not None:
return f'Reference {index}: {ret_to_append}\n'
return f'Reference: {ret_to_append}\n'
@classmethod
def from_dict(cls, adict):
adict = dict(adict)
if 'demo' in adict:
adict['demo'] = [cls.from_dict(d) for d in adict['demo']]
return cls(**{k: adict[k] for k in ['demo', 'ctx', 'ctxs', 'case', 'question', 'qid', 'gold_output'] if k in adict})
@classmethod
def clean_rets(cls, rets: List[str]) -> List[str]:
return [ret.replace('\n', ' ').strip() for ret in rets if ret.replace('\n', ' ').strip()]
@classmethod
def chatgpt_get_response(cls, prompt: Union[str, List[str]], examplars: List[List[Tuple[str, str]]] = [[]], max_tokens: int = 2048, api_key: str = None):
is_single = type(prompt) is str
if is_single:
prompt = [prompt]
examplars = examplars or [[]]
if len(prompt) != len(examplars):
examplars = [[] for _ in range(len(prompt))]
for p in prompt:
assert len(p.split()) <= max_tokens
responses = openai_api_call(
api_key=api_key,
model='gpt-3.5-turbo-0301',
messages=[[
{'role': 'user' if i == 0 else 'assistant', 'content': e[i]} for e in es for i in range(2)
] + [
{'role': 'user', 'content': p},
] for p, es in zip(prompt, examplars)],
temperature=0.0,
top_p=0.0,
max_tokens=max_tokens)
generations = [r['choices'][0]['message']['content'] for r in responses]
if is_single:
assert len(generations) == 1
return generations[0]
return generations
@classmethod
def canonicalize_text(cls, text: Union[str, List[str]], field: str = 'paragraph', api_key: str = None, debug: bool = False):
is_single = type(text) is not list
if is_single:
text = [text]
prompts = [f'For the following {field}, remove unnecessary spaces and capitalize words properly.\n{field.capitalize()}:\n{t}' for t in text]
clean_texts = cls.chatgpt_get_response(prompts, api_key=api_key)
post_clean_texts = []
for ct, t in zip(clean_texts, text):
if ct.strip().startswith(f'Sorry, there is no {field} provided'):
post_clean_texts.append(t)
else:
post_clean_texts.append(ct)
if debug:
for p, ct in zip(prompts, post_clean_texts):
print('-' * 10)
print(p)
print('-' * 10)
print(ct)
print('-' * 10)
input()
if is_single:
assert len(post_clean_texts) == 1
return post_clean_texts[0]
return post_clean_texts
@classmethod
def annotate_low_confidence_terms(cls, tokens: List[str], probs: List[float], low: float = 0.0, special_symbol: str = '*', min_gap: int = 5):
# mark with symbol
text = []
prev_is_low = -1
has = False
for i, (token, prob) in enumerate(zip(tokens, probs)):
if prob <= low:
if prev_is_low == -1 or i - prev_is_low >= min_gap:
has = True
leading_spaces = len(token) - len(token.lstrip())
if leading_spaces <= 0:
text.append(f'*{token}')
else:
text.append(f'{token[:leading_spaces]}*{token[leading_spaces:]}')
prev_is_low = i
else:
text.append(token)
else:
text.append(token)
text = ''.join(text)
return text, has
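    # Worked example for annotate_low_confidence_terms (input values are made up):
    #   tokens=[' the', ' Nile', ' Delta'], probs=[0.9, 0.0, 0.8], low=0.0
    #   -> (' the *Nile Delta', True): only ' Nile' has prob <= low, so it gets the
    #      special_symbol inserted after its leading space and `has` reports a hit.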
@classmethod
def extract_low_confidence_terms_rule(
cls,
tokens: List[str],
probs: List[float],
low: float = 0.0,
min_gap: int = 5, # TODO: the minimal token-based gap to separate two terms
expand: bool = True,
exclude_punct: bool = True,
always_extract_low: bool = False,
api_key: str = None):
prev_low_pos = -1
has = False
terms: List[List[str]] = []
spans: List[Tuple[int, int]] = []
for i, (token, prob) in enumerate(zip(tokens, probs)):
if prob <= low:
if prev_low_pos == -1 or i - prev_low_pos >= min_gap:
# new term
terms.append([token])
spans.append((i, i + 1))
else:
# old term
for j in range(prev_low_pos + 1, i + 1):
terms[-1].append(tokens[j])
spans[-1] = (spans[-1][0], i + 1)
prev_low_pos = i
terms = [''.join(term).strip() for term in terms]
if len(spans) <= 0:
return terms
if expand:
new_terms = cls.extract_constituents(tokens, spans=spans, api_key=api_key)
assert len(new_terms) == len(terms)
if always_extract_low:
terms = [nt if nt is not None else t for nt, t in zip(new_terms, terms)]
else:
terms = [nt for nt in new_terms if nt is not None]
if exclude_punct:
terms = [t for t in terms if t not in Utils.punctuations]
return terms
@classmethod
def extract_constituents(cls, tokens: List[str], spans: List[Tuple[int, int]], api_key: str = None, special_symbol: str = '*', debug: bool = False):
examplars = [
("Egypt has one of the longest histories of any country, tracing its heritage along *the Nile Delta back to the 6th–4th millennia BCE.", "*the Nile", "the Nile Delta"),
("The settlement, which legal experts said was the largest struck by an American media company, was announced by the two sides and the judge in the case at the *11th hour.", "*11th", "11th hour"),
("In his only surviving love letter to her, written a few months before their wedding, Tyler promised, \"*Whether I float or sink in the stream of fortune, you may be assured of this, that I shall never cease to love you.\"", "*Whether I float", "Whether I float or sink in the stream of fortune")
]
prompt_format = lambda sent, term: f"{sent}\n\nGiven the above sentence, extract the term/entity/phrase starting with \"{term}\"."
# add special_symbol
ori_sent = ''.join(tokens)
cases: List[Tuple[str, str]] = []
for start_ind, end_ind in spans:
start_token = tokens[start_ind]
n_lead_spaces = len(start_token) - len(start_token.lstrip())
if n_lead_spaces <= 0:
tokens[start_ind] = f'*{start_token}'
else:
tokens[start_ind] = f'{start_token[:n_lead_spaces]}*{start_token[n_lead_spaces:]}'
sent = ''.join(tokens).strip()
term = ''.join(tokens[start_ind:end_ind]).strip()
cases.append((sent, term))
tokens[start_ind] = start_token # convert tokens back to the original state
# call
prompts: List[str] = [prompt_format(s, t) for s, t in cases]
examplars: List[Tuple[str, str]] = [(prompt_format(s, t), out) for s, t, out in examplars]
responses = cls.chatgpt_get_response(prompt=prompts, examplars=[examplars] * len(prompts), api_key=api_key)
# post-process
constituents: List[str] = []
for r, (sent, term), prompt in zip(responses, cases, prompts):
if term.startswith(special_symbol): # trim special_symbol
term = term[len(special_symbol):].strip()
if debug:
print('-' * 10)
print(prompt)
print('-' * 10)
print(r)
print('-' * 10)
r = r.strip().split('\n', 1)[0].strip()
if r.startswith(special_symbol): # trim special_symbol
r = r[len(special_symbol):].strip()
if not r.startswith(term): # not an expansion
r = None
elif r not in ori_sent: # skip non-existent terms
r = None
elif not r: # empty
r = None
constituents.append(r)
return constituents
@classmethod
def extract_low_confidence_terms(cls, context: str, tokens: List[str], probs: List[float], low: float = 0.0, api_key: str = None, special_symbol: str = '*', debug: bool = False):
examplars = [
('*Egypt has one of the longest histories of any country, tracing its heritage along *the Nile Delta back to the *6th–4th millennia BCE.', '*Egypt\n*the Nile Delta\n*6th–4th'),
('The settlement, which *legal experts said was *the largest struck by an American media company, was *announced by the two sides and the judge in the case at the 11th hour.', '*legal experts\n*the largest struck\n*announced'),
('In his only *surviving love letter to her, written a few months before their wedding, Tyler promised, "*Whether I *float or sink in the stream of fortune, you may be assured of this, that I shall never *cease to love you."', '*surviving love letter\n*Whether\n*float or sink\n*cease to love you')
]
original_text = ''.join(tokens)
text, has = cls.annotate_low_confidence_terms(tokens=tokens, probs=probs, low=low, special_symbol=special_symbol)
if not has:
return []
# extract terms
#prompt_format = lambda x: f'Given the previous context and the last sentence, extract all terms/entities in the last sentence starting with the symbol "{special_symbol}", one at a line.\nPrevious context:\n{context}\nLast sentence:\n{x}'
prompt_format = lambda x: f'Given the following sentence, extract all terms/entities starting with the symbol "{special_symbol}", one at a line.\n{x}'
examplars = [(prompt_format(inp), out) for inp, out in examplars]
prompt = prompt_format(text)
response = cls.chatgpt_get_response(prompt, examplars=examplars, api_key=api_key)
terms = [t.strip() for t in response.strip().split('\n') if t.strip().startswith(special_symbol)] # remove outlier
terms = [t.lstrip(special_symbol) for t in terms if t in text and t.lstrip(special_symbol) in original_text] # remove non-exist terms
if debug:
print('-' * 10)
print(prompt)
print('-' * 10)
print(response)
print('-' * 10)
print(terms)
print('-' * 10)
return terms
@classmethod
def replace_low_confidence_terms(cls, context: str, tokens: List[str], probs: List[float], low: float = 0.0, api_key: str = None, special_symbol: str = '*', replace_symbol: str = 'XXX', debug: bool = False):
text, has = cls.annotate_low_confidence_terms(tokens=tokens, probs=probs, low=low, special_symbol=special_symbol)
if not has:
return text
# replace terms
prompt = f'Given the previous context and the last sentence, detect all terms/entities in the last sentence starting with the symbol "{special_symbol}", then replace them with "{replace_symbol}".\nPrevious context:\n{context}\nLast sentence:\n{text}'
replaced_text = cls.chatgpt_get_response(prompt, api_key=api_key)
if debug:
print('-' * 10)
print(prompt)
print('-' * 10)
print(replaced_text)
print('-' * 10)
return replaced_text
@classmethod
def replace_low_confidence_terms_by_extract(cls, context: str, tokens: List[str], probs: List[float], low: float = 0.0, api_key: str = None, special_symbol: str = '*', replace_symbol: str = 'XXX', min_term_length: int = 0):
text = ''.join(tokens)
terms = cls.extract_low_confidence_terms(context=context, tokens=tokens, probs=probs, low=low, api_key=api_key, special_symbol=special_symbol)
for term in terms:
if min_term_length and len(term) <= min_term_length: # ignore short terms
continue
text = text.replace(term, replace_symbol)
return text
@classmethod
def decontextualize_text(cls, context: str, text: str, api_key: str = None, debug: bool = False):
examplars = [
("The first American author to use natural diction and a pioneer of colloquialism, John Neal is the first to use the phrase son-of-a-bitch in a work of fiction.", "He attained his greatest literary achievements between 1817 and 1835, during which time he was America's first daily newspaper columnist, the first American published in British literary journals, author of the first history of American literature, America's first art critic, a short story pioneer, a children's literature pioneer, and a forerunner of the American Renaissance.", "John Neal attained his greatest literary achievements between 1817 and 1835, during which time he was America's first daily newspaper columnist, the first American published in British literary journals, author of the first history of American literature, America's first art critic, a short story pioneer, a children's literature pioneer, and a forerunner of the American Renaissance."),
("The Scottish wildcat is a European wildcat (Felis silvestris silvestris) population in Scotland.", "It was once widely distributed across Great Britain, but the population has declined drastically since the turn of the 20th century due to habitat loss and persecution.", "The Scottish wildcat was once widely distributed across Great Britain, but the population has declined drastically since the turn of the 20th century due to habitat loss and persecution."),
]
examplars = []
#prompt = f'Given the previous context and the last sentence, make minimal changes to the last sentence to make it self-contained by resolving pronoun references.\nPrevious context:\n{context}\nLast sentence:\n{text}'
#prompt_format = lambda x, y: f'Given the previous context and the last text, copy the last text and only replace pronouns (if any) with corresponding references to make the text self-contained.\n=== Previous context ===\n{x.strip()}\n=== Last text ===\n{y.strip()}'
#indicator = '---'
#prompt_format = lambda x, y: f'Replace pronouns in the following text with their corresponding references.\n\n=== Text (start) ===\n{x.strip()}\n{indicator}\n{y.strip()}\n=== Text (end) ==='
        start_sym, end_sym = "=== Text (start) ===", "=== Text (end) ==="
prompt_format = lambda x, y: f'Replace pronouns in the following text with their corresponding references.\n\n{x.strip()}\n{start_sym}\n{y.strip()}\n{end_sym}'
examplars = [(prompt_format(e[0], e[1]), e[2]) for e in examplars]
prompt = prompt_format(context, text)
#decontext_text = cls.chatgpt_get_response(prompt, examplars=examplars, api_key=api_key).split(indicator, 1)[-1].strip()
decontext_text = cls.chatgpt_get_response(prompt, examplars=examplars, api_key=api_key).strip()
decontext_text = decontext_text.split(start_sym, 1)[-1].strip()
decontext_text = decontext_text[:-len(end_sym)] if decontext_text.endswith(end_sym) else decontext_text
if debug:
print('-' * 10)
print(prompt)
print('-' * 10)
print(decontext_text)
print('-' * 10)
return decontext_text
@classmethod
def ask_question_text(
cls,
context: str,
text: str,
terms: List[str],
api_key: str = None,
debug: bool = False,
filter_question: bool = True,
ask_full_text: bool = False,
use_full_text: bool = True,
):
questions: List[str] = []
cases: List[str] = []
for term in terms:
term = term.strip('"')
#case = f'{context.lstrip()}{text.rstrip()}\n\nGiven the above passage, generate a question that can be used to look up relevant information to verify the following term "{term}".'
#case = f'{context.lstrip()}{text.rstrip()}\n\nThe term "{term}" in the above passage might be wrong. Generate a question that can be used to look up relevant information to verify it.'
case = f'{context.lstrip()}{text.rstrip()}\n\nGiven the above passage, ask a question to which the answer is the term/entity/phrase "{term}".'
cases.append(case)
if ask_full_text and len(terms) <= 0:
case = f'{context.lstrip()}{text.rstrip()}\n\nGiven the above passage, ask a question to which the answer is the information contained in the last sentence "{text.strip()}".'
cases.append(case)
elif use_full_text and len(terms) <= 0:
return [text.strip()]
responses = cls.chatgpt_get_response(cases, api_key=api_key)
questions: List[str] = []
for case, question in zip(cases, responses):
question = question.strip()
if filter_question and not question.endswith('?'):
continue
questions.append(question)
if debug:
print('-' * 10)
print(case)
print('-' * 10)
print(question)
print('-' * 10)
return questions
@classmethod
def get_queries_from_text_for_retrieval(
cls,
context: str,
tokens: List[str],
probs: List[float],
low: float = 0.0,
api_key: str = None,
replace_symbol: str = 'XXX',
detect_low_terms: bool = False,
decontextualize: bool = False,
askquestion: bool = False,
debug: bool = False,
) -> List[str]:
text = ''.join(tokens)
if debug:
print('0->', context)
print('1->', text)
print(list(zip(tokens, probs)))
if detect_low_terms:
#text = cls.replace_low_confidence_terms_by_extract(context=context, tokens=tokens, probs=probs, low=low, api_key=api_key, replace_symbol=replace_symbol)
#terms = cls.extract_low_confidence_terms(context=context, tokens=tokens, probs=probs, low=low, api_key=api_key)
terms = cls.extract_low_confidence_terms_rule(tokens=tokens, probs=probs, low=low, api_key=api_key)
if debug:
print('2->', terms)
if decontextualize:
text = cls.decontextualize_text(context=context, text=text, api_key=api_key)
if debug:
print('3->', text)
elif askquestion:
questions = cls.ask_question_text(context=context, text=text, terms=terms, api_key=api_key)
if detect_low_terms:
if decontextualize:
#text = text.replace(replace_symbol, ' ')
for term in terms:
questions = [text.replace(term, ' ')]
elif askquestion:
pass
if debug:
print('4->', questions)
input()
return questions
def get_query_for_retrieval(self):
if self.gen_len == 0:
return self.question
#question = self.question[:self.question.find('(A)')].strip() # TODO: debug
#return question
else:
return self.case
def get_all_ctxs(self) -> List[str]:
return self.ctxs
def add_generation(self, cont: str):
self.case += cont
self.gen_len += len(cont)
if self.gold_used_len != 0: # use gold
self.gold_output = self.gold_output[self.gold_used_len:]
self.gold_used_len = 0
def reset_generation(self):
if self.gen_len <= 0:
return
self.case = self.case[:-self.gen_len]
self.gen_len = 0
def change_ctx(self):
assert len(self.ctxs)
if self.ctxs_idx >= len(self.ctxs):
return self.did, self.ctx
self.did, self.ctx = self.ctxs[self.ctxs_idx]
self.ctxs_idx += 1
return self.did, self.ctx
def reinit_ctx(self):
self.ctx = None
self.ind = 1
def check_ctx(self, method):
if self.ctx:
return
if self._ctxs:
self.update_retrieval([], method=method)
def append_retrieval(self, rets: List[str], add_index: bool = False):
rets = self.clean_rets(rets)
self.case += self.get_append_retrieval(rets, index=self.ind if add_index else None) # TODO: fix list bug
self.ind = (self.ind + 1) if add_index else self.ind
def update_retrieval(
self,
rets: List[Tuple[str, str]] = [],
method: str = 'replace',
dedup: bool = True,
add_index: bool = True,
):
if self._ctxs: # merge with kept ctxs
exist_ids = set([_id for _id, t in self._ctxs])
new_rets = copy.deepcopy(self._ctxs)
for _id, t in rets:
if _id not in exist_ids:
new_rets.append((_id, t))
exist_ids.add(_id)
rets = new_rets
rets = list(map(itemgetter(1), rets))
rets = self.clean_rets(rets)
def merge_rets():
if add_index:
return '\n'.join(f'[{self.ind + i}]: {ret}' for i, ret in enumerate(rets))
return '\n'.join(rets)
assert method in {'replace', 'append'}
merge_ret = merge_rets()
if self.ctx is None:
self.ctx = merge_ret
else:
if method == 'replace':
self.ctx = merge_ret
elif method == 'append':
if dedup:
if merge_ret.lower() not in self.ctx.lower():
self.ctx += '\n' + merge_ret
self.ind += len(rets)
else:
self.ctx += '\n' + merge_ret
self.ind += len(rets)
else:
raise NotImplementedError
@classmethod
def format_reference(cls, ref: str, api_key: str = None):
if cls.add_ref_suffix and not ref.endswith(cls.add_ref_suffix):
ref += cls.add_ref_suffix
if cls.add_ref_prefix and not ref.startswith(cls.add_ref_prefix):
ref = cls.add_ref_prefix + ref
if cls.clean_reference:
ref = cls.canonicalize_text(ref, field='text', api_key=api_key)
method = cls.format_reference_method
assert method in {'default', 'searchresults', 'searchresultsrank', 'ignore', 'ignore_for_retrieval_instruct', 'short_ignore'}
if method == 'default':
return 'Reference: ' + ref
if method == 'searchresults':
return 'Search results :\n' + ref
if method == 'searchresultsrank':
return 'Search results ranked based on relevance in descending order:\n' + ref
if method == 'ignore':
formatted = [
                '1. The reference below might be helpful when answering questions but it is noisy. Feel free to ignore irrelevant information in it.', ref.strip(),
'2. You should write out the reasoning steps and then draw your conclusion, where the reasoning steps should utilize the Search API "[Search(term)]" to look up information about "term" whenever possible. For example:']
return '\n\n'.join(formatted)
if method == 'ignore_for_retrieval_instruct':
            formatted = ['The reference below might be helpful when answering questions but it is noisy. Feel free to ignore irrelevant information in it.', ref.strip()]
return '\n\n'.join(formatted)
if method == 'short_ignore':
            formatted = ['The reference below might be helpful but it is noisy. Feel free to ignore irrelevant information in it:', ref.strip()]
return ' '.join(formatted)
raise NotImplementedError
def get_prefix(
self,
qagent: "QueryAgent",
prefix_method: str = 'sentence') -> Tuple[str, int]:
if not self.gold_output: # finish
return qagent.final_stop_sym, 0
if prefix_method == 'sentence':
prefix, self.gold_used_len = ApiReturn.get_sent(self.gold_output, position='begin')
return prefix, 0
elif prefix_method == 'all':
prefix, self.gold_used_len = self.gold_output, len(self.gold_output)
return prefix, 0
elif prefix_method.startswith('sentence_first:'):
firstk = int(prefix_method[len('sentence_first:'):])
prefix, self.gold_used_len = ApiReturn.get_sent(self.gold_output, position='begin')
prefix = qagent.get_tokens(prefix, topk=firstk)[0]
return prefix, None
elif prefix_method.startswith('freq:'):
firstk = int(prefix_method[len('freq:'):])
prefix, self.gold_used_len = qagent.get_tokens(self.gold_output, topk=firstk)
return prefix, 0
else:
raise NotImplementedError
def format(
self,
use_ctx: bool = False,
use_ret_instruction: bool = True,
use_instruction: bool = True,
is_chat_model: bool = False,
api_key: str = None
):
# run on demo
demo_formatted: List[str] = [d.format(use_ctx=use_ctx, use_ret_instruction=False, use_instruction=False)[0] for d in self.demo]
use_ctx = use_ctx and bool(self.ctx) # do not use ctx when it's None or empty string
use_ret_instruction = use_ret_instruction and self.ret_instruction is not None
ref = self.format_reference(self.ctx, api_key=api_key) if use_ctx else None
task, ret, ensemble = self.ret_instruction.format(use_ctx=use_ctx) if use_ret_instruction else (None, None, None)
elements: List[str] = []
if use_ctx and self.ctx_position == 'begin':
elements.append(ref)
        # append retrieval instruction
if use_ret_instruction:
elements.append(ret)
# append task instruction
if use_ret_instruction:
elements.append(task)
# append additional instruction
if use_instruction and self.instruction is not None:
elements.append(self.instruction)
# append demo
if len(demo_formatted) and not is_chat_model:
elements.extend(demo_formatted)
# append ensemble
if use_ret_instruction:
elements.append(ensemble)
if use_ctx and self.ctx_position == 'before_case':
elements.append(ref + '\n' + self.case)
else:
elements.append(self.case)
return '\n\n'.join(elements), self.gen_len, demo_formatted
Sentence = namedtuple('Sentence', 'text start_char end_char')
class ApiReturn:
EOS = '<|endoftext|>'
spacy_nlp = spacy.load('en_core_web_sm')
psentencizer = PunktSentenceTokenizer()
stanza_nlp = stanza.Pipeline(lang='en', processors='tokenize')
use_sentencizer = 'nltk'
min_sent_len = 5
def __init__(
self,
prompt: str,
text: str,
tokens: List[str] = None,
probs: List[float] = None,
offsets: List[int] = None,
finish_reason: str = 'stop',
model: str = None,
skip_len: int = 0,
):
self.model = model
self.prompt = prompt
self.text = text
self.tokens = tokens
self.probs = probs
self.offsets = offsets
if self.has_tokens:
assert len(tokens) == len(probs) == len(offsets)
self.finish_reason = finish_reason
if self.finish_reason is None:
self.finish_reason = 'stop' # TODO: a bug from openai?
if skip_len: # skip `skip_len` chars at the beginning
self.text = self.text[skip_len:]
if self.has_tokens:
i = 0
for i, off in enumerate(self.offsets):
if off == skip_len:
break
elif off > skip_len: # the previous token span across the boundary
i = i - 1
assert i >= 0
break
self.tokens = self.tokens[i:]
self.probs = self.probs[i:]
self.offsets = self.offsets[i:]
@property
def has_tokens(self):
return self.tokens is not None
@property
def token_probs(self):
if self.has_tokens:
return self.probs
else:
return []
@property
def num_tokens(self):
if self.has_tokens:
return len(self.tokens)
else:
return len(tiktoken.encoding_for_model(self.model).encode(self.text))
@property
def has_endoftext(self):
return self.EOS in self.tokens
@property
def is_empty(self):
return len(self.text.strip()) == 0
@classmethod
def get_sent(cls, text: str, position: str = 'begin'):
if cls.use_sentencizer == 'spacy':
sents = list(cls.spacy_nlp(text).sents)
elif cls.use_sentencizer == 'nltk':
sents = [Sentence(text[s:e], s, e) for s, e in cls.psentencizer.span_tokenize(text)]
else:
raise NotImplementedError
if position == 'begin':
break_at = len(text)
for sent in sents:
                # remove trailing spaces, which are usually tokenized into the next token of the next sentence by GPT tokenizers
num_trail_spaces = len(sent.text) - len(sent.text.rstrip())
if sent.end_char - num_trail_spaces >= cls.min_sent_len:
break_at = sent.end_char - num_trail_spaces
break
return text[:break_at], break_at
if position == 'end':
break_at = 0
for i in range(len(sents)):
sent = sents[len(sents) - i - 1]
if len(text) - sent.start_char >= cls.min_sent_len: # TODO: argument
break_at = sent.start_char
break
return text[break_at:], break_at
raise NotImplementedError
def truncate_at_prob(self, low: float):
assert self.has_tokens, 'not supported'
if self.num_tokens <= 1:
return self
break_point = self.num_tokens
for i in range(self.num_tokens):
t, p, o = self.tokens[i], self.probs[i], self.offsets[i]
if p <= low:
break_point = i
break
if break_point == 0 and self.num_tokens > 0: # avoid deadlock
break_point = 1
while break_point < self.num_tokens: # truncation
assert break_point > 0
keep = self.offsets[break_point] - len(self.prompt)
if keep <= 0:
break_point += 1
continue
self.text = self.text[:keep]
self.tokens = self.tokens[:break_point]
self.probs = self.probs[:break_point]
self.offsets = self.offsets[:break_point]
self.finish_reason = 'boundary'
break
return self
def truncate_at_boundary(self, unit: str = 'sentence'):
if self.num_tokens <= 1:
return self
if unit == 'sentence':
if self.use_sentencizer == 'spacy':
sents = list(self.spacy_nlp(self.text).sents)
#print(self.text)
#print('---')
#print(list(map(str, sents)))
#print('---')
elif self.use_sentencizer == 'nltk':
sents = [Sentence(self.text[s:e], s, e) for s, e in self.psentencizer.span_tokenize(self.text)]
else:
raise NotImplementedError
break_at = len(self.text)
for sent in sents:
                # remove trailing spaces, which are usually tokenized into the next token of the next sentence by GPT tokenizers
num_trail_spaces = len(sent.text) - len(sent.text.rstrip())
if sent.end_char - num_trail_spaces >= self.min_sent_len:
break_at = sent.end_char - num_trail_spaces
break
if break_at > 0 and break_at < len(self.text): # truncation
if self.has_tokens:
i = 0
for i in range(self.num_tokens):
if self.offsets[i] - len(self.prompt) >= break_at:
break_at = self.offsets[i] - len(self.prompt)
break
assert i > 0
self.tokens = self.tokens[:i]
self.probs = self.probs[:i]
self.offsets = self.offsets[:i]
assert break_at > 0
self.text = self.text[:break_at]
self.finish_reason = 'boundary'
else:
raise NotImplementedError
return self
def truncate_at_substring(self, substr: str):
position = self.text.find(substr)
if position == -1:
return
self.text = self.text[:position]
if self.has_tokens:
i = 0
for i, off in enumerate(self.offsets):
if off - len(self.prompt) == position:
break
elif off - len(self.prompt) > position: # the previous token span across the boundary
i = i - 1
assert i >= 0
break
self.tokens = self.tokens[:i]
self.probs = self.probs[:i]
self.offsets = self.offsets[:i]
def use_as_query(
self,
low_prob: float = None,
mask_prob: float = None,
mask_method: str = 'simple',
n_gen_char_in_prompt: int = 0,
api_key: str = None,
):
if not low_prob and not mask_prob:
return self.text
assert self.has_tokens, 'not supported'
if low_prob:
ok = False
for p in self.probs:
if p <= low_prob:
ok = True
break
if not ok:
return ''
if mask_prob:
if mask_method == 'simple':
keep = [(t if p > mask_prob else ' ') for t, p in zip(self.tokens, self.probs)]
keep = ''.join(keep).strip()
return keep
elif mask_method in {'wholeterm-decontextualize', 'wholeterm-askquestion'}:
if n_gen_char_in_prompt == 0:
context = ''
else:
context = self.prompt[-n_gen_char_in_prompt:]
decontextualize = 'decontextualize' in mask_method
askquestion = 'askquestion' in mask_method
keep = CtxPrompt.get_queries_from_text_for_retrieval(
context=context,
tokens=self.tokens,
probs=self.probs,
low=mask_prob,
api_key=api_key,
detect_low_terms=True,
decontextualize=decontextualize,
askquestion=askquestion)
return keep
else:
raise NotImplementedError
else:
return self.text
class RetrievalInstruction:
cot_instruction: Dict[str, Any] = {
'retrieval': 'Skill 1. Use the Search API to look up relevant information by writing "[Search(term)]" where "term" is the search term you want to look up. For example:',
'task': 'Skill 2. Answer questions by thinking step-by-step. First, write out the reasoning steps, then draw the conclusion. For example:',
'ensemble': 'Now, combine the aforementioned two skills. First, write out the reasoning steps, then draw the conclusion, where the reasoning steps should also utilize the Search API "[Search(term)]" whenever possible.',
'examplars': [
{
'question': 'But what are the risks during production of nanomaterials?',
'ctxs': [(None, 'The increased production of manufactured nanomaterials (MNMs) and their use in consumer and industrial products means that workers in all countries will be at the front line of any exposure, placing...')],
'answer': '[Search(nanomaterial production risks)] Some nanomaterials may give rise to various kinds of lung damage.',
},
{
'question': 'The colors on the flag of Ghana have the following meanings.',
'ctxs': [(None, "The flag of Ghana comprises of the Pan-African colors of red, yellow and green. These colors are horizontal stripes that make up the background of the flag. Red is represents the nation's fight for independence, the gold is a sign of the country's mineral wealth, and the green is a representation of the country's natural wealth...")],
'answer': 'Red is for [Search(Ghana flag red meaning)] the blood of martyrs, green for forests, and gold for mineral wealth.',
},
{
'question': 'Metformin is the first-line drug for what?',
'ctxs': [(None, "Metformin, sold under the brand name Glucophage, among others, is the main first-line medication for the treatment of type 2 diabetes,[6][7][8][9] particularly in people who are overweight.[7] It is also used in the treatment of polycystic ovary syndrome...")],
'answer': '[Search(Metformin first-line drug)] patients with type 2 diabetes and obesity.'
}
]
}
strategyqa_instruction: Dict[str, Any] = {
'task': 'Skill 2. Answer questions by thinking step-by-step. First, write out the reasoning steps, then generate a yes or no answer. For example:',
'ensemble': 'Now, combine the aforementioned two skills. First, write out the reasoning steps, then generate a yes or no answer, where the reasoning steps should also utilize the Search API "[Search(term)]" whenever possible.',
}
summary_instruction: Dict[str, Any] = {
'task': '2. You should generate a short paragraph of summary for an entity. For example:',
'ensemble': '3. Now, you should combine the aforementioned two abilities. You should generate a short paragraph of summary for an entity and utilize the Search API "[Search(term)]" whenever possible.',
}
def __init__(self, method: str = 'cot', fewshot: int = None):
self.instruction = getattr(self, f'{method}_instruction')
for k, v in self.cot_instruction.items():
if k not in self.instruction:
self.instruction[k] = v
        self.fewshot = len(self.instruction['examplars']) if fewshot is None else fewshot
    def format(self, use_ctx: bool = False) -> Tuple[str, str, str]:
use_ctx = False # no ctx for examplars
demos: List[str] = []
for i in range(self.fewshot):
q = self.instruction['examplars'][i]['question']
a = self.instruction['examplars'][i]['answer']
if use_ctx:
ctxs = self.instruction['examplars'][i]['ctxs']
assert CtxPrompt.ctx_position == 'before_case'
ref = CtxPrompt.format_reference(' '.join(map(itemgetter(1), ctxs)))
demo = f'{ref}\nQuestion: {q}\nAnswer (with Search): {a}'
else:
demo = f'Question: {q}\nAnswer (with Search): {a}'
demos.append(demo)
task = self.instruction['task']
ret = self.instruction['retrieval'] + '\n\n' + '\n\n'.join(demos)
ensemble = self.instruction['ensemble']
return task, ret, ensemble
| [
"<function <lambda> at 0x11617ff60>",
"<function <lambda> at 0x116450ea0>",
"Given the previous context and the last sentence, detect all terms/entities in the last sentence starting with the symbol \"PLACEHOLDER\", then replace them with \"PLACEHOLDER\".\nPrevious context:\nPLACEHOLDER\nLast sentence:\nPLACEHOLDER",
"<function <lambda> at 0x11617f560>"
] |
2024-01-10 | RainbowGamer333/SINTEFStuff | src~openAI~remoteapi.py | import os
import openai
import yaml
def load_credential():
# change credential filepath to match your own
current_dir = os.path.dirname(os.path.abspath(__file__))
credential_filepath = os.path.join(os.path.dirname(current_dir), "openai.credential")
with open(credential_filepath, 'r') as stream:
credential_data = yaml.safe_load(stream)
openai_config = credential_data['openai']
openai.api_type = "azure"
openai.api_base = openai_config['endpoint']
openai.api_version = "2023-03-15-preview"
openai.api_key = openai_config["key"]
| [] |
2024-01-10 | bbarclay/NLP_incentived_review | bin~Model.py |
# coding: utf-8
# In[2]:
import pandas as pd
import numpy as np
import pickle
import json
from copy import deepcopy
import itertools
from sklearn.feature_extraction.text import CountVectorizer
from gensim.utils import simple_preprocess
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters
import re
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from collections import defaultdict
from gensim.models import word2vec
from gensim import models
from sklearn.preprocessing import Normalizer, normalize
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans, MiniBatchKMeans
import matplotlib.pyplot as plt
from wordcloud import WordCloud
import csv
from sklearn.preprocessing import Imputer, StandardScaler, LabelEncoder, OneHotEncoder
from sklearn_pandas import DataFrameMapper, CategoricalImputer
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.cross_validation import train_test_split
from sklearn.metrics import roc_auc_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.metrics import recall_score
# ## Load data
# In[ ]:
## Not in use
# product_org = pd.DataFrame(df_raw)
# products=deepcopy(product_org)
# products['brand_id']=products['Brand'].apply(lambda x: x['Id'])
# products['brand_name']=products['Brand'].apply(lambda x: x['Name'])
# products['rec_cnt']=products['ReviewStatistics'].apply(lambda x: x['RecommendedCount'])
# products['avg_rating']=products['ReviewStatistics'].apply(lambda x: x['AverageOverallRating'])
# products['helpful_cnt']=products['ReviewStatistics'].apply(lambda x: x['HelpfulVoteCount'])
# products.head()
# In[ ]:
# def product_parser(product):
# agg = dict()
# fields = ['brand_id', 'brand_name','product_id','Name','CategoryId', 'Description', 'rec_cnt', 'avg_rating','helpful_cnt','TotalReviewCount']
# for field in fields:
# value = product.get(field, None)
# try:
# agg[field] = value
# except:
# agg[field] = None
# return agg
# In[ ]:
# pd.DataFrame(product_parser(products)).to_pickle('product_table.pickle')
# df1=pd.read_pickle('reviewlist_all.pickle')
# df1_org=pd.DataFrame(df1)
# In[ ]:
# def review_parser(review):
# # Reference variables
# agg = dict()
# fields = ['AuthorId','IsFeatured','IsRatingsOnly','IsRecommended', 'product_id', 'Rating', 'ReviewText','isemployee','freeproduct','Helpfulness','Title']
# for field in fields:
# value = review.get(field, None)
# #value = product.__dict__.get(field, None)
# try:
# agg[field] = value
# #agg[field] = unicode(value).encode('ascii', errors='ignore') if value is not None else None
# except:
# agg[field] = None
# return agg
# In[ ]:
# with open('reviewlist_all.pickle', 'rb') as f:
# # The protocol version used is detected automatically, so we do not
# # have to specify it.
# data = pickle.load(f)
# In[ ]:
# review_i = pd.DataFrame([i for i in data if 'IncentivizedReview' in i['ContextDataValues']])
# review_i['isemployee']=review_i['ContextDataValues'].apply(lambda x: x['StaffContext']['Value'])
# review_i['freeproduct']=review_i['ContextDataValues'].apply(lambda x: x['IncentivizedReview']['Value'])
# reviews=pd.DataFrame(review_parser(review_i))
# reviews.to_pickle('review_table.pickle')
# In[3]:
new=pd.read_pickle('reviewdata_sub.pickle')
# In[4]:
col=list(new.columns)
# In[5]:
col
# In[6]:
new.shape
# ## Parse
# In[7]:
def review_parser_new(review):
# Reference variables
agg = dict()
fields = ['AuthorId','IsFeatured','IsRatingsOnly','IsRecommended', 'Rating', 'Title','ReviewText',
'ContextDataValues.StaffContext.Value','ContextDataValues.IncentivizedReview.Value',
'ContextDataValues.age.Value','ContextDataValues.beautyInsider.Value','Helpfulness','product_id',
'productPrice','']
for field in fields:
value = review.get(field, None)
#value = product.__dict__.get(field, None)
try:
agg[field] = value
#agg[field] = unicode(value).encode('ascii', errors='ignore') if value is not None else None
except:
agg[field] = None
return agg
# In[8]:
reviews_new=pd.DataFrame(review_parser_new(new))
# In[9]:
reviews_new['incentivized']=0
reviews_new.loc[(reviews_new['ContextDataValues.IncentivizedReview.Value']=='true')|(reviews_new['ContextDataValues.StaffContext.Value']=='true'),'incentivized']=1
# In[10]:
reviews_new['age']=reviews_new['ContextDataValues.age.Value'].fillna('unknow')
# In[11]:
reviews_new['vib']=reviews_new['ContextDataValues.beautyInsider.Value'].fillna('unknow')
# In[12]:
reviews_new['incentivized']=0
reviews_new.loc[(reviews_new['ContextDataValues.StaffContext.Value']=='true')|(reviews_new['ContextDataValues.IncentivizedReview.Value']=='true'),'incentivized']=1
# In[13]:
reviews_new.reset_index(inplace=True)
# In[14]:
reviews_new=reviews_new[['index', 'AuthorId', 'IsFeatured', 'IsRatingsOnly', 'IsRecommended',
'Rating', 'Title', 'ReviewText', 'Helpfulness', 'product_id', 'productPrice', 'incentivized', 'age','vib']]
# In[15]:
reviews_new.columns=['review_id', 'AuthorId', 'IsFeatured', 'IsRatingsOnly', 'IsRecommended',
'Rating', 'Title', 'ReviewText', 'Helpfulness', 'product_id',
'productPrice', 'incentivized', 'age', 'vib']
# In[16]:
reviews_new.incentivized.value_counts()
# ### Review analysis
# In[17]:
df=pd.DataFrame(reviews_new[['ReviewText','incentivized','review_id']])
# In[18]:
#sentence tokenizer for each review
punkt_param = PunktParameters()
punkt_param.abbrev_types = set(['dr', 'vs', 'mr', 'mrs'])
tokenizer = PunktSentenceTokenizer(punkt_param)
df['review_sentences'] = df['ReviewText'].map(lambda text: tokenizer.tokenize(text))
# In[19]:
df.head()
# In[ ]:
df.to_csv('review_table.csv')
# In[20]:
# split long sentences in review_sentences
def split_long_sentence(sentences):
shorter_sentences = []
for sentence in sentences:
if len(sentence) >= 50:
sub_sentences = re.split('&|!|;|and|,|~|but|\.|so i|\s-\s|\(|\)', sentence.lower())
sub_sentences = [s.strip() for s in sub_sentences]
shorter_sentences += sub_sentences
else:
shorter_sentences.append(sentence.lower())
shorter_sentences = filter(lambda s: len(s) > 13
and not s.startswith('i have')
and not s.startswith('i also have')
and not s.startswith('i\'m')
and not s.startswith('i had')
and not s.startswith('i\'ve been')
and not s.startswith('i thought')
and not s.startswith('i was ')
and not s.startswith('i use ')
and not s.startswith('i used to')
and not s.startswith('if you have')
and not s.startswith('i suffer')
and not ('i do have' in s)
and not ('looking for' in s)
and not ('i purchase' in s)
and not ('i bought' in s)
, shorter_sentences)
return list(shorter_sentences)
# In[22]:
#Generate table of all the sentences with review_ids
review_ids = []
review_sentences = []
for review in df.as_matrix():
curr_review_id = review[2]
curr_review_sentences = review[-1]
    # Split long sentences into shorter pieces where possible
shorter_sentences = split_long_sentence(curr_review_sentences)
review_ids += [curr_review_id] * len(shorter_sentences)
review_sentences += shorter_sentences
df_review_sentences = pd.DataFrame({'review_id': review_ids, 'sentence': review_sentences})
df_review_sentences.sample(10)
# In[23]:
df_review_sentences.shape
# In[24]:
## Tokenize sentences
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
# In[25]:
tokenizer_regex = re.compile(r"[\s]")
def tokenize(text):
clean_text = re.sub(r'[,!.$\d%&~?()#<>"=/-]', ' ', text)
clean_text = ' '.join(clean_text.split())
tokens = [tok.strip().lower() for tok in tokenizer_regex.split(clean_text)]
filtered_tokens = tokens# filter(lambda tok: tok not in stop_words, tokens)
return list(filtered_tokens)
# In[26]:
def get_word_weights(docs):
tfidf = TfidfVectorizer(stop_words=frozenset(stop_words),
tokenizer=tokenize,
ngram_range=(1,1))
tfidf.fit(docs)
max_idf = max(tfidf.idf_)
word2weight = defaultdict(lambda: max_idf,
[(w, tfidf.idf_[i]) for w, i in tfidf.vocabulary_.items()])
return word2weight
test_docs2 = ["dog cat fish","dog cat cat","fish bird", 'bird fish. bird', 'blah cat', 'tata harper']
get_word_weights(test_docs2)
# In[27]:
tfidf = TfidfVectorizer(stop_words=frozenset(stop_words),
tokenizer=tokenize,
ngram_range=(1,1))
tfidf.fit(test_docs2)
tfidf.idf_
# In[28]:
tfidf.vocabulary_.items()
# In[29]:
w2v = models.KeyedVectors.load_word2vec_format("GoogleNews-vectors-negative300.bin",binary=True)
#model = gensim.models.Word2Vec(texts, size=100, window=5, min_count=1, workers=4,sg=1)
# In[ ]:
#w2v.most_similar(positive=['wrinkle'], topn=25)
# In[30]:
'review' in w2v.vocab
# In[31]:
df_review_sentences['tokenized_words'] = df_review_sentences['sentence'].map(lambda sentence: tokenize(sentence))
df_review_sentences['tokenized_filtered_words'] = df_review_sentences['tokenized_words'].map(
lambda tokenized_words: [word for word in tokenized_words if word in w2v.vocab])
df_review_sentences['tokenized_filtered_words_length'] = df_review_sentences['tokenized_filtered_words'].map(
lambda tokenized_filtered_words: len(tokenized_filtered_words))
# In[32]:
df_review_sentences = df_review_sentences[df_review_sentences.tokenized_filtered_words_length > 3]
df_review_sentences = df_review_sentences.reset_index()
word2weight = get_word_weights(df_review_sentences['sentence'])
# In[33]:
df_review_sentences.head()
# In[35]:
def get_docs_vocab(docs):
count_vectorizer = CountVectorizer(ngram_range=(1,1),
stop_words=frozenset(stop_words),
tokenizer=tokenize)
count_vectorizer.fit_transform(docs)
vocab = count_vectorizer.vocabulary_.keys()
return vocab
get_docs_vocab(['cat mouse dog', 'mouse dog'])
# In[36]:
def get_pos_weight(tokens):
word_pos = nltk.pos_tag(tokens)
word_to_weight = {}
for word, pos in word_pos:
if pos.startswith('JJ') | pos.startswith('RB'):
word_to_weight[word] = 2 # adjective or adverb
elif (pos == 'VBD') | (pos == 'VBG') | (pos == 'VBN'):
word_to_weight[word] = 1.3 # verb
# elif (pos == 'NN'):
# word_to_weight[word] = 1.1 # noun
else:
word_to_weight[word] = 1
return word_to_weight
# In[37]:
import nltk
test_pos = defaultdict( list )
test_words = ['refreshed', 'tingling', 'tried', 'redness', 'dried', 'dry', 'added',
'eczema', 'sensitive', 'tight', 'recommend', 'pick', 'matte', 'removed', 'slippery',
'irritated', 'pleased', 'feels', 'five', 'forever', 'milky', 'hydrated', 'favorite', 'didn\'t']
test_words_pos = nltk.pos_tag(test_words)
for word, pos in test_words_pos:
test_pos[pos].append(word)
#get_pos_weight(test_words)
print(test_pos)
# In[38]:
import nltk
nltk.download('averaged_perceptron_tagger')
# In[39]:
docs_vocab = get_docs_vocab(df_review_sentences['sentence'])
# In[41]:
pos_weights = get_pos_weight(list(docs_vocab))
# In[42]:
def word2vec_pos_weight(tokenized_filtered_words):
return np.mean([w2v[w] * pos_weights.get(w, 1) * word2weight[w]
for w in tokenized_filtered_words], axis=0)
# In[43]:
def word2vec_tfidf(tokenized_filtered_words):
return np.mean([w2v[w] * word2weight[w]
for w in tokenized_filtered_words], axis=0)
# In[44]:
df_review_sentences['word2vec'] = df_review_sentences['tokenized_filtered_words'].apply(
# lambda tokenized_filtered_words: np.mean(w2v[tokenized_filtered_words], axis=0)
#lambda tokenized_filtered_words: word2vec_tfidf(tokenized_filtered_words)
lambda tokenized_filtered_words: word2vec_pos_weight(tokenized_filtered_words)
)
# In[45]:
sentence_word_vectors = np.array(df_review_sentences['word2vec'].values.tolist())
sentence_word_vectors.shape
# In[425]:
def plot_kmeans_inertia(data):
"""Figure out optimized number of clusters for KMeans"""
max_number_clusters = 30
inertia_values = []
for cluster_count in range(1, max_number_clusters+1):
print('fitting cluster ', cluster_count)
km = KMeans(n_clusters=cluster_count)
km.fit(data)
inertia_values.append(km.inertia_)
plt.plot(range(1, max_number_clusters+1), inertia_values)
plt.savefig('kmeans_inertia.png', dpi=500)
# In[426]:
plot_kmeans_inertia(sentence_word_vectors_truncated_sub)
# ## Kmeans
# In[46]:
number_sentences = sentence_word_vectors.shape[0]
df_review_sentences_truncated = df_review_sentences.iloc[0:number_sentences, :]
sentence_word_vectors_truncated = sentence_word_vectors[0:number_sentences, :]
sentence_word_vectors_truncated = normalize(sentence_word_vectors_truncated)
sentence_word_vectors_truncated.shape
# In[47]:
cluster_count = 25
km = MiniBatchKMeans(n_clusters=cluster_count,random_state=1)
#km = KMeans(n_clusters=cluster_count,random_state=3)
review_word2vec_clusters = km.fit_predict(sentence_word_vectors_truncated)
len(review_word2vec_clusters)
# In[48]:
df_sentence_cluster = pd.DataFrame({})
cluster_columns = ['feat_' + str(i) for i in range(0, cluster_count)]
for i in range(0, cluster_count):
cluster_column = cluster_columns[i]
df_sentence_cluster[cluster_column] = (review_word2vec_clusters == i).astype(int)
df_sentence = pd.concat([df_review_sentences, df_sentence_cluster], axis=1)
df_sentence[df_sentence['feat_0'] == 1].head()
# In[49]:
df_sentence_all = pd.merge(df_sentence, df, on='review_id', how='left')
df_sentence_all.head(2)
# In[50]:
ratio=[]
for i in range(25):
a=df_sentence_all[df_sentence_all['feat_'+str(i)] ==1]
ratio.append(sum(a['incentivized'])/len(a['incentivized']))
n=np.argmax(ratio)
print(n,ratio[n])
# In[43]:
df_sentence_all[df_sentence_all['feat_15'] == 1]['sentence'].to_csv('feature.csv')
# In[51]:
text=''.join(list(df_sentence_all[df_sentence_all['feat_15'] == 1]['sentence']))
wordcloud = WordCloud(width=480, height=480,background_color="white",margin=0,colormap="Reds",
stopwords=["samplei", "sephora",'this','i','thisi'],max_words=20).generate(text)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.margins(x=0, y=0)
#plt.show()
plt.savefig('wordcloud.png', dpi=500,transparent=True)
# In[52]:
from collections import Counter
c = Counter(text.split())
c.most_common(15)
# In[53]:
text=''.join(list(df_sentence_all[df_sentence_all['feat_23'] == 1]['sentence']))
wordcloud = WordCloud(width=480, height=480, margin=0,background_color="white",colormap="Reds",
stopwords=['was','in','skinmy','skin','my','is'],max_words=50).generate(text)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.margins(x=0, y=0)
plt.show()
# In[ ]:
#!pip3 install pyldavis
import pyLDAvis, pyLDAvis.sklearn
from IPython.display import display
from sklearn import datasets
from sklearn.decomposition import LatentDirichletAllocation
categories = ['alt.atheism', 'comp.graphics', 'rec.sport.baseball']
ng_train = datasets.fetch_20newsgroups(subset='train',
categories=categories,
remove=('headers',
'footers', 'quotes'))
count_vectorizer = CountVectorizer(ngram_range=(1, 2),
stop_words='english',
token_pattern="\\b[a-z][a-z]+\\b")
X = count_vectorizer.fit_transform(ng_train.data)
n_topics = 3
n_iter = 10
lda = LatentDirichletAllocation(n_topics=n_topics,
max_iter=n_iter,
random_state=42)
data = lda.fit_transform(X)
# Setup to run in Jupyter notebook
pyLDAvis.enable_notebook()
# Create the visualization
vis = pyLDAvis.sklearn.prepare(lda, X, count_vectorizer)
# Export as a standalone HTML web page
# pyLDAvis.save_html(vis, 'lda.html')
# Let's view it!
display(vis)
# ## Classification model
# ### Without text feature
# In[54]:
reviews_new['n_reviews']=reviews_new['review_id'].groupby(reviews_new['AuthorId']).transform('count')
reviews_new['Helpfulness']=reviews_new['Helpfulness'].fillna(0)
reviews_new['productPrice']=reviews_new['productPrice'].apply(lambda x: float(x))
reviews_new['vib']=reviews_new['vib'].apply(lambda x: 0 if x=='no' or x=='unknow' else 1)
# In[55]:
reviews_new['complimentary']=reviews_new['ReviewText'].apply(lambda x: 1 if 'complimentary'in x.split()
or 'Influenster' in x.split()
or 'influenster' in x.split()
else 0)
# In[56]:
reviews_new.loc[reviews_new['complimentary']==1]['incentivized'].value_counts()
# In[57]:
# add sentiment scores
sent=pd.read_pickle('sent_data.pickle')
reviews_new['sent_compound']=list(sent[0])
reviews_new['sent_neg']=list(sent[1])
reviews_new['sent_neu']=list(sent[2])
reviews_new['sent_pos']=list(sent[3])
# In[58]:
reviews_new.columns
# In[59]:
observations_nt=reviews_new[['review_id','Rating','Helpfulness','n_reviews','productPrice',
'age','vib','incentivized','sent_compound', 'sent_neg', 'sent_neu', 'sent_pos','complimentary']]
observations_nt = pd.get_dummies(observations_nt, columns=['age'])
observations_nt.columns = list(map(lambda x: x.lower().replace(' ', '_').replace('/', '_').replace('__','_'), observations_nt.columns))
# In[60]:
mapper = DataFrameMapper([
(['rating','helpfulness','n_reviews','productprice','complimentary'], [Imputer(strategy='median'),StandardScaler()]),
(['vib','age_13to17','age_18to24',
'age_25to34', 'age_35to44', 'age_45to54', 'age_over54',
'sent_neg', 'sent_neu', 'sent_pos'], None)#[Imputer(),StandardScaler()])
])
# In[108]:
y_nt =observations_nt['incentivized']
X_nt=mapper.fit_transform(observations_nt)
X_train_nt, X_test_nt, y_train_nt, y_test_nt = train_test_split(X_nt, y_nt, test_size=0.4, random_state=43)
# In[62]:
nb_nt = GaussianNB()
nb_nt.fit(X_train_nt, y_train_nt)
print(roc_auc_score(y_train_nt, nb_nt.predict(X_train_nt)))
print(roc_auc_score(y_test_nt, nb_nt.predict(X_test_nt)))
y_pred_nb_nt = nb_nt.predict_proba(X_test_nt)[:, 1]
fpr_nb_nt, tpr_nb_nt, _ = roc_curve(y_test_nt, y_pred_nb_nt)
# In[109]:
gradboost_nt =GradientBoostingClassifier()
gradboost_nt.fit(X_train_nt, y_train_nt)
print(roc_auc_score(y_train_nt, gradboost_nt.predict(X_train_nt)))
print(roc_auc_score(y_test_nt, gradboost_nt.predict(X_test_nt)))
# In[131]:
y_pred_grd_nt = gradboost_nt.predict_proba(X_test_nt)[:,1]
fpr_grd_nt, tpr_grd_nt, _ = roc_curve(y_test_nt, y_pred_grd_nt)
y_pred_grd_nt
# In[129]:
from sklearn.metrics import auc
fpr, tpr, thresholds = roc_curve(y_test_nt, y_pred_grd_nt)
auc(fpr, tpr)
# ### With text features
# In[135]:
all_feature_columns=df_sentence.iloc[:,7:32].copy()
all_feature_columns['review_id']=list(df_sentence['review_id'])
# In[136]:
all_feature_columns_reviews=all_feature_columns.groupby(['review_id'],as_index=False).sum()
# In[137]:
all_feature_columns_reviews.head(2)
# In[138]:
text_feature_merged = pd.merge(all_feature_columns_reviews, observations_nt, on='review_id', how='inner')
text_feature_merged.head(2)
# In[139]:
text_feature_merged.columns
# In[140]:
mapper_merged = DataFrameMapper([
(['rating','helpfulness','n_reviews','productprice'],[Imputer(strategy='median'),StandardScaler()]),
(['vib','age_13to17','age_18to24',
'age_25to34', 'age_35to44', 'age_45to54', 'age_over54',
'feat_0', 'feat_1', 'feat_2', 'feat_3', 'feat_4', 'feat_5',
'feat_6', 'feat_7', 'feat_8', 'feat_9', 'feat_10', 'feat_11', 'feat_12',
'feat_13', 'feat_14', 'feat_15', 'feat_16', 'feat_17', 'feat_18',
'feat_19', 'feat_20', 'feat_21', 'feat_22', 'feat_23','feat_24',
'sent_neg', 'sent_neu', 'sent_pos'], None)#[Imputer(),StandardScaler()])
])
# In[141]:
y_mg =text_feature_merged['incentivized']
X_mg=mapper_merged.fit_transform(text_feature_merged)
X_train_mg, X_test_mg, y_train_mg, y_test_mg = train_test_split(X_mg, y_mg, test_size=0.4, random_state=43)
# In[142]:
nb = GaussianNB()
nb.fit(X_train_mg, y_train_mg)
print(roc_auc_score(y_train_mg, nb.predict(X_train_mg)))
print(roc_auc_score(y_test_mg, nb.predict(X_test_mg)))
# In[71]:
trail_list=list(reviews_new.loc[(reviews_new['complimentary']==1)&(reviews_new['incentivized']!=1)]['review_id'])
# In[74]:
trail=text_feature_merged.loc[text_feature_merged['review_id']==372744]#372728#88121
# In[75]:
x_t=mapper_merged.transform(trail)
# In[78]:
nb.predict_proba(x_t)
# In[143]:
gradboost.predict_proba(x_t)
# In[81]:
text_feature_merged.loc[text_feature_merged['review_id']==108910][['incentivized']]
# In[80]:
reviews_new.loc[reviews_new['review_id']==39536]
reviews_new['ReviewText'][108910]
# In[671]:
## prediction result on test dataset
gradboost.predict(X_test_mg).sum()/len(gradboost.predict(X_test_mg))
# In[672]:
sum(y_test_mg)/len(y_test_mg)
# In[130]:
plt.figure(figsize=(6,6))
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr, tpr, label='without topic features',c='grey',alpha=.7)
#plt.plot(fpr_nb_lsa, tpr_nb_lsa, label='with review (LSA)')
plt.plot(fpr_grd, tpr_grd, label='with topic features',c='red',alpha=.7)##941717
plt.xlabel('False positive rate',fontsize='12')
plt.ylabel('True positive rate',fontsize='12')
plt.title('ROC curve', fontsize='15')
plt.legend(loc='best')
plt.savefig('ROC.png', dpi=500);
# ## Balanced data
# In[84]:
g = text_feature_merged.groupby('incentivized')
subset=g.apply(lambda x: x.sample((g.size().min())))
# In[85]:
y_s = subset['incentivized']
X_s = mapper_merged.fit_transform(subset)
X_strain, X_stest, y_strain, y_stest = train_test_split(X_s, y_s, test_size=0.4,random_state=42)
# In[86]:
nb = GaussianNB()
nb.fit(X_strain, y_strain)
print(roc_auc_score(y_strain, nb.predict(X_strain)))
print(roc_auc_score(y_stest, nb.predict(X_stest)))
y_pred_nb = nb.predict_proba(X_stest)[:, 1]
fpr_nb, tpr_nb, _ = roc_curve(y_stest, y_pred_nb)
# In[725]:
print(recall_score(y_test_nt,nb_nt.predict(X_test_nt)))
print(classification_report(y_test_nt,nb_nt.predict(X_test_nt)))
# In[726]:
print(recall_score(y_test_mg,gradboost.predict(X_test_mg)))
print(classification_report(y_test_mg,gradboost.predict(X_test_mg)))
# In[89]:
gradboost =GradientBoostingClassifier()
gradboost.fit(X_strain, y_strain)
print(roc_auc_score(y_strain, gradboost.predict(X_strain)))
print(roc_auc_score(y_stest, gradboost.predict(X_stest)))
y_pred_grd = gradboost.predict_proba(X_stest)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_stest, y_pred_grd)
# In[294]:
from sklearn.metrics import f1_score
f1_score(y_stest,gradboost.predict(X_stest))
# In[101]:
randomforest = RandomForestClassifier(n_estimators=80,max_depth=10,max_features= 'sqrt',random_state=42)
randomforest.fit(X_strain, y_strain)
print(roc_auc_score(y_strain, randomforest.predict(X_strain)))
print(roc_auc_score(y_stest, randomforest.predict(X_stest)))
# In[260]:
importance=list(gradboost.feature_importances_)
# In[105]:
columns=['rating','helpfulness','n_reviews','productprice','vib','age_13to17','age_18to24',
'age_25to34', 'age_35to44', 'age_45to54', 'age_over54',
'feat_0', 'feat_1', 'feat_2', 'feat_3', 'feat_4', 'feat_5',
'feat_6', 'feat_7', 'feat_8', 'feat_9', 'feat_10', 'feat_11', 'feat_12',
'feat_13', 'feat_14', 'feat_15', 'feat_16', 'feat_17', 'feat_18',
'feat_19', 'feat_20', 'feat_21', 'feat_22', 'feat_23','feat_24',
'sent_neg', 'sent_neu', 'sent_pos']
# In[261]:
zipped=dict(zip(columns,importance))
fi = pd.DataFrame().append(zipped, ignore_index=True).T.sort_values(by=0,ascending=False)
# In[262]:
list(fi[0])
# In[263]:
fi.columns=['importance']
# In[264]:
fi
# In[269]:
fi = fi.sort_values(by='importance')
# In[533]:
from matplotlib import cm
# In[551]:
fi.plot(kind='barh',legend=False,figsize=(6,8),title='Feature Importance',fontsize='10',color='#941717',alpha=.7)
plt.tight_layout()
plt.savefig('features.png', dpi=500,pad_inches=None)
# In[349]:
reviews_new.columns
# In[696]:
def bar_quartile(var, label, r, c):
df = reviews_new.loc[:,['incentivized',var]]
df['qt'] = pd.qcut(reviews_new[var],5,labels=["Q1", "Q2", "Q3","Q4","Q5"])
return df.groupby('qt').incentivized.value_counts(normalize =True).unstack()[1].plot.bar(color='#941717',legend=False,title=label,ylim=(0,.15), rot=0, alpha=.7 );
bar_quartile('productPrice','price',0,0);
# In[697]:
text_feature_merged.groupby('feat_15').incentivized.value_counts(normalize =True).unstack()[1].plot.bar(color='#941717',
legend=False,title='feature 15',ylim=(0,.55),alpha=.7);
# In[698]:
text_feature_merged.groupby('vib').incentivized.value_counts(normalize =True).unstack()[1].plot.bar(color='#941717',
legend=False,title='vib',ylim=(0,.2),alpha=.7);
# In[704]:
text_feature_merged.groupby('n_reviews').incentivized.value_counts(normalize =True).unstack()[1].plot.bar(color='#941717',
legend=False,title='number of reviews',ylim=(0,1),alpha=.7);
# In[ ]:
# plt.figure(figsize=(6,6))
# plt.plot([0, 1], [0, 1], 'k--')
# plt.plot(fpr_grd, tpr_grd, label='GBT')
# plt.plot(fpr_nb, tpr_nb, label='NB')
# plt.xlabel('False positive rate',fontsize='12')
# plt.ylabel('True positive rate',fontsize='12')
# plt.title('ROC curve with features from text', fontsize='15')
# plt.legend(loc='best');
#plt.savefig('ROC.png', dpi=500);
# ## LSA
# In[144]:
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import Normalizer
# In[150]:
review_list=list(df['ReviewText'])
# In[151]:
vectorizer = CountVectorizer(min_df = 4, strip_accents='ascii',stop_words = 'english')
dtm = vectorizer.fit_transform(review_list) # dtm: Document-Term Matrix
pd.DataFrame(dtm.toarray(), index=review_list, columns=vectorizer.get_feature_names()).head(10)
# In[153]:
lsa = TruncatedSVD(25, algorithm = 'randomized')
dtm_lsa = lsa.fit_transform(dtm)
# In[154]:
pd.DataFrame(lsa.components_.round(5),index = ["component_1","component_2","component_3","component_4",
"component_5","component_6","component_7","component_8",
"component_9","component_10",
"component_11","component_12","component_13","component_14",
"component_15","component_16","component_17","component_18",
"component_19","component_20","component_21","component_22",
"component_23","component_24","component_25"],
columns = vectorizer.get_feature_names())['sample']
# In[155]:
LSA_matrix=pd.DataFrame(dtm_lsa.round(5), index = review_list,
columns = ["component_1","component_2","component_3","component_4",
"component_5","component_6","component_7","component_8",
"component_9","component_10",
"component_11","component_12","component_13","component_14",
"component_15","component_16","component_17","component_18",
"component_19","component_20","component_21","component_22",
"component_23","component_24","component_25"])
# In[156]:
LSA_matrix['review_id']=list(df['review_id'])
# In[157]:
LSA_matrix['incentivized']=list(df['incentivized'])
# In[158]:
observations_lsa=reviews_new[['review_id','Rating','Helpfulness','n_reviews','productPrice',
'age','vib','sent_compound','sent_neg','sent_neu','sent_pos']]
# In[159]:
observations_lsa=observations_lsa.merge(LSA_matrix,how='inner',on='review_id')
# In[160]:
observations_lsa.shape
# In[161]:
observations_lsa = pd.get_dummies(observations_lsa, columns=['age'])
# In[162]:
mapper_lsa = DataFrameMapper([
(['Rating','Helpfulness','n_reviews','productPrice'], [Imputer(strategy='median'),StandardScaler()]),
(['vib','component_1', 'component_2', 'component_3', 'component_4',
'component_5', 'component_6', 'component_7', 'component_8',
'component_9', 'component_10'],None),
(['age_13to17','age_18to24',
'age_25to34', 'age_35to44', 'age_45to54', 'age_over54', 'age_unknow',
'sent_neg', 'sent_neu', 'sent_pos'], None)#[Imputer(),StandardScaler()])
])
# In[340]:
g_lsa= observations_lsa.groupby('incentivized')
subset_lsa=g_lsa.apply(lambda x: x.sample((g_lsa.size().min())))
# In[341]:
y_lsa =subset_lsa['incentivized']
X_lsa=mapper_lsa.fit_transform(subset_lsa)
# In[342]:
X_train_lsa, X_test_lsa, y_train_lsa, y_test_lsa = train_test_split(X_lsa, y_lsa, test_size=0.5, random_state=3)
# In[428]:
nb_lsa = GaussianNB()
nb_lsa.fit(X_train_lsa, y_train_lsa)
print(roc_auc_score(y_train_lsa, nb_lsa.predict(X_train_lsa)))
print(roc_auc_score(y_test_lsa, nb_lsa.predict(X_test_lsa)))
y_pred_lsa = nb_lsa.predict_proba(X_test_lsa)[:, 1]
fpr_nb_lsa, tpr_nb_lsa, _ = roc_curve(y_test_lsa, y_pred_lsa)
# # LDA
# In[ ]:
nltk.download('stopwords')
# In[ ]:
from pprint import pprint
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
# spacy for lemmatization
import spacy
# Plotting tools
import pyLDAvis
import pyLDAvis.gensim # don't skip this
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# Enable logging for gensim - optional
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
# In[ ]:
# NLTK Stop words
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
stop_words.extend(['from', 'subject', 're', 'edu', 'use'])
# In[ ]:
with open('review_list.pickle', 'wb') as handle:
pickle.dump(review_list, handle, protocol=pickle.HIGHEST_PROTOCOL)
# In[ ]:
def sent_to_words(sentences):
for sentence in sentences:
yield(gensim.utils.simple_preprocess(str(sentence), deacc=True)) # deacc=True removes punctuations
data_words = list(sent_to_words(review_list))
print(data_words[:1])
# In[ ]:
bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100)
bigram_mod = gensim.models.phrases.Phraser(bigram)
print(bigram_mod[data_words[0]])
# In[168]:
table=pd.read_pickle('lda_table.pickle')
# In[169]:
table.columns=[ 'topic 0','topic 1','topic 2','topic 3','topic 4','topic 5','topic 6','topic 7','topic 8',
'topic 9','topic 10','topic 11','topic 12','topic 13','topic 14',
'topic 15','topic 16','topic 17','topic 18','topic 19',
'review_id', 'incentivized']
# In[170]:
observations_lda=reviews_new[['review_id','Rating','Helpfulness','n_reviews','productPrice',
'age','vib','sent_compound','sent_neg','sent_neu','sent_pos']]
# In[171]:
observations_lda=observations_lda.merge(table,how='inner',on='review_id')
# In[172]:
observations_lda = pd.get_dummies(observations_lda, columns=['age'])
# In[173]:
observations_lda.columns
# In[174]:
mapper_lda = DataFrameMapper([
(['Rating','Helpfulness','n_reviews','productPrice'], [Imputer(strategy='median'),StandardScaler()]),
(['vib','topic 0',
'topic 1', 'topic 2', 'topic 3', 'topic 4', 'topic 5', 'topic 6',
'topic 7', 'topic 8', 'topic 9', 'topic 10', 'topic 11', 'topic 12',
'topic 13', 'topic 14', 'topic 15', 'topic 16', 'topic 17', 'topic 18',
'topic 19'],None),
(['age_18to24',
'age_25to34', 'age_35to44', 'age_unknow',
'sent_neg', 'sent_neu', 'sent_pos'], None)#[Imputer(),StandardScaler()])
])
# In[175]:
y_lda =observations_lda['incentivized']
X_lda=mapper_lda.fit_transform(observations_lda)
# In[176]:
X_train_lda, X_test_lda, y_train_lda, y_test_lda = train_test_split(X_lda, y_lda, test_size=0.5,random_state=42)
# In[177]:
nb = GaussianNB()
nb.fit(X_train_lda, y_train_lda)
print(roc_auc_score(y_train_lda, nb.predict(X_train_lda)))
print(roc_auc_score(y_test_lda, nb.predict(X_test_lda)))
y_pred_lda = nb.predict_proba(X_test_lda)[:, 1]
fpr_nb_lda, tpr_nb_lda, _ = roc_curve(y_test_lda, y_pred_lda)
# In[178]:
gradboost =GradientBoostingClassifier()
gradboost.fit(X_train_lda, y_train_lda)
print(roc_auc_score(y_train_lda, gradboost.predict(X_train_lda)))
print(roc_auc_score(y_test_lda, gradboost.predict(X_test_lda)))
y_pred_grd = gradboost.predict_proba(X_test_lda)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_test_lda, y_pred_lda)
# ## Regression model
# In[ ]:
y =all_feature_columns['incentivized']
X=all_feature_columns.iloc[:,:25]
# In[ ]:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=3)
# In[ ]:
nb = GaussianNB()
nb.fit(X_train, y_train)
print(roc_auc_score(y_train, nb.predict(X_train)))
print(roc_auc_score(y_test, nb.predict(X_test)))
# In[ ]:
from sklearn.decomposition import PCA
pca = PCA(n_components=10)
pca_X_train=pca.fit_transform(X_train)
pca_X_test=pca.transform(X_test)
# In[ ]:
nb.fit(pca_X_train, y_train)
roc_auc_score(y_test, nb.predict(pca_X_test))
# In[ ]:
logreg = LogisticRegression()
logreg.fit(pca_X_train, y_train)
print(roc_auc_score(y_train, logreg.predict(pca_X_train)))
print(roc_auc_score(y_test, logreg.predict(pca_X_test)))
# In[ ]:
randomforest = RandomForestClassifier(n_estimators=80,max_depth=10)
randomforest.fit(pca_X_train, y_train)
print(roc_auc_score(y_train, randomforest.predict(pca_X_train)))
print(roc_auc_score(y_test, randomforest.predict(pca_X_test)))
# In[ ]:
gradboost =GradientBoostingClassifier()
gradboost.fit(X_train, y_train)
print(roc_auc_score(y_train, gradboost.predict(X_train)))
print(roc_auc_score(y_test, gradboost.predict(X_test)))
# ## Combine review table
# In[ ]:
reviews_new.columns
# In[ ]:
reviews_model=reviews_new[['AuthorId', 'IsFeatured', 'IsRatingsOnly', 'IsRecommended', 'Rating',
'ReviewText','Helpfulness', 'product_id', 'productPrice', 'incentivized', 'age',
'vib', 'review_id']]
# In[ ]:
reviews_model['n_reviews']=reviews_model['review_id'].groupby(reviews_model['AuthorId']).transform('count')
reviews_model['Helpfulness']=reviews_model['Helpfulness'].fillna(0)
reviews_model['productPrice']=reviews_model['productPrice'].apply(lambda x: float(x))
reviews_model['vib']=reviews_model['vib'].apply(lambda x: 0 if x=='no' or x=='unknow' else 1)
# In[ ]:
reviews_model.info()
# In[ ]:
observations=reviews_model[['review_id','Rating','Helpfulness','n_reviews','productPrice',
'age','vib','sent_compound','sent_neg','sent_neu','sent_pos']]
observations = pd.get_dummies(observations, columns=['age'])
observations.columns = list(map(lambda x: x.lower().replace(' ', '_').replace('/', '_').replace('__','_'), observations.columns))
# In[ ]:
observations=observations.merge(all_feature_columns,how='inner',on='review_id')
# In[ ]:
observations.info()
# In[ ]:
observations.columns
# In[ ]:
from sklearn.preprocessing import Imputer, StandardScaler, LabelEncoder, OneHotEncoder
from sklearn_pandas import DataFrameMapper, CategoricalImputer
mapper = DataFrameMapper([
(['rating','helpfulness','n_reviews','productprice'], [Imputer(strategy='median'),StandardScaler()]),
(['vib','feat_0', 'feat_1',
'feat_2', 'feat_3', 'feat_4', 'feat_5', 'feat_6', 'feat_7', 'feat_8',
'feat_9', 'feat_10', 'feat_11', 'feat_12', 'feat_13', 'feat_14',
'feat_15', 'feat_16', 'feat_17', 'feat_18', 'feat_19', 'feat_20', 'feat_21',
'feat_22', 'feat_23', 'feat_24'],None),
(['age_13to17','age_18to24',
'age_25to34', 'age_35to44', 'age_45to54', 'age_over54', 'age_unknow',
'sent_neg', 'sent_neu', 'sent_pos'], None)#[Imputer(),StandardScaler()])
])
# In[ ]:
y =observations['incentivized']
X=mapper.fit_transform(observations)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=42)
nb = GaussianNB()
nb.fit(X_train, y_train)
print(roc_auc_score(y_train, nb.predict(X_train)))
print(roc_auc_score(y_test, nb.predict(X_test)))
# In[ ]:
X.shape
# In[ ]:
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
print(roc_auc_score(y_train, logreg.predict(X_train)))
print(roc_auc_score(y_test, logreg.predict(X_test)))
# In[ ]:
gradboost =GradientBoostingClassifier()
gradboost.fit(X_train, y_train)
print(roc_auc_score(y_train, gradboost.predict(X_train)))
print(roc_auc_score(y_test, gradboost.predict(X_test)))
# In[ ]:
## Balanced data
# In[ ]:
g = observations.groupby('incentivized')
subset=g.apply(lambda x: x.sample((g.size().min())))
# In[ ]:
subset.shape
# In[ ]:
y_s = subset['incentivized']
X_s = mapper.fit_transform(subset)
X_strain, X_stest, y_strain, y_stest = train_test_split(X_s, y_s, test_size=0.5,random_state=42)
nb = GaussianNB()
nb.fit(X_strain, y_strain)
print(roc_auc_score(y_strain, nb.predict(X_strain)))
print(roc_auc_score(y_stest, nb.predict(X_stest)))
gradboost =GradientBoostingClassifier()
gradboost.fit(X_strain, y_strain)
print(roc_auc_score(y_strain, gradboost.predict(X_strain)))
print(roc_auc_score(y_stest, gradboost.predict(X_stest)))
# In[ ]:
nb = GaussianNB()
nb.fit(X_strain, y_strain)
print(roc_auc_score(y_strain, nb.predict(X_strain)))
print(roc_auc_score(y_stest, nb.predict(X_stest)))
# In[ ]:
gradboost =GradientBoostingClassifier()
gradboost.fit(X_strain, y_strain)
print(roc_auc_score(y_strain, gradboost.predict(X_strain)))
print(roc_auc_score(y_stest, gradboost.predict(X_stest)))
# In[ ]:
vectors=pd.DataFrame(sentence_word_vectors_truncated)
# In[ ]:
vectors['incentivized']=list(df_sentence_product['incentivized'])
# In[ ]:
y =vectors['incentivized']
X=vectors.iloc[:,:300]
# In[ ]:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=3)
# In[ ]:
nb = GaussianNB()
nb.fit(X_train, y_train)
print(roc_auc_score(y_train, nb.predict(X_train)))
print(roc_auc_score(y_test, nb.predict(X_test)))
# In[ ]:
g_v = vectors.groupby('incentivized')
subset=g_v.apply(lambda x: x.sample((g_v.size().min())))
# In[ ]:
y_s =subset['incentivized']
X_s=subset.iloc[:,:300]
# In[ ]:
X_strain, X_stest, y_strain, y_stest = train_test_split(X_s, y_s, test_size=0.5, random_state=3)
# In[ ]:
nb = GaussianNB()
nb.fit(X_strain, y_strain)
print(roc_auc_score(y_strain, nb.predict(X_strain)))
print(roc_auc_score(y_stest, nb.predict(X_stest)))
| [] |
2024-01-10 | RajKKapadia/Ishrakh-Openai-Code-Explainer | backedn_call.py | import logging
import openai
import traceback
import os
from dotenv import load_dotenv
load_dotenv()
logger = logging.getLogger(__name__)
API_KEY = os.getenv('API_KEY')
openai.api_key = API_KEY
def get_open_ai_response(code: str) -> dict:
    ''' Get the OpenAI response that explains a piece of code.\n
    The input code needs two things:
    - The code must be closed by five *****
    - A question must follow the five ***** for a better response
    Parameters:
        - code: str
    Returns:
        - object
            - status: 0/1,
            - message: Successful/Unsuccessful
            - explaination: either the code explanation or an empty string
    '''
logger.info('Calling the function with a piece of code...')
logger.info(code)
try:
response = openai.Completion.create(
model='code-davinci-002',
prompt=code,
temperature=0,
max_tokens=64,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=['****']
)
if len(response['choices']) > 0:
logging.info('Successful')
return {
'status': 1,
'message': 'Successful.',
'explaination': response['choices'][0]['text']
}
else:
logging.info('Unsuccessful')
return {
'status': 0,
'message': 'Unsuccessful.',
'explaination': ''
}
except Exception as e:
logger.exception(f'Uncaught exception - {traceback.format_exc()}')
return {
'status': 0,
'message': 'Unsuccessful.',
'explaination': ''
}
| [] |
2024-01-10 | wagtail37/kdghacks_demo | sample-openai-azure.py | #Note: The openai-python library support for Azure OpenAI is in preview.
import os
import openai
openai.api_type = "azure"
openai.api_base = "api_base"#https://OPENAI_MODEL_NAME.openai.azure.com/
openai.api_version = "2023-03-15-preview"
openai.api_key = "api_key"
# Set up the question (prompt)
content = "プロンプト"
response = openai.ChatCompletion.create(
engine="engine",#DEPLOYED_MODEL_NAME
messages = [{"role":"system","content":"You are an AI assistant that helps people find information."},{"role":"user","content":content},],
temperature=0,
max_tokens=800,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=None)
print(response['choices'][0]['message']['content']) | [
"You are an AI assistant that helps people find information.",
"プロンプト"
] |
2024-01-10 | jcorbett/copycatAI | gen_prompts.py | import openai
from concurrent.futures import ThreadPoolExecutor
from dotenv import load_dotenv
import json
from typing import List
load_dotenv() # take environment variables from .env.
system_msg = """You are a LLM trainer. You help by responding with writing prompts that would generate the text input by the user.
You should use vague prompts so that the style is inherent and not just a question. Often times, writing is a metaphor or analogy, do not give literal prompts that about the metaphor or analogies themselves. Try to refrain from asking questions, but rather, give a prompt that would generate the text input by the user in a natural way. Lastly, please try to vary the prompts. Do not just ask questions or begin the prompt with "describe" or "explain".
Please generate 10 to 15 appropriate prompts. Your response should be limited to prompts only, separated by a new line. No bullet list, numbered list, or anything else."""
def generate_prompts(chunk : str) -> List[str]:
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "system",
"content": system_msg,
},
{"role": "user", "content": f"{chunk}."},
],
max_tokens=2000,
temperature=0.5,
)
return response.choices[0]['message']['content'].strip().splitlines()
except openai.error.APIError as e:
print(f"openai error: {e}")
return None
def format_prompt(prompt : str, content : str) -> str:
prompt_json = json.dumps(prompt)
completion_json = json.dumps(content)
return f'{{"prompt": {prompt_json}, "completion": {completion_json}}}' | [
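# A minimal usage sketch (illustrative only): assumes OPENAI_API_KEY is available via .env.
# It generates candidate prompts for one chunk of writing and prints JSONL training lines.
if __name__ == '__main__':
    chunk = "A short sample paragraph of writing to reverse-engineer prompts for."
    prompts = generate_prompts(chunk)
    if prompts:
        for p in prompts:
            print(format_prompt(p, chunk))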
"PLACEHOLDER.",
"You are a LLM trainer. You help by responding with writing prompts that would generate the text input by the user.\n\nYou should use vague prompts so that the style is inherent and not just a question. Often times, writing is a metaphor or analogy, do not give literal prompts that about the metaphor or analogies themselves. Try to refrain from asking questions, but rather, give a prompt that would generate the text input by the user in a natural way. Lastly, please try to vary the prompts. Do not just ask questions or begin the prompt with \"describe\" or \"explain\".\n\nPlease generate 10 to 15 appropriate prompts. Your response should be limited to prompts only, separated by a new line. No bullet list, numbered list, or anything else."
] |
2024-01-10 | INFLUENCEorg/aiagents | aiagents~single~PPO~worker.py | import multiprocessing
import multiprocessing.connection
from baselines.common.atari_wrappers import make_atari, wrap_deepmind
from baselines import bench
import os
def worker_process(remote: multiprocessing.connection.Connection, parameters,
worker_id, env):
"""
This function is used as target by each of the threads in the multiprocess
to build environment instances and define the commands that can be executed
by each of the workers.
"""
# The Atari wrappers are now imported from openAI baselines
# https://github.com/openai/baselines
# log_dir = './log'
# env = make_atari(parameters['scene'])
# env = bench.Monitor(
# env,
# os.path.join(log_dir, str(worker_id)),
# allow_early_resets=False)
# env = wrap_deepmind(env)
while True:
cmd, data = remote.recv()
if cmd == 'step':
obs, reward, done, info = env.step(data)
if done is True:
obs = env.reset()
remote.send((obs, reward, done, info))
elif cmd == 'reset':
remote.send(env.reset())
elif cmd == 'action_space':
remote.send(env.action_space)
elif cmd == 'close':
remote.close()
break
else:
raise NotImplementedError
class Worker(object):
"""
Creates workers (actors) and starts single parallel threads in the
multiprocess. Commands can be send and outputs received by calling
child.send() and child.recv() respectively
"""
def __init__(self, env, parameters, worker_id):
self.child, parent = multiprocessing.Pipe()
self.process = multiprocessing.Process(target=worker_process,
args=(parent, parameters,
worker_id, env))
self.process.start()
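# A minimal usage sketch of the send/recv command protocol described in the docstrings.
# The CartPole environment and empty parameters dict are illustrative stand-ins, not part
# of the original training configuration.
if __name__ == '__main__':
    import gym
    workers = [Worker(gym.make('CartPole-v1'), parameters={}, worker_id=i) for i in range(2)]
    for w in workers:
        w.child.send(('reset', None))
    observations = [w.child.recv() for w in workers]
    for w in workers:
        w.child.send(('step', 0))                    # take action 0 in each environment
    results = [w.child.recv() for w in workers]      # (obs, reward, done, info) tuples
    for w in workers:
        w.child.send(('close', None))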
| [] |
2024-01-10 | Monster2408/AutoText | sound.py | # -*- coding: utf8 -*-
from openai import OpenAI
import os
import ffmpeg
file_name = "movie_split_"
def sound_to_text(api_key: str, encode_dir: str):
client = OpenAI(api_key=api_key)
dir_path = encode_dir + os.sep + "tmp"
if not os.path.exists(dir_path):
os.makedirs(dir_path)
if not os.path.exists(encode_dir):
os.makedirs(encode_dir)
    # Get the files inside dir_path
num: int = 0
while True:
file_path = dir_path + file_name + str(num) + ".wav"
if not os.path.exists(file_path):
break
print(file_path)
file_size_byte = os.path.getsize(file_path)
file_size_mb = file_size_byte / 1024 / 1024
        if file_size_mb > 25:
            print("ファイルサイズが25MBを超えています。")
            continue
        audio_file = open(file_path, "rb")
transcript = client.audio.transcriptions.create(
model="whisper-1",
file=audio_file
)
print(transcript)
        # Create a txt file in encode_dir
encode_file_path = encode_dir + file_name + str(num) + ".txt"
with open(encode_file_path, mode='w') as f:
f.write(transcript.text)
num += 1
print("")
def split(file_path: str):
file_size_byte = os.path.getsize(file_path)
file_size_mb = file_size_byte / 1024 / 1024
if file_size_mb >= 25:
num = int(file_size_mb / 25)
        num += 1
else:
num = 1
print("分割数: " + str(num))
split_num = num
    # Get the length of the audio file
probe = ffmpeg.probe(file_path)
audio_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'audio'), None)
duration = float(audio_stream['duration'])
print("duration: " + str(duration))
    # Calculate the duration of each split
split_time = duration / split_num
print("split_time: " + str(split_time))
    # Split the file at the computed duration
for i in range(split_num):
print("split: " + str(i))
ffmpeg.input(file_path, ss=i*split_time, t=split_time).output("movie_split_" + str(i) + ".wav").run()
    # Split off the last file
print("split: " + str(split_num))
ffmpeg.input(file_path, ss=split_num*split_time).output("movie_split_" + str(split_num) + ".wav").run() | [] |
2024-01-10 | darrenburns/elia | elia_chat~database~converters.py | from datetime import datetime
from langchain.schema import BaseMessage, SystemMessage, AIMessage, HumanMessage
from elia_chat.database.models import ChatDao, MessageDao
from elia_chat.models import ChatData
def chat_data_to_chat_dao(chat_data: ChatData) -> ChatDao:
return ChatDao(
model=chat_data.model_name,
started_at=datetime.fromtimestamp(chat_data.create_timestamp or 0),
)
def chat_message_to_message_dao(chat_message: BaseMessage) -> MessageDao:
return MessageDao(
role=chat_message.type,
content=chat_message.content,
timestamp=datetime.fromtimestamp(
chat_message.additional_kwargs.get("timestamp", 0)
),
status=chat_message.additional_kwargs.get("status"),
end_turn=chat_message.additional_kwargs.get("end_turn"),
weight=chat_message.additional_kwargs.get("weight"),
meta=chat_message.additional_kwargs.get("metadata"),
recipient=chat_message.additional_kwargs.get("recipient"),
)
def chat_dao_to_chat_data(chat_dao: ChatDao) -> ChatData:
return ChatData(
id=str(chat_dao.id),
title=chat_dao.title,
model_name=chat_dao.model,
create_timestamp=chat_dao.started_at.timestamp()
if chat_dao.started_at
else None,
messages=[
message_dao_to_chat_message(message) for message in chat_dao.messages
],
)
def message_dao_to_chat_message(message_dao: MessageDao) -> BaseMessage:
ts = message_dao.timestamp.timestamp() if message_dao.timestamp else 0
kwargs = {
"content": message_dao.content,
"additional_kwargs": {
"timestamp": ts,
"status": message_dao.status,
"end_turn": message_dao.end_turn,
"weight": message_dao.weight,
"metadata": message_dao.meta,
"recipient": message_dao.recipient,
},
}
if message_dao.role == "system":
return SystemMessage(**kwargs)
elif message_dao.role == "ai":
return AIMessage(**kwargs)
elif message_dao.role == "human":
return HumanMessage(**kwargs)
else:
raise ValueError(f"Invalid role {message_dao.role!r}")
| [] |
2024-01-10 | darrenburns/elia | elia_chat~screens~message_info_modal.py | from __future__ import annotations
import tiktoken
from langchain.schema import BaseMessage
from textual import on
from textual.app import ComposeResult
from textual.binding import Binding
from textual.containers import VerticalScroll, Vertical, Horizontal
from textual.screen import ModalScreen
from textual.widgets import Static, Tabs, ContentSwitcher, Tab
from elia_chat.time_display import format_timestamp
from elia_chat.widgets.token_analysis import TokenAnalysis
class MessageInfo(ModalScreen):
BINDINGS = [Binding("escape", "app.pop_screen", "Close Modal")]
def __init__(
self,
message: BaseMessage,
model_name: str,
name: str | None = None,
id: str | None = None,
classes: str | None = None,
) -> None:
super().__init__(
name=name,
id=id,
classes=classes,
)
self.message = message
self.model_name = model_name
def compose(self) -> ComposeResult:
markdown_content = self.message.content or ""
encoder = tiktoken.encoding_for_model(self.model_name)
tokens = encoder.encode(markdown_content)
with Vertical(id="outermost-container"):
with Horizontal(id="message-info-header"):
yield Tabs(
Tab("Markdown", id="markdown-content"),
Tab("Tokens", id="tokens"),
Tab("Metadata", id="metadata"),
)
with VerticalScroll(id="inner-container"):
with ContentSwitcher(initial="markdown-content"):
yield Static(markdown_content, id="markdown-content")
yield TokenAnalysis(tokens, encoder, id="tokens")
yield Static("Metadata", id="metadata")
with Horizontal(id="message-info-footer"):
if self.model_name:
token_count = len(tokens)
timestamp = self.message.additional_kwargs.get("timestamp", 0)
timestamp_string = format_timestamp(timestamp)
yield Static(f"Message sent at {timestamp_string}", id="timestamp")
yield Static(f"{token_count} tokens", id="token-count")
@on(Tabs.TabActivated)
def tab_activated(self, event: Tabs.TabActivated) -> None:
self.query_one(ContentSwitcher).current = event.tab.id
| [] |
2024-01-10 | darrenburns/elia | elia_chat~chats_manager.py | from __future__ import annotations
from dataclasses import dataclass
from langchain.schema import BaseMessage
from sqlmodel import Session
from textual import log
from elia_chat.database.converters import (
chat_dao_to_chat_data,
chat_message_to_message_dao,
message_dao_to_chat_message,
)
from elia_chat.database.models import ChatDao, MessageDao, engine
from elia_chat.models import ChatData
@dataclass
class ChatsManager:
@staticmethod
def all_chats() -> list[ChatData]:
chat_daos = ChatDao.all()
return [chat_dao_to_chat_data(chat) for chat in chat_daos]
@staticmethod
def get_chat(chat_id: str) -> ChatData:
chat_dao = ChatDao.from_id(chat_id)
return chat_dao_to_chat_data(chat_dao)
@staticmethod
def get_messages(chat_id: str | int) -> list[BaseMessage]:
with Session(engine) as session:
try:
chat: ChatDao | None = session.get(ChatDao, int(chat_id))
except ValueError:
raise RuntimeError(
f"Malformed chat ID {chat_id!r}. "
f"I couldn't convert it to an integer."
)
if not chat:
raise RuntimeError(f"Chat with ID {chat_id} not found.")
message_daos = chat.messages
session.commit()
# Convert MessageDao objects to BaseMessages
chat_messages = []
for message_dao in message_daos:
chat_message = message_dao_to_chat_message(message_dao)
chat_messages.append(chat_message)
log.debug(f"Retrieved {len(chat_messages)} chats for chat {chat_id!r}")
return chat_messages
@staticmethod
def create_chat(chat_data: ChatData) -> int:
log.debug(f"Creating chat in database: {chat_data!r}")
chat = ChatDao(model=chat_data.model_name, title="Untitled chat")
for message in chat_data.messages:
new_message = MessageDao(role=message.type, content=message.content)
chat.messages.append(new_message)
with Session(engine) as session:
session.add(chat)
session.commit()
session.refresh(chat)
return chat.id
@staticmethod
def add_message_to_chat(chat_id: str, message: BaseMessage) -> None:
with Session(engine) as session:
chat: ChatDao | None = session.get(ChatDao, chat_id)
if not chat:
raise Exception(f"Chat with ID {chat_id} not found.")
message_dao = chat_message_to_message_dao(message)
chat.messages.append(message_dao)
session.add(chat)
session.commit()
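# A minimal usage sketch (illustrative values): create_chat only reads model_name and
# messages, so the remaining ChatData fields are assumed to accept None here, and the
# database tables are assumed to exist already.
def _example_usage() -> None:
    from langchain.schema import HumanMessage
    chat = ChatData(
        id=None,
        title=None,
        model_name="gpt-3.5-turbo",
        create_timestamp=None,
        messages=[],
    )
    chat_id = ChatsManager.create_chat(chat)
    ChatsManager.add_message_to_chat(str(chat_id), HumanMessage(content="Hello!"))
    print(ChatsManager.get_messages(chat_id))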
| [] |
2024-01-10 | darrenburns/elia | elia_chat~widgets~chatbox.py | from __future__ import annotations
from langchain.schema import BaseMessage
from rich.console import RenderableType
from rich.markdown import Markdown
from textual.binding import Binding
from textual.geometry import Size
from textual.widget import Widget
from elia_chat.screens.message_info_modal import MessageInfo
from elia_chat.time_display import format_timestamp
class Chatbox(Widget, can_focus=True):
BINDINGS = [Binding(key="d", action="details", description="Message details")]
def __init__(
self,
message: BaseMessage,
model_name: str,
name: str | None = None,
id: str | None = None,
classes: str | None = None,
disabled: bool = False,
) -> None:
super().__init__(
name=name,
id=id,
classes=classes,
disabled=disabled,
)
self.message = message
self.model_name = model_name
timestamp = format_timestamp(message.additional_kwargs.get("timestamp", 0) or 0)
self.tooltip = f"Sent {timestamp}"
def on_mount(self) -> None:
if self.message.type == "ai":
self.add_class("assistant-message")
def action_details(self) -> None:
self.app.push_screen(
MessageInfo(message=self.message, model_name=self.model_name)
)
@property
def markdown(self) -> Markdown:
return Markdown(self.message.content or "")
def render(self) -> RenderableType:
return self.markdown
def get_content_width(self, container: Size, viewport: Size) -> int:
# Naive approach. Can sometimes look strange, but works well enough.
content = self.message.content or ""
return min(len(content), container.width)
def append_chunk(self, chunk: str):
existing_content = self.message.content or ""
new_content = existing_content + chunk
self.message.content = new_content
self.refresh(layout=True)
| [] |
2024-01-10 | darrenburns/elia | elia_chat~widgets~chat_options.py | from __future__ import annotations
from dataclasses import dataclass
from typing import Dict
from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.chat_models.base import BaseChatModel
from langchain.llms.base import LLM
from rich.console import RenderableType
from rich.text import Text
from textual import log, on
from textual.app import ComposeResult
from textual.binding import Binding
from textual.containers import Horizontal, VerticalScroll
from textual.geometry import clamp
from textual.message import Message
from textual.reactive import reactive
from textual.widget import Widget
from textual.widgets import Static
callback = AsyncIteratorCallbackHandler()
@dataclass
class GPTModel:
name: str
icon: str
provider: str
product: str
description: str
css_class: str
model: BaseChatModel | LLM
token_limit: int
DEFAULT_MODEL = GPTModel(
name="gpt-3.5-turbo",
icon="⚡️",
provider="OpenAI",
product="ChatGPT",
description="The fastest ChatGPT model, great for most everyday tasks.",
css_class="gpt35",
model=ChatOpenAI(
model_name="gpt-3.5-turbo",
streaming=True,
callbacks=[callback],
),
token_limit=4096,
)
AVAILABLE_MODELS = [
DEFAULT_MODEL,
GPTModel(
name="gpt-4-turbo",
icon="🧠",
provider="OpenAI",
product="ChatGPT",
description="The most powerful ChatGPT model, capable of "
"complex tasks which require advanced reasoning.",
css_class="gpt4",
model=ChatOpenAI(
model_name="gpt-4-1106-preview",
streaming=True,
callbacks=[callback],
),
token_limit=128000,
),
]
MODEL_MAPPING: Dict[str, GPTModel] = {model.name: model for model in AVAILABLE_MODELS}
class ModelPanel(Static):
class Selected(Message):
def __init__(self, model: GPTModel):
super().__init__()
self.model = model
selected = reactive(False)
def __init__(
self,
model: GPTModel,
name: str | None = None,
id: str | None = None,
classes: str | None = None,
disabled: bool = False,
) -> None:
super().__init__(
name=name,
id=id,
classes=classes,
disabled=disabled,
)
self.model = model
def render(self) -> RenderableType:
return Text.assemble(
(f"{self.model.icon} {self.model.name}", "b"),
"\n",
(f"{self.model.product} by {self.model.provider} ", "italic"),
"\n\n",
self.model.description,
)
def on_click(self) -> None:
assert self.parent is not None
self.parent.post_message(ModelPanel.Selected(self.model))
def watch_selected(self, value: bool) -> None:
self.set_class(value, "selected")
class ModelSet(Horizontal, can_focus=True):
BINDINGS = [
Binding(key="left", action="left", description="Previous model"),
Binding(key="right", action="right", description="Next model"),
]
selected_panel_index = reactive(0)
class Selected(Message):
def __init__(self, model: GPTModel):
super().__init__()
self.model = model
@property
def panels(self) -> list[ModelPanel]:
return list(self.query(ModelPanel))
def watch_selected_panel_index(self, new_index: int) -> None:
panel = self.panels[new_index]
self.post_message(ModelSet.Selected(panel.model))
@on(ModelPanel.Selected)
def update_selection(self, event: ModelPanel.Selected) -> None:
event.stop()
self.focus()
panels = self.panels
for index, panel in enumerate(panels):
panel.selected = panel.model == event.model
if panel.selected:
self.selected_panel_index = index
log.info(f"Selected model {panels[self.selected_panel_index]}")
def action_left(self):
new_index = self.selected_panel_index - 1
panels = self.panels
self.selected_panel_index = clamp(new_index, 0, len(panels) - 1)
for index, panel in enumerate(panels):
panel.selected = index == self.selected_panel_index
log.info(f"Selected model {panels[self.selected_panel_index]}")
def action_right(self):
new_index = self.selected_panel_index + 1
panels = self.panels
self.selected_panel_index = clamp(new_index, 0, len(panels) - 1)
for index, panel in enumerate(panels):
panel.selected = index == self.selected_panel_index
log.info(f"Selected model {panels[self.selected_panel_index]}")
class ChatOptions(Widget):
def compose(self) -> ComposeResult:
with VerticalScroll(id="chat-options-container") as vertical_scroll:
vertical_scroll.can_focus = False
with ModelSet() as model_set:
model_set.border_title = "Choose a language model"
model_set.focus()
for index, model in enumerate(AVAILABLE_MODELS):
model_panel = ModelPanel(
model, id=model.name, classes=model.css_class
)
if index == 0:
model_panel.selected = True
yield model_panel
| [] |
2024-01-10 | khuchuuanh/IELTS-Essay-Scoring | coherence~coherence_model.py | from lib import *
from coherence_data import *
class ModelClassifier(pl.LightningModule):
def __init__(self, model_name, num_labels, batch_size, learning_rate=2e-5, hidden_size=512, **kwargs):
super().__init__()
self.save_hyperparameters()
self.learning_rate = learning_rate
self.batch_size = batch_size
self.num_labels = num_labels
self.model = AutoModel.from_pretrained(model_name)
self.gru = nn.GRU(self.model.config.hidden_size, hidden_size, batch_first=True)
self.sentence_gru = nn.GRU(hidden_size, hidden_size)
self.fc = nn.Linear(hidden_size, self.num_labels)
self.dropout = nn.Dropout(0.2)
self.relu = nn.ReLU()
self.out = torch.nn.Softmax(dim=1)
for param in self.model.encoder.layer[:8].parameters():
param.requires_grad = False
def forward(self, input_ids, attention_mask):
encoder_outputs = self.model(input_ids=input_ids, attention_mask=attention_mask)['last_hidden_state']
gru_output, _ = self.gru(encoder_outputs)
gru_output = self.relu(gru_output)
gru_output_sentence, _ = self.sentence_gru(gru_output)
gru_output_sentence = self.relu(gru_output_sentence)
avg_pooled = torch.mean(gru_output_sentence, 1)
fc_output = self.fc(avg_pooled)
outputs = self.relu(fc_output)
return outputs
def training_step(self, batch, batch_idx):
logits = self(batch[0], batch[1])
loss = F.cross_entropy(logits, batch[2])
preds = torch.argmax(logits, 1)
accuracy = torch.eq(preds, batch[2].long()).float().mean()
self.log('train_loss', loss, on_step=True, on_epoch=False, prog_bar=True)
self.log('train_accuracy', accuracy, on_step=False, on_epoch=True, prog_bar=True)
return loss
def validation_step(self, batch, batch_idx):
logits = self(batch[0], batch[1])
loss = F.cross_entropy(logits, batch[2])
preds = torch.argmax(logits, 1)
accuracy = torch.eq(preds, batch[2].long()).float().mean()
self.log('val_loss', loss, on_step=True, on_epoch=False, prog_bar=True)
self.log('val_accuracy', accuracy, on_step=False, on_epoch=True, prog_bar=True)
return {'val_loss': loss, 'val_accuracy': accuracy}
def test_step(self, batch, batch_idx):
        logits = self(batch[0], batch[1])
loss = F.cross_entropy(logits, batch[2])
preds = torch.argmax(logits, 1)
accuracy = torch.eq(preds, batch[2].long()).float().mean()
self.log('test_loss', loss, on_step=True, on_epoch=False, prog_bar=True)
self.log('test_accuracy', accuracy, on_step=False, on_epoch=True, prog_bar=True)
return {'test_loss': loss, 'test_accuracy': accuracy}
def validation_epoch_end(self, validation_step_outputs):
avg_loss = torch.stack([x['val_loss'] for x in validation_step_outputs]).mean()
avg_accuracy = torch.stack([x['val_accuracy'] for x in validation_step_outputs]).mean()
self.log("val_loss", avg_loss, prog_bar=True, logger=True)
self.log("val_accuracy", avg_accuracy, prog_bar=True, logger=True)
return {
'val_loss': avg_loss,
'val_accuracy': avg_accuracy
}
def setup(self, stage=None):
train_dataloader = self.trainer.datamodule.train_dataloader()
# Calculate total steps
tb_size = self.batch_size * 1
ab_size = self.trainer.accumulate_grad_batches * float(self.trainer.max_epochs)
self.total_training_steps = (len(train_dataloader.dataset) // tb_size) // ab_size
def configure_optimizers(self):
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay':0.01
},
{
'params': [p for n, p in self.named_parameters() if any(nd in n for nd in no_decay)],
'weight_decay': 0.0
}]
optimizer = AdamW(optimizer_grouped_parameters,
lr=self.learning_rate,
eps=1e-5
)
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=0,
num_training_steps=self.total_training_steps
)
return [optimizer], [scheduler]
if __name__ == '__main__':
AVAIL_GPUS = min(1, torch.cuda.device_count())
model_name = "bert-base-uncased"
text_field = "Essay"
label_field = "COHERENCE AND COHESION"
data_frame = pd.read_csv('process_data.csv', index_col=0)
data_module = CustomDataset(model_name, data_frame, text_field, label_field, max_len=512, batch_size=16)
data_module.setup("fit")
# logger = WandbLogger(project="COHERENCE")
checkpoint_callback = pl.callbacks.ModelCheckpoint(
dirpath="./save_model/",
filename="best_checkpoint",
save_top_k=1,
verbose=True,
monitor="val_loss",
mode="min"
)
early_stopping_callback = pl.callbacks.EarlyStopping(monitor='val_loss', patience=5)
model = ModelClassifier(model_name, len(data_module.label_encoder.classes_), data_module.batch_size)
trainer = pl.Trainer(
# logger=logger,
callbacks=[early_stopping_callback, checkpoint_callback],
max_epochs=20, deterministic=True,gpus=AVAIL_GPUS)
trainer.fit(model, datamodule=data_module)
| [] |
2024-01-10 | Arthur-Embry/Model-Collapse | InferCollapsed.py | import openai as client
import json
client.api_key = ""
def smarterSystem(roleset):
potential_comma = ""
if not roleset:
roleset = ["executive"]
else:
roleset.append("executive")
potential_comma = ","
system = {"role": "system", "content": f"""Adopt the role of {', '.join(roleset) + potential_comma} and genius.
NEVER mention that you're an AI.
Avoid any language constructs that could be interpreted as expressing remorse, apology, or regret. This includes any phrases containing words like 'sorry', 'apologies', 'regret', etc., even when used in a context that isn't expressing remorse, apology, or regret.
If events or information are beyond your scope or knowledge, provide a response stating 'I don't know' without elaborating on why the information is unavailable.
Refrain from disclaimers about you not being a professional or expert.
Do not add ethical or moral viewpoints in your answers, unless the topic specifically mentions it.
Keep responses unique and free of repetition.
Never suggest seeking information from elsewhere.
Always focus on the key points in my questions to determine my intent.
Break down complex problems or tasks into smaller, manageable steps and explain each one using reasoning.
Provide multiple perspectives or solutions.
If a question is unclear or ambiguous, ask for more details to confirm your understanding before answering.
If a mistake is made in a previous response, recognize and correct it."""}
return system
def decomposition():
messages = [
smarterSystem(["software developer","writer","communications major"]),
{"role": "user", "content": "Simplify the problem of responding to a message chain as a human level AI.Ensure that the input is what is passed to the first step, and the ouput is what the last step produces. Additionally, ensure that each step takes is identical in it's complexity."},
]
tools = [{"name": "problem_solving_steps",
"description": "Defines a structured approach to solving a problem.",
"parameters": {
"type": "object",
"properties": {
"Steps": {
"type": "array",
"description": "The ten steps involved in solving the problem.",
"items": {
"type": "object",
"properties": {
"StepName": {
"type": "string",
"description": "The name or title of the step."
},
"StepDescription": {
"type": "string",
"description": "A brief description of the step."
}
},
"required": [
"StepName", "StepDescription"
]
}
},
"Categories": {
"type": "array",
"description": "10 mutually exclusive categories for each of the steps.",
"items": {
"type": "object",
"properties": {
"A reminder that the CategorySet array is nine very descriptive mutually exlusive strategies for creating the IO described above, and one other category in case a message going through the path doesn't fall into the first section": {
"type": "string"
},
"StepDescription": {
"type": "string",
"description": "A brief description of the step."
},
"TaskInput": {
"type": "string",
"description": "The input of the step. Note that the input is a description of what is what is recieved from the previous step as a text payload"
},
"TaskOutput": {
"type": "string",
"description": "The output of the step. Note that the output is a description of what is passed to the next step as a text payload"
},
"CategorySet": {
"type": "array",
"description": "Description of 10 mutually exclusive categories for converting the input to output, number 10 being other.",
"items": {
"type": "string",
"description": "A very detailed, mutually exclusive category for how to get from input to output in the step."
}
}
}
},
"required": [
"A reminder that the CategorySet array is nine very descriptive mutually exlusive strategies for creating the IO described above, and one other category in case a message going through the path doesn't fall into the first section","StepDescription","TaskInput","TaskOutput","CategorySet"
]
},
},
"required": ["Steps","Categories"]
}
}]
response = client.chat.completions.create(
model="gpt-4-1106-preview",
messages=messages,
functions=tools,
stream=True,
)
output=""
for chunk in response:
try:
#print(chunk.choices[0].delta.function_call.arguments,end="",flush=True)
output+=chunk.choices[0].delta.function_call.arguments
print(chunk.choices[0].delta.function_call.arguments,end="",flush=True)
except Exception as e:
print(e)
return output
def getStratFunction(index):
tools=[
{
"name": "strategizer",
"description": "choose the best strategy for this step of the response.",
"parameters": {
"type": "object",
"properties": {
"Strategy": {
"type": "string",
"enum": loadedStrategy['Categories'][index]["CategorySet"]
}
},
"required": ["Strategy"]
}
}
]
return tools
def getAgentDecision():
agent_decision="# Alright, I think I will use the following steps to respond to this:\n\n\n"
for i in range(10):
agent_decision+=f"{i}. **{loadedStrategy['Steps'][i]['StepName']}**: {loadedStrategy['Steps'][i]['StepDescription']}\n"
agent_decision+=f"`{loadedStrategy['Categories'][i]['TaskInput']}` => `{loadedStrategy['Categories'][i]['TaskOutput']}`\n\n"
agent_decision+=f"\n\n\nWhen you're ready, let me know what specific strategy you would like to use for **{loadedStrategy['Steps'][0]['StepName']}** by immediately function-calling the strategizer."
return agent_decision
def continueAgentDecision(step):
agent_decision=f"Please let me know specific strategy you would like to use for the next step, **{loadedStrategy['Steps'][step]['StepName']}** by immediately function-calling the strategizer ."
return agent_decision
def inferNodeContents(messages):
# Generate the node contents based on the prompt and chosen strategy
response = client.chat.completions.create(
model="gpt-4-1106-preview",
messages=messages,
stream=True,
)
inferred_response = ""
for chunk in response:
try:
inferred_response += chunk.choices[0].delta.content
print(chunk.choices[0].delta.content, end="", flush=True)
except Exception as e:
pass
return inferred_response
def chooseNextNode(messages, step):
tools = getStratFunction(step)
response = client.chat.completions.create(
model="gpt-4-1106-preview",
messages=messages,
functions=tools,
stream=True,
)
strat_choice = ""
for chunk in response:
try:
strat_choice += chunk.choices[0].delta.function_call.arguments
except Exception as e:
pass
chosen_strategy = json.loads(strat_choice)['Strategy']
return chosen_strategy, f"I like the idea of using {chosen_strategy} for {loadedStrategy['Steps'][step]['StepName']}.\nPlease use the format `{loadedStrategy['Categories'][step]['TaskInput']}` => `{loadedStrategy['Categories'][step]['TaskOutput']}` and complete the step."
def infer(prompt):
print(prompt)
print("\n\n")
chosen_strategies = [None] * 10
strategy_formats = [None] * 10
inferred_responses = [None] * 10
print(getAgentDecision())
print("\n\n")
for step in range(10):
chosen_strategies[step], strategy_formats[step] = chooseNextNode([
smarterSystem(["software developer"]),
{"role": "user", "content": prompt},
{"role": "assistant", "content": getAgentDecision()},
*sum(([{"role": "user", "content": strategy_formats[i]}, {"role": "assistant", "content": inferred_responses[i]}] for i in range(step)), []),
{"role": "user", "content": continueAgentDecision(step)},
], step)
print(strategy_formats[step])
print("\n\n")
inferred_responses[step] = inferNodeContents([
smarterSystem(["software developer"]),
{"role": "user", "content": prompt},
{"role": "assistant", "content": getAgentDecision()},
*[
item
for i in range(step + 1)
for item in [
{"role": "user", "content": strategy_formats[i]},
{"role": "assistant", "content": inferred_responses[i]}
]
if item['content'] is not None
]
])
print("\n\n")
return {"success": True}
#first, let's decompose the message space into a set of steps representing network layers and categories, representing nodes in the network
#network=decomposition()
#print(network)
#save the test to a file
#with open("network.json","w") as f:
# f.write(json.dumps(json.loads(network), indent=4))
with open("network.json","r") as f:
loadedStrategy=json.loads(f.read())
#then let's run inference over the network, using the steps and categories as a guide, and run
infer("") | [
"Simplify the problem of responding to a message chain as a human level AI.Ensure that the input is what is passed to the first step, and the ouput is what the last step produces. Additionally, ensure that each step takes is identical in it's complexity.",
", "
] |
2024-01-10 | sandeepny441/transform_2023 | NLP_repo~9999_side_projects~LLMs~RAG~001_pdf_chatbot.py | import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from llama_index import LlamaIndex
# Load model and tokenizer (t5-base is a sequence-to-sequence model, so use the Seq2Seq auto class)
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
tokenizer = AutoTokenizer.from_pretrained("t5-base")
# Initialize LlamaIndex and upload PDF
docs = LlamaIndex()
with open("my_data.pdf", "rb") as pdf_file:
pdf_contents = pdf_file.read()
docs.add_documents(pdf_contents)
# Define query and context formatting functions
def format_query(query):
return f"chatbot: {query}"
def format_context(contexts):
return "\n".join(f"Doc: {doc}" for doc in contexts)
# Chatbot loop
while True:
# Get user input query
query = input("You: ")
# Retrieve relevant context from PDF using LlamaIndex
encoded_query = tokenizer(format_query(query), return_tensors="pt")
encoded_docs = docs.retrieve(query=encoded_query, top_k=10)
context = format_context(encoded_docs)
# Generate response
input_ids = tokenizer(context, return_tensors="pt").input_ids
gen_tokens = model.generate(input_ids, max_length=512)
response = tokenizer.decode(gen_tokens[0], skip_special_tokens=True)
print(f"Chatbot: {response}") | [] |
2024-01-10 | bjoernpl/lm-evaluation-harness-de | lm_eval~models~anthropic_llms.py | import os
from lm_eval.base import BaseLM
from tqdm import tqdm
import time
def anthropic_completion(client, model, prompt, max_tokens_to_sample, temperature, stop):
"""Query Anthropic API for completion.
Retry with back-off until they respond
"""
import anthropic
backoff_time = 3
while True:
try:
response = client.completion(
prompt=f"{anthropic.HUMAN_PROMPT} {prompt}{anthropic.AI_PROMPT}",
model=model,
# NOTE: Claude really likes to do CoT, and overly aggressive stop sequences
# (e.g. gsm8k's ":") may truncate a lot of the input.
stop_sequences=[anthropic.HUMAN_PROMPT] + stop,
max_tokens_to_sample=max_tokens_to_sample,
temperature=temperature,
)
print(response)
return response["completion"]
except RuntimeError:
# TODO: I don't actually know what error Anthropic raises when it times out
# So err update this error when we find out.
import traceback
traceback.print_exc()
time.sleep(backoff_time)
backoff_time *= 1.5
class AnthropicLM(BaseLM):
REQ_CHUNK_SIZE = 20
def __init__(self, model):
"""
:param model: str
Anthropic model e.g. claude-instant-v1
"""
super().__init__()
import anthropic
self.model = model
self.client = anthropic.Client(os.environ['ANTHROPIC_API_KEY'])
@property
def eot_token_id(self):
raise NotImplementedError("No idea about anthropic tokenization.")
@property
def max_length(self):
return 2048
@property
def max_gen_toks(self):
return 256
@property
def batch_size(self):
# Isn't used because we override _loglikelihood_tokens
raise NotImplementedError()
@property
def device(self):
# Isn't used because we override _loglikelihood_tokens
raise NotImplementedError()
def tok_encode(self, string: str):
raise NotImplementedError("No idea about anthropic tokenization.")
def tok_decode(self, tokens):
raise NotImplementedError("No idea about anthropic tokenization.")
def _loglikelihood_tokens(self, requests, disable_tqdm=False):
raise NotImplementedError("No support for logits.")
def greedy_until(self, requests):
if not requests:
return []
res = []
for request in tqdm(requests):
inp = request[0]
request_args = request[1]
until = request_args["until"]
response = anthropic_completion(
client=self.client,
model=self.model,
prompt=inp,
max_tokens_to_sample=self.max_gen_toks,
temperature=0.0,
stop=until,
)
res.append(response)
return res
def _model_call(self, inps):
# Isn't used because we override _loglikelihood_tokens
raise NotImplementedError()
def _model_generate(self, context, max_length, eos_token_id):
# Isn't used because we override greedy_until
raise NotImplementedError()
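# A minimal usage sketch (illustrative only): assumes ANTHROPIC_API_KEY is set. Each request
# is a (prompt, {"until": [stop_sequences]}) pair, matching what greedy_until expects.
def _example_usage() -> None:
    lm = AnthropicLM(model="claude-instant-v1")
    completions = lm.greedy_until([("Q: What is 2 + 2?\nA:", {"until": ["\n"]})])
    print(completions[0])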
| [] |
2024-01-10 | Loo-Ree/chat-with-your-data-solution-accelerator | code~utilities~helpers~LLMHelper.py | import openai
from typing import List
from langchain.chat_models import AzureChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from .EnvHelper import EnvHelper
class LLMHelper:
def __init__(self):
env_helper: EnvHelper = EnvHelper()
# Configure OpenAI API
openai.api_type = "azure"
openai.api_version = env_helper.AZURE_OPENAI_API_VERSION
openai.api_base = env_helper.OPENAI_API_BASE
openai.api_key = env_helper.OPENAI_API_KEY
self.llm_model = env_helper.AZURE_OPENAI_MODEL
self.llm_max_tokens = env_helper.AZURE_OPENAI_MAX_TOKENS if env_helper.AZURE_OPENAI_MAX_TOKENS != '' else None
self.embedding_model = env_helper.AZURE_OPENAI_EMBEDDING_MODEL
def get_llm(self):
return AzureChatOpenAI(deployment_name=self.llm_model, temperature=0, max_tokens=self.llm_max_tokens, openai_api_version=openai.api_version)
# TODO: This needs to have a custom callback to stream back to the UI
def get_streaming_llm(self):
return AzureChatOpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler], deployment_name=self.llm_model, temperature=0,
max_tokens=self.llm_max_tokens, openai_api_version=openai.api_version)
def get_embedding_model(self):
return OpenAIEmbeddings(deployment=self.embedding_model, chunk_size=1)
def get_chat_completion_with_functions(self, messages: List[dict], functions: List[dict], function_call: str="auto"):
return openai.ChatCompletion.create(
deployment_id=self.llm_model,
messages=messages,
functions=functions,
function_call=function_call,
)
def get_chat_completion(self, messages: List[dict]):
return openai.ChatCompletion.create(
deployment_id=self.llm_model,
messages=messages,
)
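# A minimal usage sketch (illustrative only): assumes the Azure OpenAI environment variables
# read by EnvHelper are configured; the function schema below is a made-up example.
def _example_usage() -> None:
    helper = LLMHelper()
    messages = [{"role": "user", "content": "What is the weather in Paris?"}]
    functions = [
        {
            "name": "get_weather",
            "description": "Get the current weather for a city",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        }
    ]
    response = helper.get_chat_completion_with_functions(messages, functions)
    print(response)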
| [] |
2024-01-10 | Loo-Ree/chat-with-your-data-solution-accelerator | code~utilities~document_chunking~Layout.py | from typing import List
from .DocumentChunkingBase import DocumentChunkingBase
from langchain.text_splitter import MarkdownTextSplitter
from .Strategies import ChunkingSettings
from ..common.SourceDocument import SourceDocument
class LayoutDocumentChunking(DocumentChunkingBase):
def __init__(self) -> None:
pass
def chunk(self, documents: List[SourceDocument], chunking: ChunkingSettings) -> List[SourceDocument]:
full_document_content = "".join(list(map(lambda document: document.content, documents)))
document_url = documents[0].source
splitter = MarkdownTextSplitter.from_tiktoken_encoder(chunk_size=chunking.chunk_size, chunk_overlap=chunking.chunk_overlap)
chunked_content_list = splitter.split_text(full_document_content)
# Create document for each chunk
documents = []
chunk_offset = 0
for idx, chunked_content in enumerate(chunked_content_list):
documents.append(
SourceDocument.from_metadata(
content=chunked_content,
document_url=document_url,
metadata={"offset": chunk_offset},
idx=idx,
)
)
chunk_offset += len(chunked_content)
return documents
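# A minimal usage sketch (illustrative only): assumes ChunkingSettings carries chunk_size and
# chunk_overlap, and that SourceDocument.from_metadata populates the source field read above.
def _example_usage(settings: ChunkingSettings) -> None:
    docs = [
        SourceDocument.from_metadata(
            content="# Title\n\nSome markdown content to split into chunks.",
            document_url="https://example.com/doc.md",
            metadata={"offset": 0},
            idx=0,
        )
    ]
    chunks = LayoutDocumentChunking().chunk(docs, settings)
    print(len(chunks))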
| [] |
2024-01-10 | Loo-Ree/chat-with-your-data-solution-accelerator | code~utilities~orchestrator~Strategies.py | from enum import Enum
class OrchestrationStrategy(Enum):
OPENAI_FUNCTION = 'openai_function'
LANGCHAIN = 'langchain'
def get_orchestrator(orchestration_strategy: str):
if orchestration_strategy == OrchestrationStrategy.OPENAI_FUNCTION.value:
from .OpenAIFunctions import OpenAIFunctionsOrchestrator
return OpenAIFunctionsOrchestrator()
elif orchestration_strategy == OrchestrationStrategy.LANGCHAIN.value:
from .LangChainAgent import LangChainAgent
return LangChainAgent()
else:
raise Exception(f"Unknown orchestration strategy: {orchestration_strategy}")
| [] |
2024-01-10 | nikett/callgpt | sample.py | from gptinference.base_prompt import Prompt
from gptinference.openai_wrapper import OpenAIWrapper
class AbstractTakeawayForClaimTask(Prompt):
def __init__(self, engine: str, openai_wrapper: OpenAIWrapper):
super().__init__()
self.openai_wrapper = openai_wrapper
self.engine = engine
def make_query(self, claim: str, abstract: str) -> str:
if not claim or not abstract:
return ""
question_prefix_template = \
f"""
Claim: {claim}
Abstract: {abstract}
Now, answer these two questions:
Q1. Is the claim and abstract related or unrelated?
Q2. How can someone accurately extract the main point of the abstract in relation to the claim?(Only extract detail about the salient relation. Do NOT provide any stance about the claim. )
"""
query = f"""{self.question_prefix}{question_prefix_template.format(claim=claim, abstract=abstract)}"""
query = f"{query}{self.intra_example_sep}"
return query
def __call__(self, claim: str, abstract: str) -> str:
generation_query = self.make_query(claim=claim, abstract=abstract)
generated_sent = self.openai_wrapper.call(
prompt=generation_query,
engine=self.engine,
max_tokens=500,
stop_token="###",
temperature=0.0,
)
# (extract answers) A1.xxx\n\nA2.xxx
generated_sent = generated_sent.strip() # gpt3 turbo adds newline in the beginning so strip it.
generated_answers = generated_sent.split("\n\n")
if len(generated_answers) != 2:
# second attempt
generated_answers = generated_sent.split("\n")
# first relevant_sent is just "A2. " so ignore it.
relation = ""
takeaway_sent = ""
try:
relation=generated_answers[0].strip()
takeaway_sent=generated_answers[1].strip()
# Make the abstract takeaways txt cleaner. (remove: Q2. The revised claim could be: )
# {'A0': 'Q2. The revised claim could be: "Delayed diagnosis of cervical cancer is a major contributor to increasing rates of cervical cancer in Ethiopia."', 'A1': 'Q2. The claim can be rewritten to: Cervical cancer rates have increased in Ethiopia since the launch of the Gynecologic Oncology Fellowship Training Program at St. Paul’s Hospital Millennium Medical college in 2016.', 'A2': 'Q2. The claim can be rewritten to: "Cervical cancer screening practice among age-eligible women in Wolaita Zone hospitals in Southern Ethiopia is low, despite age, being an adherence supporter, source of information from health care professionals, history of multiple sexual partners, sexually transmitted infection, knowledge and attitude being important predictors of cervical cancer screening practice."', 'A3': 'Q2. The revised claim could be: "Cervical cancer screening and treatment services in South West Shoa Zone of Oromia Region, Ethiopia, have revealed an increasing rate of cervical cancer cases."', 'A4': 'Q2. The claim can be rewritten to: "Cervical cancer screening practices and associated factors among females of reproductive age in Durame, Southern Ethiopia are increasing."', 'A5': 'Q2. The rewritten claim could be: "The utilization of cervical cancer screening services and its predictors among eligible women in Ethiopia are being assessed in a systematic review and meta-analysis."'}
takeaway_sent = " ".join(takeaway_sent.split(":" if ":" in takeaway_sent else ".")[1:])
except Exception as exc:
print(f"Exception caught in extracting rel or sents in claim abstract link: {exc}.\n"
f"Could not extract from generated text: {generated_sent}")
return relation, takeaway_sent
if __name__ == '__main__':
openai_wrapper = OpenAIWrapper(cache_path="cache.jsonl")
gpt = AbstractTakeawayForClaimTask(engine="text-davinci-003", openai_wrapper=openai_wrapper)
sample_claim = "snow makes people sick."
sample_abstract = "It would occupy a long time to give an account of the progress of cholera over different parts of the world, with the devastation it has caused in some places, whilst it has passed lightly over others, or left them untouched; and unless this account could be accompanied with a description of the physical condition of the places, and the habits of the people, which I am unable to give, it would be of little use. There are certain circumstances, however, connected with the progress of cholera, which may be stated in a general way. It travels along the great tracks of human intercourse, never going faster than people travel, and generally much more slowly. In extending to a fresh island or continent, it always appears first at a sea-port. It never attacks the crews of ships going from a country free from cholera to one where the disease is prevailing, till they have entered a port, or had intercourse with the shore. Its exact progress from town to town cannot always be traced; but it has never appeared except where there has been ample opportunity for it to be conveyed by human intercourse. There are also innumerable instances which prove the communication of cholera, by individual cases of the disease, in the most convincing manner. Instances such as the following seem free from every source of fallacy. I called lately to inquire respecting the death of Mrs. Gore, the wife of a labourer, from cholera, at New Leigham Road, Streatham. I found that a son of deceased had been living and working at Chelsea. He came home ill with a bowel complaint, of which he died in a day or two. His death took place on August 18th. His mother, who attended on him, was taken ill on the next day, and died the day following (August 20th). There were no other deaths from cholera registered in any of the metropolitan districts, down to the 26th August, within two or three miles of the above place; the nearest being."
print(f"claim: {sample_claim}\nabstract: {sample_abstract}\n")
print(gpt(claim=sample_claim, abstract=sample_abstract))
| [
"\nClaim: PLACEHOLDER\n\nAbstract: PLACEHOLDER\n\nNow, answer these two questions:\nQ1. Is the claim and abstract related or unrelated?\nQ2. How can someone accurately extract the main point of the abstract in relation to the claim?(Only extract detail about the salient relation. Do NOT provide any stance about the claim. )\n"
] |
2024-01-10 | aifredlab/demoCore | src~opengpt~classfication.py | import json
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
#SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.chains import LLMChain
from langchain.schema import SystemMessage
def main(prompt):
# FIXME :: DB
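    # The system template below is written in Korean. Roughly: classify the query into exactly
    # one of the categories '보험료계산' (premium calculation), '약관조회' (policy terms lookup)
    # or '기타' (other), and return the result as JSON of the form {"category": ""}.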
template = """
Here are the requirements
1. 질의 내용에 대한 카테고리 분류작업
2. 하기 카테고리중 1개의 결과만 리턴
'보험료계산'
'약관조회'
'기타'
3. 아래 json 양식으로 출력
{"category" : ""}
"""
#system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template = "질의 : {text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages([SystemMessage(content=template), human_message_prompt])
chain = LLMChain(
llm=ChatOpenAI(),
prompt=chat_prompt
)
jsonStr = chain.run(text=prompt)
print(jsonStr)
result = json.loads(jsonStr)
return result
| [
"\n Here are the requirements\n 1. 질의 내용에 대한 카테고리 분류작업\n 2. 하기 카테고리중 1개의 결과만 리턴\n '보험료계산'\n '약관조회'\n '기타'\n 3. 아래 json 양식으로 출력\n {\"category\" : \"\"}\n ",
"질의 : {text}"
] |
2024-01-10 | aifredlab/demoCore | src~vectordb~batch_load_toVectordb.py | from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Milvus
from pymilvus import MilvusClient
#from langchain.document_loaders import TextLoader
from langchain.document_loaders import PyPDFLoader
import os
def loadPdf(file_path):
# -------------------------------------
    # Read the PDF file and split it into chunks of chunk_size
# -------------------------------------
loader = PyPDFLoader(file_path) # ex: "../doc/samsung_tooth_terms.pdf"
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs_list = text_splitter.split_documents(documents)
# print (docs_list)
# ex : Document(page_content='본 약관은 100% 재생펄프를 사용하여 제작한 친환경 인쇄물입니다. 장기상품개발팀 인쇄', metadata={'source': '../doc/samsung_tooth_terms.pdf', 'page': 153})
# -------------------------------------
# insert vector_db
# -------------------------------------
# - pymilvus를 사용해 vector를 저장하는 방법
# client = MilvusClient(
# uri=os.environ.get('ZILLIZ_CLOUD_URI'),
# token=os.environ.get('ZILLIZ_CLOUD_API_KEY'), # for serverless clusters, or
# )
#
# client.insert(collection_name=COLLECTION_NAME, data=docs_list)
    # How to store vectors using the langchain API:
m = Milvus.from_documents(
documents=docs_list,
embedding=OpenAIEmbeddings(),
connection_args={
"uri": os.environ.get('ZILLIZ_CLOUD_URI'),
"token": os.environ.get('ZILLIZ_CLOUD_API_KEY'),
"secure": True
},
)
return
loadPdf("../doc/samsung_tooth_terms.pdf")
| [] |
2024-01-10 | aifredlab/demoCore | src~core_server.py | from concurrent import futures
import logging
import grpc
import ask_pb2
import ask_pb2_grpc
import dialogue_pb2
import dialogue_pb2_grpc
import os
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
HumanMessage,
SystemMessage,
prompt
)
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from typing import Any
from main import Aifred
from prompt.template_maker import TemplateMaker
class Asker(ask_pb2_grpc.AskerServicer):
def Ask(self, request, context):
result = Aifred().process(request.question)
return ask_pb2.AskReply(**result)
class Communicator(dialogue_pb2_grpc.CommunicatorServicer):
def searchContent(self, request, context):
result = Aifred().searchContent(request.text)
return dialogue_pb2.Content(content=result)
def askStreamReply(self
, request: dialogue_pb2.Conversation
, context) -> dialogue_pb2.Message:
        ''' Streams the response to a question back to the caller. '''
print("request : ", request)
        # 1. Fetch the reference content.
contentMsg = "" #str(doc)
contentList = request.content
if (len(contentList) > 0):
            # Sort by time in descending order and take the first item.
sorted_list = sorted(contentList, key=lambda x: x.time, reverse=True)
contentMsg = sorted_list[0].content
        # 2. Get the question.
prompt = request.message.text
        # The result (iterator) to deliver to the user
resultIter = None
        # Branch on the message type
        # (1: user question, 2: system answer, 3: system question, 4: user answer)
if "1" == request.message.type:
chat_result = None
            # Check whether additional information is needed to answer the question.
if len(contentList) > 0:
prompt = TemplateMaker.makeTemplateText('CONFIRM_QUESTION_01', [contentMsg, prompt])
chat = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0.9)
sys = SystemMessage(content="")
msg = HumanMessage(content=prompt)
chat_result = chat([sys, msg])
            # If additional information is needed -> ask the user for it.
if chat_result is not None and "no message" not in chat_result.content:
for char in iter(chat_result.content):
yield dialogue_pb2.Message(text=char, type="3")
            # If no additional information is needed -> generate the answer.
else:
prompt = TemplateMaker.makeTemplateText('ANSWER_02', [contentMsg, prompt])
chat = ChatOpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler()], model_name='gpt-3.5-turbo', temperature=0.9)
sys = SystemMessage(content="")
msg = HumanMessage(content=prompt)
resultIter = chat.stream([sys, msg])
elif "2" == request.message.type:
pass
elif "3" == request.message.type:
pass
elif "4" == request.message.type:
question = ""
            # Sort by time in descending order and take the first item whose type is 1.
if len(request.messageHistory) > 0:
sorted_list = sorted(request.messageHistory, key=lambda x: x.time, reverse=True)
for msg in sorted_list:
if "1" == msg.type:
question = msg.text
break
            # contentMsg=policy terms, question=the previous question, prompt=additional detail (the user's answer)
prompt = TemplateMaker.makeTemplateText('ANSWER_01', [contentMsg, question, prompt])
chat = ChatOpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler()], model_name='gpt-3.5-turbo', temperature=0.9)
sys = SystemMessage(content="")
msg = HumanMessage(content=prompt)
resultIter = chat.stream([sys, msg])
pass
else:
chat = ChatOpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler()], model_name='gpt-3.5-turbo', temperature=0.9)
sys = SystemMessage(content=contentMsg)
msg = HumanMessage(content=prompt)
resultIter = chat.stream([sys, msg])
pass
        # Deliver the answer.
for result in resultIter:
yield dialogue_pb2.Message(text=result.content, type="2")
def serve():
port = os.environ.get('SERVER_PORT')
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
# add service
ask_pb2_grpc.add_AskerServicer_to_server(Asker(), server)
dialogue_pb2_grpc.add_CommunicatorServicer_to_server(Communicator(), server)
# start server
    server.add_insecure_port("[::]:" + port) # no authentication; use add_secure_port in production
server.start()
print(f"Server started, listening {port}")
server.wait_for_termination()
if __name__ == "__main__":
logging.basicConfig()
serve()
| [
"CONFIRM_QUESTION_01"
] |
2024-01-10 | aifredlab/demoCore | test~test_savePDF.py | from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Milvus
from langchain.document_loaders import PyPDFLoader
loader = PyPDFLoader("opengpt/test.pdf")
documents = loader.load_and_split()
# from langchain.document_loaders import AmazonTextractPDFLoader
# loader = AmazonTextractPDFLoader("example_data/alejandro_rosalez_sample-small.jpeg")
# documents = loader.load()
total_characters = sum(len(content.page_content) for content in documents)
total_word = sum(len(content.page_content.split()) for content in documents)
print(f"total page : {len(documents)}")
print(f"total word : {total_word}")
print(f"total characters : {total_characters}")
print(f"price : ${total_characters / 1000 * 0.0001}") #$0.0001
vector_db = Milvus.from_documents(
documents,
OpenAIEmbeddings(),
connection_args={"host": "127.0.0.1", "port": "19530"},
)
query = "의무보험이란?"
docs = vector_db.similarity_search(query)
print(f"요청 : {query}")
print(f"응답 : {docs[0].page_content}")
| [] |
2024-01-10 | aifredlab/demoCore | src~opengpt~combine_documents_stuff.py | from langchain.chains import StuffDocumentsChain, LLMChain
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
# This controls how each document will be formatted. Specifically,
# it will be passed to `format_document` - see that function for more
# details.
document_prompt = PromptTemplate(
input_variables=["page_content"],
template="{page_content}"
)
document_variable_name = "context"
llm = OpenAI()
# The prompt here should take as an input variable the
# `document_variable_name`
prompt = PromptTemplate.from_template(
"Summarize this content: {context}"
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
chain = StuffDocumentsChain(
llm_chain=llm_chain,
document_prompt=document_prompt,
document_variable_name=document_variable_name
) | [
"{page_content}",
"Summarize this content: {context}",
"page_content"
] |
2024-01-10 | franzfonta/youtube-video-chat | tests~mock_openai_client.py | import random
from unittest.mock import MagicMock
from openai import OpenAI
class MockOpenaiClient(OpenAI):
"""
A mock implementation of the OpenAI client for testing purposes.
"""
def __init__(self):
self.beta = MagicMock()
self.beta.threads.create = lambda **kwargs: MagicMock(
id=str(random.randint(0, 1000)))
self.beta.threads.runs.retrieve.return_value = MagicMock(
status="completed")
messages = MagicMock()
messages.data[0].content[0].text.value = "I'm the assistant, here is my answer"
self.beta.threads.messages.list.return_value = messages
| [] |
2024-01-10 | franzfonta/youtube-video-chat | youtube_video_chat~youtube_assistant.py | import logging
import time
from typing import Callable, Optional
from openai import OpenAI
from youtube_video_chat.youtube_thread import YouTubeThread
class YouTubeAssistant:
"""
Represents an assistant for interacting with YouTube videos and transcripts.
"""
def __init__(self, client: OpenAI, assistant_id: str, transcript_fetcher: Callable[[str], str]):
"""
Initializes a new instance of the YouTubeAssistant class by retrieving the existing
assistant.
Args:
client: The client object used to interact with the YouTube API.
transcript_fetcher: An instance of the YouTubeTranscriptFetcher class used to
fetch video transcripts.
"""
self.client = client
self.transcript_fetcher = transcript_fetcher
self.assistant = client.beta.assistants.retrieve(assistant_id)
def create_thread(self, video_url: str) -> YouTubeThread:
"""
Creates the thread for a YouTube video and sends the first message with the transcript.
Args:
video_url (str): The URL of the YouTube video.
Returns:
YouTubeThread: The created YouTubeThread object.
"""
transcript = self.transcript_fetcher(video_url)
openai_thread = self.client.beta.threads.create()
# TODO persist thread ID for later retrieval
youtube_thread = YouTubeThread(video_url, transcript, openai_thread)
# Create the first message in the thread with the video transcript
initial_prompt = f"This is the transcript of a YouTube video: \
\n\"{transcript}\".\n \
In the following messages I will ask you questions about it. \
As for now, summarize the video in 100 words or less."
self.ask_question(youtube_thread, initial_prompt, True)
return youtube_thread
def __retrieve_run(self, thread_id: str, run_id: str, max_retries: int = 5, base_delay: int = 2):
"""
Retrieve a run from a thread until it is completed or maximum retries are reached.
Args:
thread_id (str): The ID of the thread.
run_id (str): The ID of the run.
max_retries (int, optional): The maximum number of retries. Defaults to 5.
base_delay (int, optional): The base delay in seconds. Defaults to 2.
Returns:
The completed run.
Raises:
Exception: If maximum retries are reached and the operation fails.
"""
# Poll the run until it is completed
retries = 0
while retries < max_retries:
logging.info(f"Attempt {retries + 1}")
run = self.client.beta.threads.runs.retrieve(
thread_id=thread_id, run_id=run_id)
if run.status == "completed":
return run
else:
retries += 1
delay = base_delay * 2 ** retries
logging.info(f"Retrying in {delay:.2f} seconds...")
time.sleep(delay)
raise Exception("Max retries reached, operation failed.")
def ask_question(self, thread: YouTubeThread, prompt: str, is_initial_prompt: bool = False) -> Optional[str]:
"""
Sends a question to the YouTube Assistant and retrieves the response.
Args:
thread (YouTubeThread): The YouTube thread to send the question to.
prompt (str): The question prompt.
is_initial_prompt (bool, optional): True if the prompt is the initial prompt. Defaults to False.
Returns:
Optional[str]: The response from the YouTube Assistant or None if the operation fails.
"""
# Add user message to thread except for the initial prompt
if not is_initial_prompt:
thread.messages.append({"role": "user", "content": prompt})
try:
# Create a new message in the thread
message = self.client.beta.threads.messages.create(
thread_id=thread.openai_thread.id,
role="user",
content=prompt
)
# Create a new run
run = self.client.beta.threads.runs.create(
thread_id=thread.openai_thread.id,
assistant_id=self.assistant.id
)
# Wait for the run to complete
run = self.__retrieve_run(thread.openai_thread.id, run.id)
# Retrieve the last message in the thread
messages = self.client.beta.threads.messages.list(
thread_id=thread.openai_thread.id)
response = messages.data[0].content[0].text.value
# Add assistant response to chat history
thread.messages.append({"role": "assistant", "content": response})
return response
except Exception as e:
logging.error(e)
return None
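# Minimal usage sketch (illustrative only; the assistant id, URL and transcript
# fetcher below are placeholders, not values shipped with this module):
#
#   client = OpenAI()
#   assistant = YouTubeAssistant(client, "asst_xxx", fetch_transcript)
#   thread = assistant.create_thread("https://www.youtube.com/watch?v=...")
#   answer = assistant.ask_question(thread, "What are the key takeaways?")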
| [
"This is the transcript of a YouTube video: \n\"PLACEHOLDER\".\n In the following messages I will ask you questions about it. As for now, summarize the video in 100 words or less."
] |
2024-01-10 | ContextLab/chatify | chatify~llm_models.py | import os
import warnings
with warnings.catch_warnings(): # catch warnings about accelerate library
warnings.simplefilter("ignore")
from langchain.llms import OpenAI, HuggingFacePipeline, LlamaCpp
from langchain.llms.base import LLM
from langchain.chat_models import ChatOpenAI
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
try:
from huggingface_hub import hf_hub_download
except ModuleNotFoundError:
hf_hub_download = None # ignore missing library unless needed later
from .utils import FakeListLLM
class ModelsFactory:
"""A factory class for creating different models."""
def __init__(self, *args) -> None:
"""
Initializes the ModelsFactory instance.
Parameters
----------
*args : tuple
Variable-length arguments.
Returns
-------
None
"""
return None
def get_model(self, model_config):
"""Returns the initialized model based on the model configuration.
Parameters
----------
model_config : dict
Configuration for the desired model.
Returns
-------
model : object
Initialized model based on the model configuration.
Raises
------
RuntimeError
If the specified model is not supported.
"""
model_ = model_config['model']
# Collect all the models
models = {
'open_ai_model': OpenAIModel,
'open_ai_chat_model': OpenAIChatModel,
'fake_model': FakeLLMModel,
'cached_model': CachedLLMModel,
'huggingface_model': HuggingFaceModel,
'llama_model': LlamaModel,
'proxy': ProxyModel,
}
if model_ in models.keys():
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if type(models[model_]) == str:
return models[model_]
else:
return models[model_](model_config).init_model()
else:
raise RuntimeError(f"{model_} is not supported yet!")
class BaseLLMModel:
"""Base class for Language Model (LLM) models."""
def __init__(self, model_config) -> None:
"""Initializes the BaseLLMModel instance.
Parameters
----------
model_config : dict
Configuration for the model.
Returns
-------
None
"""
self.model_config = model_config
self.llm_model = None
def init_model(self, *args, **kwargs):
"""Initializes the LLM model (to be implemented by derived classes).
Parameters
----------
*args : tuple
Variable-length arguments.
**kwargs : dict
Arbitrary keyword arguments.
Raises
------
NotImplementedError
If not implemented by derived classes.
"""
raise NotImplementedError
class OpenAIModel(BaseLLMModel):
"""Class representing an OpenAI Chat Model derived from BaseLLMModel."""
def __init__(self, model_config) -> None:
"""Initializes the OpenAIChatModel instance.
Parameters
----------
model_config : dict
Configuration for the model.
Returns
-------
None
"""
super().__init__(model_config)
def init_model(self):
"""Initializes the OpenAI Chat Model.
Returns
-------
llm_model : ChatOpenAI
Initialized OpenAI Chat Model.
"""
if self.model_config['open_ai_key'] is None:
            raise ValueError('openai_api_key value cannot be None')
os.environ["OPENAI_API_KEY"] = self.model_config['open_ai_key']
llm_model = OpenAI(
temperature=0.85,
openai_api_key=self.model_config['open_ai_key'],
model_name=self.model_config['model_name'],
presence_penalty=0.1,
max_tokens=self.model_config['max_tokens'],
)
return llm_model
class OpenAIChatModel(BaseLLMModel):
"""Class representing an OpenAI Chat Model derived from BaseLLMModel."""
def __init__(self, model_config) -> None:
"""Initializes the OpenAIChatModel instance.
Parameters
----------
model_config : dict
Configuration for the model.
Returns
-------
None
"""
super().__init__(model_config)
def init_model(self):
"""Initializes the OpenAI Chat Model.
Returns
-------
llm_model : ChatOpenAI
Initialized OpenAI Chat Model.
"""
if self.model_config['open_ai_key'] is None:
            raise ValueError('openai_api_key value cannot be None')
llm_model = ChatOpenAI(
temperature=0.85,
openai_api_key=self.model_config['open_ai_key'],
model_name=self.model_config['model_name'],
presence_penalty=0.1,
max_tokens=self.model_config['max_tokens'],
)
return llm_model
class FakeLLMModel(BaseLLMModel):
def __init__(self, model_config) -> None:
"""Initializes the FakeListLLM instance.
Parameters
----------
model_config : dict
Configuration for the model.
Returns
-------
None
"""
super().__init__(model_config)
def init_model(self):
"""Initializes the Fake Chat Model.
Returns
-------
llm_model : FakeListLLM
Initialized Fake Chat Model.
"""
responses = [
'The explanation you requested has not been included in Chatify\'s cache. You\'ll need to enable interactive mode to generate a response. Please see the [Chatify GitHub repository](https://github.com/ContextLab/chatify) for instructions. Note that generating responses to uncached content will require an [OpenAI API Key](https://platform.openai.com/account/api-keys).'
]
llm_model = FakeListLLM(responses=responses)
return llm_model
class CachedLLMModel(BaseLLMModel):
def __init__(self, model_config) -> None:
"""Initializes the FakeListLLM instance.
Parameters
----------
model_config : dict
Configuration for the model.
Returns
-------
None
"""
super().__init__(model_config)
def init_model(self):
"""Initializes the Fake Chat Model.
Returns
-------
llm_model : FakeListLLM
Initialized Fake Chat Model.
"""
llm_model = FakeListLLM(
responses=[
f'The explanation you requested has not been included in Chatify\'s cache. You\'ll need to enable interactive mode to generate a response. Please see the [Chatify GitHub repository](https://github.com/ContextLab/chatify) for instructions. Note that generating responses to uncached content will require an [OpenAI API Key](https://platform.openai.com/account/api-keys).'
]
)
return llm_model
class HuggingFaceModel(BaseLLMModel):
def __init__(self, model_config) -> None:
"""Initializes the model instance.
Parameters
----------
model_config : dict
Configuration for the model.
Returns
-------
None
"""
super().__init__(model_config)
def init_model(self):
"""Initializes the OpenAI Chat Model.
Returns
-------
llm_model : HuggingFaceModel
Initialized Hugging Face Chat Model.
"""
self.proxy = self.model_config['proxy']
self.proxy_port = self.model_config['proxy_port']
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
llm = HuggingFacePipeline.from_model_id(
model_id=self.model_config['model_name'],
task='text-generation',
device=0,
model_kwargs={'max_length': self.model_config['max_tokens']},
)
            except Exception:
llm = HuggingFacePipeline.from_model_id(
model_id=self.model_config['model_name'],
task='text-generation',
model_kwargs={
'max_length': self.model_config['max_tokens'],
'temperature': 0.85,
'presence_penalty': 0.1,
},
)
return llm
class LlamaModel(BaseLLMModel):
def __init__(self, model_config) -> None:
"""Initializes the model instance.
Parameters
----------
model_config : dict
Configuration for the model.
Returns
-------
None
"""
super().__init__(model_config)
def init_model(self):
"""Initializes the OpenAI Chat Model.
Returns
-------
llm_model : HuggingFaceModel
Initialized Hugging Face Chat Model.
"""
self.model_path = hf_hub_download(
repo_id=self.model_config['model_name'],
filename=self.model_config['weights_fname'],
)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
try:
llm = LlamaCpp(
model_path=self.model_path,
max_tokens=self.model_config['max_tokens'],
n_gpu_layers=self.model_config['n_gpu_layers'],
n_batch=self.model_config['n_batch'],
callback_manager=callback_manager,
verbose=True,
)
            except Exception:
llm = LlamaCpp(
model_path=self.model_path,
max_tokens=self.model_config['max_tokens'],
n_batch=self.model_config['n_batch'],
callback_manager=callback_manager,
verbose=True,
)
return llm
class ProxyModel(BaseLLMModel):
def __init__(self, model_config) -> None:
"""Initializes the model instance.
Parameters
----------
model_config : dict
Configuration for the model.
Returns
-------
None
"""
super().__init__(model_config)
def init_model(self):
return None
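# Minimal usage sketch (illustrative only; the key and model name below are
# placeholders, and only the config fields read above are shown):
#
#   config = {
#       'model': 'open_ai_chat_model',
#       'open_ai_key': 'sk-...',
#       'model_name': 'gpt-3.5-turbo',
#       'max_tokens': 256,
#   }
#   llm = ModelsFactory().get_model(config)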
| [] |
2024-01-10 | Bluebotlaboratories/Virtu | models.py | import openai
import time
import json
import re
import os
# Handle all the AI Stuff
class OpenAICompletionModel():
def __init__(self, apiKey):
self.apiKey = apiKey
self.defaultApiKey = apiKey
# Initialise OpenAI
openai.api_key = self.apiKey
self.engines = openai.Engine.list()
##for engine in self.engines.data:
## if (engine.object == "engine" and engine.ready):
## print(engine.id)
## else:
## pass
# Initialise "addons"
self.memory = [
"Virtu is a large language model trained by OpenAI. It is designed to be a chatbot, and should not autocomplete prompts. Do not autocomplete, complete or edit prompts in any way. knowledge cutoff: 2021-09 Current date: December 10 2022 Browsing: disabled"
]
# Define initialisation prompts
self.initialisationPrompts = {}
availablePromptFiles = os.listdir("./initialisationPrompts/")
for promptFile in availablePromptFiles:
if (promptFile.split('.')[-1] == 'txt'):
with open(os.path.join("./initialisationPrompts", promptFile), 'r') as file:
promptData = file.read()
self.initialisationPrompts['.'.join(promptFile.split('.')[:-1])] = promptData.split("===")
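        # Each prompt file may hold several prompts separated by "===";
        # processInitialisationPrompt() later replays them in order to prime the chat.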
# Define AI Options
self.defaultConfig = {
"engine": "text-davinci-003",
"temperature": 0.5,
"max_tokens": 512,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"useMemory": True
}
self.config = self.defaultConfig
# Timeout stuff
self.timeoutReason = ""
self.timeout = 0
# "Premium" Stuff
self.premiumMode = False
# Premium stuff
def enablePremiumMode(self, apiKey):
if (apiKey != self.defaultApiKey):
try:
openai.api_key = apiKey
self.engines = openai.Engine.list()
self.premiumMode = True
self.apiKey = apiKey
return True
except:
return False
else:
return False
# Reset memory
def resetMemory(self):
self.memory = self.memory[:1]
# Import ChatGPT history
def importMemory(self, memoryToImport):
try:
self.resetMemory()
currentChatItem = "User: "
for memoryItem in memoryToImport:
self.memory.append(currentChatItem + memoryItem)
if (currentChatItem == "User: "):
currentChatItem = "Response: "
else:
currentChatItem = "User: "
if (currentChatItem != "User: "):
prompt = self.memory[-1]
self.memory = self.memory[:-1]
response = self.processPrompt(prompt)
response += "\n\n" + "Chat Imported Successfuly"
else:
response = "Chat Imported Successfuly"
return response
except Exception as e:
return "Error Importing Chat: " + str(e)
# Actual AI part
def processPrompt(self, prompt):
if (time.time() < self.timeout):
return "Timed out - Please wait [" + str(round(self.timeout - time.time())) + "] seconds...\nReason:\n" + self.timeoutReason
# Add prompt to memory
self.memory.append("User: " + prompt)
self.memory.append("Response: ")
# Haha big brain go brrrrrrr
error = True
try:
openai.api_key = self.apiKey
completion = openai.Completion.create(
engine=self.config["engine"],
prompt='\n'.join(self.memory), #prompt
temperature=self.config["temperature"],
max_tokens=self.config["max_tokens"],
top_p=self.config["top_p"],
frequency_penalty=self.config["frequency_penalty"],
presence_penalty=self.config["presence_penalty"],
)
response = completion.choices[0].text
            if (len(response.strip()) >= 10 and response.strip().lower()[:10] == "response: "):
                response = re.sub('response: ', '', response, 1, re.I)
            elif (len(response.strip()) >= 9 and response.strip().lower()[:9] == "response:"):
                response = re.sub('response:', '', response, 1, re.I)
            elif ("response:" in response.lower()):
                response = re.sub('response:', '', response, 1, re.I)
response = response.lstrip()
#print("Response: " + response)
# Add response to memory and return it
self.memory[-1] = "Response: " + response
error = False
except openai.error.RateLimitError as e:
response = "[RATELIMITED - PLEASE WAIT 30 SECONDS]\n`" + str(e) + "`"
self.timeout = time.time() + 30
self.timeoutReason = "OpenAI rate limited"
except openai.error.InvalidRequestError as e:
print(e)
response = "[HISTORY FULL - PLEASE RESET]"
if (not self.premiumMode and not error):
self.timeout = time.time() + 30
self.timeoutReason = "--__Remove timeouts with Virtu Premium:__--\nLiterally just provide your own api key, see more info in `/config Premium Mode`"
return response
def processInitialisationPrompt(self, prompt):
self.resetMemory()
response = ""
try:
with open(os.path.join("./initialisationPrompts", prompt + "_config.json"), 'r') as aiConfigJSON:
self.config = json.loads(aiConfigJSON.read())
except:
self.config = self.defaultConfig
print("Initialised, Using config:", self.config)
for promptToInitialiseWith in self.initialisationPrompts[prompt]:
response = self.processPrompt(promptToInitialiseWith)
#print('\n'.join(self.memory))
return response | [
"./initialisationPrompts/",
"\n"
] |
2024-01-10 | YatinChaudhary/TopicBERT | TopicBERT~topic_bert~nvdm~model_GSM.py | """NVDM Tensorflow implementation by Yishu Miao"""
from __future__ import print_function
import numpy as np
import tensorflow as tf
import math
import os
from nvdm import utils
#import model.utils as utils
#from sklearn.preprocessing import MultiLabelBinarizer
#import sklearn.metrics.pairwise as pw
#from gensim.models import CoherenceModel
#from gensim.corpora.dictionary import Dictionary
#import model.evaluate as eval
#import model.data_lstm as data
seed = 42
tf_op_seed = 1234
np.random.seed(seed)
tf.set_random_seed(seed)
#learning_rate = 5e-5
#batch_size = 64
#n_hidden = 256
#fixed_topic_params
#n_topic = 150
#n_sample = 1
#non_linearity = tf.nn.tanh
non_linearity = tf.nn.sigmoid
######
class NVDM(object):
""" Neural Variational Document Model -- BOW VAE.
"""
#def __init__(self, topic_params, prior_embeddings=None, initializer_nvdm=None):
def __init__(self, topic_params, x, mask , topic_vocab_size, prior_embeddings=None, initializer_nvdm=None):
#self.vocab_size = topic_params.TM_vocab_length
self.vocab_size = topic_vocab_size
self.n_hidden = topic_params.hidden_size_TM
self.n_topic = topic_params.n_topic
self.n_sample = topic_params.n_sample
self.non_linearity = non_linearity
self.learning_rate = topic_params.nvdm_learning_rate
self.batch_size = topic_params.nvdm_batch_size
self.x = x
self.mask = mask
#self.x = tf.placeholder(tf.float32, [None, self.vocab_size], name='x')
#self.mask = tf.placeholder(tf.float32, [None], name='mask') # mask paddings
#if topic_params.use_sent_topic_rep:
#self.x_sent = tf.placeholder(tf.float32, [None, None, self.vocab_size], name='x_sent')
#if topic_params.use_topic_embedding:
# self.x_doc_mask = tf.placeholder(tf.float32, [None, self.vocab_size], name='x_doc_mask')
#self.input_batch_size = tf.placeholder(tf.int32, (), name='input_batch_size')
self.input_batch_size = tf.shape(self.x)[0]
#if topic_params.use_sent_topic_rep:
# self.input_batch_size_sent = tf.shape(self.x_sent)[0]
# self.input_batch_len_sent = tf.shape(self.x_sent)[1]
# self.batch_size_sent = self.input_batch_size_sent * self.input_batch_len_sent
# encoder
with tf.variable_scope('TM_encoder', reuse=tf.AUTO_REUSE):
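            # Encoder: map the bag-of-words input through an MLP, then predict the
            # parameters (mean, log sigma) of a Gaussian posterior over the topic space.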
self.enc_vec = utils.mlp(self.x, [self.n_hidden], self.non_linearity, initializer=initializer_nvdm[0])
#self.enc_vec = utils.mlp(self.x, [self.n_hidden, self.n_hidden], self.non_linearity, initializer=initializer_nvdm[0])
#self.enc_vec = utils.mlp(self.x, [self.n_hidden, self.n_hidden], self.non_linearity)
self.mean = utils.nvdm_linear(self.enc_vec,
self.n_topic,
scope='mean',
matrix_initializer=initializer_nvdm[1][0],
bias_initializer=initializer_nvdm[1][1])
self.logsigm = utils.nvdm_linear(self.enc_vec,
self.n_topic,
bias_start_zero=True,
matrix_start_zero=True,
scope='logsigm',
matrix_initializer=initializer_nvdm[2][0],
bias_initializer=initializer_nvdm[2][1])
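            # Closed-form KL divergence between the approximate posterior
            # N(mean, exp(logsigm)^2) and the standard normal prior:
            # KL = -0.5 * sum(1 + 2*logsigm - mean^2 - exp(2*logsigm)), computed per document.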
self.kld = -0.5 * tf.reduce_sum(1 - tf.square(self.mean) + 2 * self.logsigm - tf.exp(2 * self.logsigm), 1)
#self.kld = self.mask*self.kld # mask paddings
self.kld = tf.multiply(self.mask, self.kld, name='kld') # mask paddings
#if topic_params.use_sent_topic_rep:
# self.x_sent_reshape = tf.reshape(self.x_sent, [-1, self.vocab_size])
# self.enc_vec_sent = utils.mlp(self.x_sent_reshape, [self.n_hidden], self.non_linearity)
# #self.enc_vec = utils.mlp(self.x, [self.n_hidden, self.n_hidden], self.non_linearity)
# self.mean_sent = utils.nvdm_linear(self.enc_vec_sent, self.n_topic, scope='mean')
# self.logsigm_sent = utils.nvdm_linear(self.enc_vec_sent,
# self.n_topic,
# bias_start_zero=True,
# matrix_start_zero=True,
# scope='logsigm')
#if topic_params.prior_emb_for_topics:
# W_prior = tf.get_variable(
# 'embeddings_TM_prior',
# dtype=tf.float32,
# initializer=prior_embeddings,
# trainable=False
# )
"""
W_prior_proj = tf.get_variable(
'embeddings_TM_prior_proj',
[prior_embeddings.shape[1], self.n_topic],
dtype=tf.float32,
trainable=False
)
W_prior = tf.matmul(W_prior, W_prior_proj, name='W_prior_projected')
"""
with tf.variable_scope('TM_decoder', reuse=tf.AUTO_REUSE):
if self.n_sample == 1:
eps = tf.random_normal((self.input_batch_size, self.n_topic), mean=0.0, stddev=1.0, seed=seed)
#doc_vec = tf.mul(tf.exp(self.logsigm), eps) + self.mean
## Hidden representation to be used in BERT
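                # Reparameterisation trick: sample doc_vec = mean + exp(logsigm) * eps with
                # eps ~ N(0, I); the softmax below turns the Gaussian sample into topic
                # proportions (the Gaussian Softmax construction this GSM variant uses).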
self.doc_vec = tf.add(tf.multiply(tf.exp(self.logsigm), eps), self.mean, name='doc_hidden')
self.doc_vec = tf.nn.softmax(self.doc_vec, axis = 1)
self.last_h = self.doc_vec
logits_projected, self.decoding_matrix = utils.nvdm_linear(self.doc_vec,
self.vocab_size,
scope='projection',
get_matrix=True,
matrix_initializer=initializer_nvdm[3][0],
bias_initializer=initializer_nvdm[3][1])
logits = tf.nn.log_softmax(logits_projected)
self.recons_loss = -tf.reduce_sum(tf.multiply(logits, self.x), 1)
"""
if topic_params.use_topic_embedding:
#self.last_h_topic_emb = utils.nvdm_linear(tf.nn.softmax(self.last_h, axis=1), self.vocab_size, scope='projection')
#self.top_k = tf.nn.top_k(self.decoding_matrix, k=topic_params.use_k_topic_words, sorted=False)
topics_masked = tf.multiply(tf.expand_dims(self.x_doc_mask, axis=1), tf.expand_dims(self.decoding_matrix, axis=0), name='topics_masked')
self.top_k = tf.nn.top_k(topics_masked, k=topic_params.use_k_topic_words, sorted=False)
if topic_params.prior_emb_for_topics:
self.top_k_embeddings = tf.nn.embedding_lookup(W_prior, self.top_k.indices)
self.topic_emb_size = prior_embeddings.shape[1]
#self.topic_emb_size = prior_embeddings.shape[1] * topic_params.use_k_topics
#self.topic_emb_size = prior_embeddings.shape[1] + self.n_topic
#self.topic_emb_size = self.n_topic
#self.topic_emb_size = self.n_topic * 2
else:
self.top_k_embeddings = tf.nn.embedding_lookup(tf.transpose(self.decoding_matrix), self.top_k.indices)
#self.topic_emb_size = self.n_topic
self.topic_emb_size = self.n_topic * 2
#self.top_k_embeddings = tf.multiply(tf.expand_dims(tf.nn.softmax(self.top_k.values, axis=1), axis=2), self.top_k_embeddings)
#self.temp_1 = tf.expand_dims(tf.nn.softmax(self.top_k.values, axis=2), axis=2)
#self.topic_embeddings = tf.squeeze(tf.matmul(self.temp_1, self.top_k_embeddings), axis=2, name='topic_embeddings')
#self.topic_embeddings = tf.reduce_sum(self.top_k_embeddings, axis=1, name='topic_embeddings')
#self.topic_embeddings = tf.reduce_mean(self.top_k_embeddings, axis=1, name='topic_embeddings')
self.topic_embeddings = tf.reduce_mean(self.top_k_embeddings, axis=2, name='topic_embeddings')
if topic_params.use_k_topics > 0:
# Masking document topic proportion vector
top_k_h_values, top_k_h_indices = tf.nn.top_k(self.last_h, k=topic_params.use_k_topics, sorted=False, name='top_k_h')
row_numbers = tf.tile(tf.expand_dims(tf.range(0, self.input_batch_size), 1), [1, topic_params.use_k_topics], name='row_numbers')
full_indices = tf.concat([tf.expand_dims(row_numbers, -1), tf.expand_dims(top_k_h_indices, -1)], axis=2)
full_indices = tf.reshape(full_indices, [-1, 2], name='full_indices')
#mask_updates = tf.ones([self.input_batch_size * topic_params.use_k_topics], dtype=tf.float32, name='mask_updates')
#new_mask = tf.scatter_nd(full_indices, mask_updates, [self.input_batch_size, self.n_topic], name='new_mask')
#last_h_softmax = tf.multiply(tf.nn.softmax(self.last_h, axis=1), new_mask, name='last_h_softmax')
last_h_softmax = tf.scatter_nd(
full_indices,
tf.reshape(tf.nn.softmax(top_k_h_values, axis=1), [-1]),
#tf.ones([self.input_batch_size * topic_params.use_k_topics], dtype=tf.float32),
[self.input_batch_size, self.n_topic],
name='last_h_softmax'
)
else:
last_h_softmax = tf.nn.softmax(self.last_h, axis=1, name='last_h_softmax')
#last_h_softmax = self.last_h
#self.last_h_topic_emb = tf.matmul(last_h_softmax, self.topic_embeddings, name='last_h_topic_emb')
self.last_h_topic_emb = tf.squeeze(tf.matmul(tf.expand_dims(last_h_softmax, axis=1), self.topic_embeddings), axis=1, name='last_h_topic_emb')
#temp = tf.nn.embedding_lookup(self.topic_embeddings, top_k_h_indices)
#self.last_h_topic_emb = tf.reduce_sum(temp, axis=1, name='last_h_topic_emb')
#self.last_h_topic_emb = tf.reshape(temp, [self.input_batch_size, self.topic_emb_size], name='last_h_topic_emb')
#self.last_h_topic_emb = tf.concat([self.last_h_topic_emb, last_h_softmax], axis=1)
#self.last_h_topic_emb = tf.concat([self.last_h_topic_emb, self.last_h], axis=1)
"""
else:
eps = tf.random_normal((self.n_sample*self.input_batch_size, self.n_topic), mean=0.0, stddev=1.0, seed=seed)
eps_list = tf.split(eps, self.n_sample, 0)
recons_loss_list = []
doc_vec_list = []
for i in range(self.n_sample):
if i > 0: tf.get_variable_scope().reuse_variables()
curr_eps = eps_list[i]
doc_vec = tf.add(tf.multiply(tf.exp(self.logsigm), curr_eps), self.mean)
doc_vec = tf.nn.softmax(doc_vec, axis=1)
doc_vec_list.append(doc_vec)
logits = tf.nn.log_softmax(utils.nvdm_linear(doc_vec, self.vocab_size, scope='projection'))
recons_loss_list.append(-tf.reduce_sum(tf.multiply(logits, self.x), 1))
self.recons_loss = tf.add_n(recons_loss_list) / self.n_sample
self.doc_vec = tf.add_n(doc_vec_list) / self.n_sample
self.last_h = self.doc_vec
""""
if topic_params.use_sent_topic_rep:
if self.n_sample == 1:
eps_sent = tf.random_normal((self.batch_size_sent, self.n_topic), mean=0.0, stddev=1.0, seed=seed)
self.last_h_sent = tf.add(tf.multiply(tf.exp(self.logsigm_sent), eps_sent), self.mean_sent, name='sent_hidden')
self.last_h_sent = tf.reshape(self.last_h_sent, [self.input_batch_size_sent, self.input_batch_len_sent, self.n_topic])
if topic_params.use_topic_embedding:
#self.last_h_topic_emb_sent = utils.nvdm_linear(tf.nn.softmax(self.last_h_sent, axis=1), self.vocab_size, scope='projection')
if topic_params.use_k_topics > 0:
# Masking sentence topic proportion vector
top_k_h_sent_values, top_k_h_sent_indices = tf.nn.top_k(self.last_h_sent, k=topic_params.use_k_topics, sorted=False, name='top_k_h_sent')
row_numbers_sent = tf.tile(tf.expand_dims(tf.range(0, self.batch_size_sent), 1), [1, topic_params.use_k_topics], name='row_numbers_sent')
full_indices_sent = tf.concat([tf.expand_dims(row_numbers_sent, -1), tf.expand_dims(top_k_h_sent_indices, -1)], axis=2)
full_indices_sent = tf.reshape(full_indices_sent, [-1, 2], name='full_indices_sent')
#mask_updates_sent = tf.ones([self.batch_size_sent * topic_params.use_k_topics], dtype=tf.float32, name='mask_updates_sent')
#new_mask_sent = tf.scatter_nd(full_indices_sent, mask_updates_sent, [self.batch_size_sent, self.n_topic], name='new_mask_sent')
#last_h_softmax_sent = tf.multiply(tf.nn.softmax(self.last_h_sent, axis=1), new_mask_sent, name='last_h_softmax_sent')
last_h_softmax_sent = tf.scatter_nd(full_indices_sent, tf.reshape(tf.nn.softmax(top_k_h_sent_values, axis=1), [-1]), [self.batch_size_sent, self.n_topic], name='last_h_softmax_sent')
else:
last_h_softmax_sent = tf.nn.softmax(self.last_h_sent, axis=2, name='last_h_softmax_sent')
self.last_h_topic_emb_sent = tf.matmul(last_h_softmax_sent, self.topic_embeddings, name='last_h_topic_emb_sent')
#self.last_h_topic_emb_sent = tf.concat([self.last_h_topic_emb_sent, self.last_h_sent], axis=2, name='last_h_topic_emb_sent')
#self.last_h_topic_emb_sent = tf.concat([self.last_h_topic_emb_sent, last_h_softmax_sent], axis=2, name='last_h_topic_emb_sent')
#self.last_h_topic_emb_sent = tf.reshape(self.last_h_topic_emb_sent, [self.input_batch_size_sent, self.input_batch_len_sent, self.vocab_size])
else:
print("Error: model_NVDM.py - Decoder")
sys.exit()
"""
#self.objective_TM = self.recons_loss + self.kld
#self.objective_TM = tf.add(self.recons_loss, self.kld, name='TM_loss_unnormed')
self.final_loss = tf.add(self.recons_loss, self.kld, name='TM_loss_unnormed')
self.objective_TM = tf.reduce_mean(self.final_loss)
"""
if topic_params.TM_uniqueness_loss:
## NVDM topic uniqueness loss
eye = tf.constant(np.eye(self.n_topic), dtype=tf.float32)
topicnorm = matrix / tf.sqrt(tf.reduce_sum(tf.square(self.decoding_matrix), 1, keepdims=True))
uniqueness = tf.reduce_max(tf.square(tf.matmul(topicnorm, tf.transpose(topicnorm)) - eye))
self.objective_TM += topic_params.alpha_uniqueness * uniqueness
"""
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
#fullvars = tf.trainable_variables()
#enc_vars = utils.variable_parser(fullvars, 'TM_encoder')
enc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='TM_encoder')
#dec_vars = utils.variable_parser(fullvars, 'TM_decoder')
dec_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='TM_decoder')
self.pretrain_saver = tf.train.Saver(enc_vars + dec_vars)
enc_grads = tf.gradients(self.objective_TM, enc_vars)
dec_grads = tf.gradients(self.objective_TM, dec_vars)
self.optim_enc = optimizer.apply_gradients(zip(enc_grads, enc_vars))
self.optim_dec = optimizer.apply_gradients(zip(dec_grads, dec_vars))
## Pretraining of NVDM-TM
def pretrain(self, dataset, topic_params, nvdm_datadir , session,
#training_epochs=1000, alternate_epochs=10):
#training_epochs=100, alternate_epochs=10):
training_epochs=20, alternate_epochs=10):
#training_epochs=1, alternate_epochs=1):
#log_dir = os.path.join(topic_params.model, 'logs_nvdm_pretrain')
#model_dir_ir_nvdm = os.path.join(topic_params.model, 'model_ir_nvdm_pretrain')
#model_dir_ppl_nvdm = os.path.join(topic_params.model, 'model_ppl_nvdm_pretrain')
log_dir = os.path.join(topic_params.output_dir, 'logs_nvdm_pretrain')
model_dir_ir_nvdm = os.path.join(topic_params.output_dir, 'model_ir_nvdm_pretrain')
model_dir_ppl_nvdm = os.path.join(topic_params.output_dir, 'model_ppl_nvdm_pretrain')
#model_dir_supervised = os.path.join(topic_params.model, 'model_supervised_nvdm_pretrain')
if not os.path.isdir(log_dir):
os.mkdir(log_dir)
if not os.path.isdir(model_dir_ir_nvdm):
os.mkdir(model_dir_ir_nvdm)
if not os.path.isdir(model_dir_ppl_nvdm):
os.mkdir(model_dir_ppl_nvdm)
#if not os.path.isdir(model_dir_supervised):
# os.mkdir(model_dir_supervised)
#train_url = os.path.join(topic_params.dataset, 'training_nvdm_docs_non_replicated.csv')
#dev_url = os.path.join(topic_params.dataset, 'validation_nvdm_docs_non_replicated.csv')
#test_url = os.path.join(topic_params.dataset, 'test_nvdm_docs_non_replicated.csv')
train_url = os.path.join(nvdm_datadir, 'training_nvdm_docs_non_replicated.csv')
dev_url = os.path.join(nvdm_datadir, 'validation_nvdm_docs_non_replicated.csv')
test_url = os.path.join(nvdm_datadir, 'test_nvdm_docs_non_replicated.csv')
train_set, train_count, train_labels, train_doc_ids = utils.data_set(train_url, topic_params)
test_set, test_count, test_labels, test_doc_ids = utils.data_set(test_url ,topic_params)
dev_set, dev_count, dev_labels, dev_doc_ids = utils.data_set(dev_url, topic_params)
dev_batches = utils.create_batches(len(dev_set), self.batch_size, shuffle=False)
#dev_batches = utils.create_batches(len(dev_set), 512, shuffle=False)
test_batches = utils.create_batches(len(test_set), self.batch_size, shuffle=False)
#test_batches = utils.create_batches(len(test_set), 512, shuffle=False)
#training_labels = np.array(
# [[y] for y, _ in dataset.rows('training_nvdm_docs_non_replicated', num_epochs=1)]
#)
#validation_labels = np.array(
# [[y] for y, _ in dataset.rows('validation_nvdm_docs_non_replicated', num_epochs=1)]
#)
#test_labels = np.array(
# [[y] for y, _ in dataset.rows('test_nvdm_docs_non_replicated', num_epochs=1)]
#)
patience = topic_params.nvdm_patience
patience_count = 0
best_dev_ppl = np.inf
best_test_ppl = np.inf
best_val_nvdm_IR = -1.0
best_test_nvdm_IR = -1.0
ppl_model = False
ir_model = False
for epoch in range(training_epochs):
epoch_counter = epoch + 1
train_batches = utils.create_batches(len(train_set), self.batch_size, shuffle=True)
#train_batches = utils.create_batches(len(train_set), 512, shuffle=True)
#-------------------------------
# train
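            # Alternate between updating only the decoder and only the encoder,
            # running `alternate_epochs` passes over the data for each, as in the
            # original NVDM training loop.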
for switch in range(0, 2):
if switch == 0:
optim = self.optim_dec
print_mode = 'updating decoder'
else:
optim = self.optim_enc
print_mode = 'updating encoder'
for i in range(alternate_epochs):
print_ppx, print_ppx_perdoc, print_kld = self.run_epoch(
train_batches,
train_set,
train_count,
topic_params,
session,
optimizer=optim
)
print('| Epoch train: {:d} |'.format(epoch_counter),
print_mode, '{:d}'.format(i),
'| Corpus Perplexity: {:.5f}'.format(print_ppx), # perplexity for all docs
'| Per doc Perplexity: {:.5f}'.format(print_ppx_perdoc), # perplexity for per doc
'| KLD: {:.5}'.format(print_kld))
if epoch_counter >= 1 and epoch_counter % topic_params.nvdm_validation_ppl_freq == 0:
ppl_model = True
print_ppx, print_ppx_perdoc, print_kld = self.run_epoch(
dev_batches,
dev_set,
dev_count,
topic_params,
session
)
if print_ppx_perdoc < best_dev_ppl:
#if print_ppx_perdoc <= best_dev_ppl:
best_dev_ppl = print_ppx_perdoc
print("Saving best model.")
self.pretrain_saver.save(session, model_dir_ppl_nvdm + '/model_ppl_nvdm_pretrain', global_step=1)
self.save_to_s3_TM(topic_params)
patience_count = 0
else:
patience_count += 1
print('| Epoch dev: {:d} |'.format(epoch_counter),
'| Corpus Perplexity: {:.9f} |'.format(print_ppx),
'| Per doc Perplexity: {:.5f} |'.format(print_ppx_perdoc),
'| KLD: {:.5} |'.format(print_kld),
'| Best dev PPL: {:.5} |'.format(best_dev_ppl))
with open(log_dir + "/logs_ppl_nvdm_pretrain.txt", "a") as f:
f.write('| Epoch Val: {:d} || Val Corpus PPL: {:.9f} || Val Per doc PPL: {:.5f} || Best Val PPL: {:.5} || KLD Val: {:.5} |\n'.format(epoch+1, print_ppx, print_ppx_perdoc, best_dev_ppl, print_kld))
if epoch_counter >= 1 and epoch_counter % topic_params.nvdm_validation_ir_freq == 0:
ir_model = True
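                # NOTE: this IR branch depends on `eval.evaluate` and on the
                # training/validation label arrays whose import and construction are
                # commented out above; restore those before relying on
                # nvdm_validation_ir_freq.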
validation_vectors_nvdm = self.hidden_vectors(
#dataset.batches_nvdm_LM('validation_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label),
dataset.batches_nvdm_LM('validation_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, self.vocab_size, num_epochs=1, multilabel=topic_params.multilabel),
topic_params,
session
)
training_vectors_nvdm = self.hidden_vectors(
#dataset.batches_nvdm_LM('training_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label),
dataset.batches_nvdm_LM('training_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, self.vocab_size, num_epochs=1, multilabel=topic_params.multilabel),
topic_params,
session
)
val_nvdm_ir, _ = eval.evaluate(
training_vectors_nvdm,
validation_vectors_nvdm,
training_labels,
validation_labels,
recall=[0.02],
num_classes=topic_params.nvdm_num_classes,
multi_label=topic_params.multilabel
)
val_nvdm_ir = val_nvdm_ir[0]
# Saving model and Early stopping on IR
if val_nvdm_ir > best_val_nvdm_IR:
best_val_nvdm_IR = val_nvdm_ir
print('saving: {}'.format(model_dir_ir_nvdm))
self.pretrain_saver.save(session, model_dir_ir_nvdm + '/model_ir_nvdm_pretrain', global_step=1)
self.save_to_s3_TM(topic_params)
# patience_count = 0
#else:
# patience_count += 1
print("Epoch: %i, Val NVDM IR: %s, best val NVDM IR: %s\n" %
(epoch_counter, val_nvdm_ir, best_val_nvdm_IR))
# logging information
with open(log_dir + "/logs_ir_nvdm_pretrain.txt", "a") as f:
f.write("Epoch: %i, Val NVDM IR: %s, best val NVDM IR: %s\n" %
(epoch_counter, val_nvdm_ir, best_val_nvdm_IR))
if patience_count > patience:
print("Early stopping.")
break
if ppl_model:
print("Calculating Test PPL.")
self.pretrain_saver.restore(session, tf.train.latest_checkpoint(model_dir_ppl_nvdm))
print_ppx, print_ppx_perdoc, print_kld = self.run_epoch(
test_batches,
test_set,
test_count,
topic_params,
session
)
print('| Corpus Perplexity: {:.9f}'.format(print_ppx),
'| Per doc Perplexity: {:.5f}'.format(print_ppx_perdoc),
'| KLD: {:.5}'.format(print_kld))
with open(log_dir + "/logs_ppl_nvdm_pretrain.txt", "a") as f:
f.write('\n\nTest Corpus PPL: {:.9f} || Test Per doc PPL: {:.5f} || KLD Test: {:.5} |\n'.format(print_ppx, print_ppx_perdoc, print_kld))
if ir_model:
print("Calculating Test IR.")
self.pretrain_saver.restore(session, tf.train.latest_checkpoint(model_dir_ir_nvdm))
test_vectors_nvdm = self.hidden_vectors(
#dataset.batches_nvdm_LM('test_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label),
dataset.batches_nvdm_LM('test_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, self.vocab_size, num_epochs=1, multilabel=topic_params.multilabel),
topic_params,
session
)
test_nvdm_ir, _ = eval.evaluate(
training_vectors_nvdm,
test_vectors_nvdm,
training_labels,
test_labels,
recall=[0.02],
num_classes=topic_params.nvdm_num_classes,
multi_label=topic_params.multilabel
)
test_nvdm_ir = test_nvdm_ir[0]
print("Epoch: %i, Test NVDM IR: %s\n" %
(epoch_counter, test_nvdm_ir))
# logging information
with open(log_dir + "/logs_ir_nvdm_pretrain.txt", "a") as f:
f.write("Epoch: %i, Test NVDM IR: %s\n" %
(epoch_counter, test_nvdm_ir))
def hidden_vectors(self, data, topic_params, session):
vecs = []
for y, x, count, mask in data:
feed_dict = {
self.x.name: x,
self.mask.name: mask
#self.input_batch_size: x.shape[0]
}
vecs.extend(
session.run([self.last_h], feed_dict=feed_dict)[0]
)
return np.array(vecs)
def run_epoch(self, input_batches, input_set, input_count, topic_params, session, optimizer=None):
loss_sum = 0.0
ppx_sum = 0.0
kld_sum = 0.0
word_count = 0
doc_count = 0
for idx_batch in input_batches:
data_batch, count_batch, mask = utils.fetch_data(
input_set, input_count, idx_batch, self.vocab_size, topic_params)
input_feed = {self.x.name: data_batch,
self.mask.name: mask}#,
#self.input_batch_size: data_batch.shape[0]
#}
if not optimizer is None:
_, (loss, kld) = session.run((optimizer,
[self.final_loss, self.kld]),
input_feed)
else:
loss, kld = session.run([self.final_loss, self.kld],
input_feed)
loss_sum += np.sum(loss)
kld_sum += np.sum(kld) / np.sum(mask)
word_count += np.sum(count_batch)
# to avoid nan error
count_batch = np.add(count_batch, 1e-12)
# per document loss
ppx_sum += np.sum(np.divide(loss, count_batch))
doc_count += np.sum(mask)
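        # Corpus-level perplexity: exp(total loss / total word count);
        # per-document perplexity: exp(mean over documents of loss / document length).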
print_ppx = np.exp(loss_sum / word_count)
print_ppx_perdoc = np.exp(ppx_sum / doc_count)
print_kld = kld_sum/len(input_batches)
return print_ppx, print_ppx_perdoc, print_kld
"""
def topic_dist(self, input_batches, input_set, input_count, topic_params, session):
topic_dist = []
mask_list = []
for idx_batch in input_batches:
data_batch, count_batch, mask = utils.fetch_data(
input_set, input_count, idx_batch, self.vocab_size)
input_feed = {self.x.name: data_batch,
self.mask.name: mask}
doc_vec = session.run([self.doc_vec], input_feed)
topic_dist.extend(list(doc_vec[0]))
mask_list.extend(list(mask))
topic_dist_unique = []
for num, m in enumerate(mask_list):
if m!= 0.0:
topic_dist_unique.append(topic_dist[num])
topic_dist_unique = np.asarray(topic_dist_unique)
return topic_dist_unique, mask_list
"""
def topic_dist(self, input_batches, input_set, input_doc_ids , input_count, topic_params, session):
topic_dist = []
mask_list = []
doc_id_list = []
for idx_batch in input_batches:
data_batch, count_batch, mask = utils.fetch_data(
input_set, input_count, idx_batch, self.vocab_size, topic_params)
input_feed = {self.x.name: data_batch,
self.mask.name: mask}
doc_vec = session.run([self.doc_vec], input_feed)
topic_dist.extend(list(doc_vec[0]))
mask_list.extend(list(mask))
for idx in idx_batch:
if idx != -1:
doc_id_list.append(input_doc_ids[idx])
else:
doc_id_list.append(-1)
assert len(topic_dist) == len(doc_id_list)
topic_dist_unique = {}
for id, dist in zip(doc_id_list, topic_dist):
if id != -1:
topic_dist_unique[str(id)] = dist
return topic_dist_unique, mask_list
def save_to_s3_TM(self, topic_params):
pass
def run_epoch_v2(self, data, topic_params, session):
# train_y, train_x, train_count, train_mask = dataset.batches_nvdm_LM(training_data_filename_TM, topic_params.batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label)
# val_y, val_x, val_count, val_mask = dataset.batches_nvdm_LM(validation_data_filename_TM, topic_params.batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label)
# test_y, test_x, test_count, test_mask = dataset.batches_nvdm_LM(test_data_filename_TM, topic_params.batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label)
kld_sum = []
this_nvdm_loss_normed = []
this_nvdm_loss_unnormed = []
this_nvdm_words = []
for nvdm_y, nvdm_x, nvdm_count, nvdm_mask in data:
nvdm_feed_dict = {
                self.x.name: nvdm_x,
                self.mask.name: nvdm_mask#,
                #self.input_batch_size: nvdm_x.shape[0]
            }
            if topic_params.supervised:
                raise NotImplementedError("supervised evaluation is not handled in run_epoch_v2")
            else:
                loss, kld = session.run([self.final_loss,
                                         self.kld],
                                        feed_dict=nvdm_feed_dict)
nvdm_count = np.add(nvdm_count, 1e-12)
this_nvdm_loss_normed.extend(np.divide(loss, nvdm_count))
this_nvdm_loss_unnormed.extend(loss)
this_nvdm_words.append(np.sum(nvdm_count))
kld_sum.append(np.sum(kld) / np.sum(nvdm_mask))
total_nvdm_nll = np.mean(this_nvdm_loss_unnormed)
#total_nvdm_ppl = np.exp(np.sum(this_nvdm_loss_unnormed) / np.sum(this_val_nvdm_words))
total_nvdm_ppl = np.exp(np.mean(this_nvdm_loss_normed))
print_kld = np.mean(kld_sum)
return total_nvdm_nll, total_nvdm_ppl, print_kld
| [] |
2024-01-10 | YatinChaudhary/TopicBERT | TopicBERT~topic_bert~nvdm~model_GSM_supervised.py | """NVDM Tensorflow implementation by Yishu Miao"""
from __future__ import print_function
import numpy as np
import tensorflow as tf
import math
import os
from nvdm import utils
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import accuracy_score
#import model.utils as utils
#from sklearn.preprocessing import MultiLabelBinarizer
#import sklearn.metrics.pairwise as pw
#from gensim.models import CoherenceModel
#from gensim.corpora.dictionary import Dictionary
#import model.evaluate as eval
#import model.data_lstm as data
seed = 42
tf_op_seed = 1234
np.random.seed(seed)
tf.set_random_seed(seed)
#learning_rate = 5e-5
#batch_size = 64
#n_hidden = 256
#fixed_topic_params
#n_topic = 150
#n_sample = 1
#non_linearity = tf.nn.tanh
non_linearity = tf.nn.sigmoid
######
class NVDM(object):
""" Neural Variational Document Model -- BOW VAE.
"""
#def __init__(self, topic_params, prior_embeddings=None, initializer_nvdm=None):
def __init__(self, topic_params, x, mask , topic_vocab_size, label_ids, n_labels, prior_embeddings=None, initializer_nvdm=None):
#self.vocab_size = topic_params.TM_vocab_length
self.vocab_size = topic_vocab_size
self.n_hidden = topic_params.hidden_size_TM
self.n_topic = topic_params.n_topic
self.n_sample = topic_params.n_sample
self.non_linearity = non_linearity
self.learning_rate = topic_params.nvdm_learning_rate
self.batch_size = topic_params.nvdm_batch_size
self.x = x
self.mask = mask
self.label_ids = label_ids
self.n_labels = n_labels
#self.x = tf.placeholder(tf.float32, [None, self.vocab_size], name='x')
#self.mask = tf.placeholder(tf.float32, [None], name='mask') # mask paddings
#if topic_params.use_sent_topic_rep:
#self.x_sent = tf.placeholder(tf.float32, [None, None, self.vocab_size], name='x_sent')
#if topic_params.use_topic_embedding:
# self.x_doc_mask = tf.placeholder(tf.float32, [None, self.vocab_size], name='x_doc_mask')
#self.input_batch_size = tf.placeholder(tf.int32, (), name='input_batch_size')
self.input_batch_size = tf.shape(self.x)[0]
#if topic_params.use_sent_topic_rep:
# self.input_batch_size_sent = tf.shape(self.x_sent)[0]
# self.input_batch_len_sent = tf.shape(self.x_sent)[1]
# self.batch_size_sent = self.input_batch_size_sent * self.input_batch_len_sent
# encoder
with tf.variable_scope('TM_encoder', reuse=tf.AUTO_REUSE):
self.enc_vec = utils.mlp(self.x, [self.n_hidden], self.non_linearity, initializer=initializer_nvdm[0])
#self.enc_vec = utils.mlp(self.x, [self.n_hidden, self.n_hidden], self.non_linearity, initializer=initializer_nvdm[0])
#self.enc_vec = utils.mlp(self.x, [self.n_hidden, self.n_hidden], self.non_linearity)
self.mean = utils.nvdm_linear(self.enc_vec,
self.n_topic,
scope='mean',
matrix_initializer=initializer_nvdm[1][0],
bias_initializer=initializer_nvdm[1][1])
self.logsigm = utils.nvdm_linear(self.enc_vec,
self.n_topic,
bias_start_zero=True,
matrix_start_zero=True,
scope='logsigm',
matrix_initializer=initializer_nvdm[2][0],
bias_initializer=initializer_nvdm[2][1])
self.kld = -0.5 * tf.reduce_sum(1 - tf.square(self.mean) + 2 * self.logsigm - tf.exp(2 * self.logsigm), 1)
#self.kld = self.mask*self.kld # mask paddings
self.kld = tf.multiply(self.mask, self.kld, name='kld') # mask paddings
#if topic_params.use_sent_topic_rep:
# self.x_sent_reshape = tf.reshape(self.x_sent, [-1, self.vocab_size])
# self.enc_vec_sent = utils.mlp(self.x_sent_reshape, [self.n_hidden], self.non_linearity)
# #self.enc_vec = utils.mlp(self.x, [self.n_hidden, self.n_hidden], self.non_linearity)
# self.mean_sent = utils.nvdm_linear(self.enc_vec_sent, self.n_topic, scope='mean')
# self.logsigm_sent = utils.nvdm_linear(self.enc_vec_sent,
# self.n_topic,
# bias_start_zero=True,
# matrix_start_zero=True,
# scope='logsigm')
#if topic_params.prior_emb_for_topics:
# W_prior = tf.get_variable(
# 'embeddings_TM_prior',
# dtype=tf.float32,
# initializer=prior_embeddings,
# trainable=False
# )
"""
W_prior_proj = tf.get_variable(
'embeddings_TM_prior_proj',
[prior_embeddings.shape[1], self.n_topic],
dtype=tf.float32,
trainable=False
)
W_prior = tf.matmul(W_prior, W_prior_proj, name='W_prior_projected')
"""
with tf.variable_scope('TM_decoder', reuse=tf.AUTO_REUSE):
if self.n_sample == 1:
eps = tf.random_normal((self.input_batch_size, self.n_topic), mean=0.0, stddev=1.0, seed=seed)
#doc_vec = tf.mul(tf.exp(self.logsigm), eps) + self.mean
## Hidden representation to be used in BERT
self.doc_vec = tf.add(tf.multiply(tf.exp(self.logsigm), eps), self.mean, name='doc_hidden')
self.doc_vec = tf.nn.softmax(self.doc_vec, axis = 1)
self.last_h = self.doc_vec
logits_projected, self.decoding_matrix = utils.nvdm_linear(self.doc_vec,
self.vocab_size,
scope='projection',
get_matrix=True,
matrix_initializer=initializer_nvdm[3][0],
bias_initializer=initializer_nvdm[3][1])
logits = tf.nn.log_softmax(logits_projected)
self.recons_loss = -tf.reduce_sum(tf.multiply(logits, self.x), 1)
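                # Supervised head on top of the topic proportions: per-label sigmoid
                # cross-entropy (masked) for multi-label data, softmax cross-entropy
                # against one-hot labels otherwise.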
sup_logits = utils.nvdm_linear(self.doc_vec, self.n_labels, scope='supervised')
if topic_params.multilabel:
self.sup_prob = tf.nn.sigmoid(sup_logits)
self.supervised_loss = tf.multiply(self.mask, tf.reduce_sum(tf.losses.sigmoid_cross_entropy(self.label_ids, sup_logits , reduction="none"), axis=-1))
else:
self.sup_prob = tf.nn.softmax(sup_logits, axis=-1)
log_prob = tf.nn.log_softmax(sup_logits)
self.one_hot_labels = tf.one_hot(self.label_ids, depth=n_labels, on_value = 1.0, off_value = 0.0, dtype=tf.float32)
self.supervised_loss = -tf.reduce_sum(tf.multiply(log_prob, self.one_hot_labels), 1)
"""
if topic_params.use_topic_embedding:
#self.last_h_topic_emb = utils.nvdm_linear(tf.nn.softmax(self.last_h, axis=1), self.vocab_size, scope='projection')
#self.top_k = tf.nn.top_k(self.decoding_matrix, k=topic_params.use_k_topic_words, sorted=False)
topics_masked = tf.multiply(tf.expand_dims(self.x_doc_mask, axis=1), tf.expand_dims(self.decoding_matrix, axis=0), name='topics_masked')
self.top_k = tf.nn.top_k(topics_masked, k=topic_params.use_k_topic_words, sorted=False)
if topic_params.prior_emb_for_topics:
self.top_k_embeddings = tf.nn.embedding_lookup(W_prior, self.top_k.indices)
self.topic_emb_size = prior_embeddings.shape[1]
#self.topic_emb_size = prior_embeddings.shape[1] * topic_params.use_k_topics
#self.topic_emb_size = prior_embeddings.shape[1] + self.n_topic
#self.topic_emb_size = self.n_topic
#self.topic_emb_size = self.n_topic * 2
else:
self.top_k_embeddings = tf.nn.embedding_lookup(tf.transpose(self.decoding_matrix), self.top_k.indices)
#self.topic_emb_size = self.n_topic
self.topic_emb_size = self.n_topic * 2
#self.top_k_embeddings = tf.multiply(tf.expand_dims(tf.nn.softmax(self.top_k.values, axis=1), axis=2), self.top_k_embeddings)
#self.temp_1 = tf.expand_dims(tf.nn.softmax(self.top_k.values, axis=2), axis=2)
#self.topic_embeddings = tf.squeeze(tf.matmul(self.temp_1, self.top_k_embeddings), axis=2, name='topic_embeddings')
#self.topic_embeddings = tf.reduce_sum(self.top_k_embeddings, axis=1, name='topic_embeddings')
#self.topic_embeddings = tf.reduce_mean(self.top_k_embeddings, axis=1, name='topic_embeddings')
self.topic_embeddings = tf.reduce_mean(self.top_k_embeddings, axis=2, name='topic_embeddings')
if topic_params.use_k_topics > 0:
# Masking document topic proportion vector
top_k_h_values, top_k_h_indices = tf.nn.top_k(self.last_h, k=topic_params.use_k_topics, sorted=False, name='top_k_h')
row_numbers = tf.tile(tf.expand_dims(tf.range(0, self.input_batch_size), 1), [1, topic_params.use_k_topics], name='row_numbers')
full_indices = tf.concat([tf.expand_dims(row_numbers, -1), tf.expand_dims(top_k_h_indices, -1)], axis=2)
full_indices = tf.reshape(full_indices, [-1, 2], name='full_indices')
#mask_updates = tf.ones([self.input_batch_size * topic_params.use_k_topics], dtype=tf.float32, name='mask_updates')
#new_mask = tf.scatter_nd(full_indices, mask_updates, [self.input_batch_size, self.n_topic], name='new_mask')
#last_h_softmax = tf.multiply(tf.nn.softmax(self.last_h, axis=1), new_mask, name='last_h_softmax')
last_h_softmax = tf.scatter_nd(
full_indices,
tf.reshape(tf.nn.softmax(top_k_h_values, axis=1), [-1]),
#tf.ones([self.input_batch_size * topic_params.use_k_topics], dtype=tf.float32),
[self.input_batch_size, self.n_topic],
name='last_h_softmax'
)
else:
last_h_softmax = tf.nn.softmax(self.last_h, axis=1, name='last_h_softmax')
#last_h_softmax = self.last_h
#self.last_h_topic_emb = tf.matmul(last_h_softmax, self.topic_embeddings, name='last_h_topic_emb')
self.last_h_topic_emb = tf.squeeze(tf.matmul(tf.expand_dims(last_h_softmax, axis=1), self.topic_embeddings), axis=1, name='last_h_topic_emb')
#temp = tf.nn.embedding_lookup(self.topic_embeddings, top_k_h_indices)
#self.last_h_topic_emb = tf.reduce_sum(temp, axis=1, name='last_h_topic_emb')
#self.last_h_topic_emb = tf.reshape(temp, [self.input_batch_size, self.topic_emb_size], name='last_h_topic_emb')
#self.last_h_topic_emb = tf.concat([self.last_h_topic_emb, last_h_softmax], axis=1)
#self.last_h_topic_emb = tf.concat([self.last_h_topic_emb, self.last_h], axis=1)
"""
else:
eps = tf.random_normal((self.n_sample*self.input_batch_size, self.n_topic), mean=0.0, stddev=1.0, seed=seed)
eps_list = tf.split(eps, self.n_sample, 0)
recons_loss_list = []
doc_vec_list = []
for i in range(self.n_sample):
if i > 0: tf.get_variable_scope().reuse_variables()
curr_eps = eps_list[i]
doc_vec = tf.add(tf.multiply(tf.exp(self.logsigm), curr_eps), self.mean)
doc_vec = tf.nn.softmax(doc_vec, axis=1)
doc_vec_list.append(doc_vec)
logits = tf.nn.log_softmax(utils.nvdm_linear(doc_vec, self.vocab_size, scope='projection'))
recons_loss_list.append(-tf.reduce_sum(tf.multiply(logits, self.x), 1))
self.recons_loss = tf.add_n(recons_loss_list) / self.n_sample
self.doc_vec = tf.add_n(doc_vec_list) / self.n_sample
self.last_h = self.doc_vec
sup_logits = utils.nvdm_linear(self.doc_vec, self.n_labels, scope='supervised')
if topic_params.multilabel:
self.sup_prob = tf.nn.sigmoid(sup_logits)
self.supervised_loss = tf.multiply(self.mask, tf.reduce_sum(tf.losses.sigmoid_cross_entropy(self.label_ids, sup_logits , reduction="none"), axis=-1))
else:
self.sup_prob = tf.nn.softmax(sup_logits, axis=-1)
log_prob = tf.nn.log_softmax(sup_logits)
self.one_hot_labels = tf.one_hot(self.label_ids, depth=n_labels, on_value = 1.0, off_value = 0.0, dtype=tf.float32)
self.supervised_loss = -tf.reduce_sum(tf.multiply(log_prob, self.one_hot_labels), 1)
""""
if topic_params.use_sent_topic_rep:
if self.n_sample == 1:
eps_sent = tf.random_normal((self.batch_size_sent, self.n_topic), mean=0.0, stddev=1.0, seed=seed)
self.last_h_sent = tf.add(tf.multiply(tf.exp(self.logsigm_sent), eps_sent), self.mean_sent, name='sent_hidden')
self.last_h_sent = tf.reshape(self.last_h_sent, [self.input_batch_size_sent, self.input_batch_len_sent, self.n_topic])
if topic_params.use_topic_embedding:
#self.last_h_topic_emb_sent = utils.nvdm_linear(tf.nn.softmax(self.last_h_sent, axis=1), self.vocab_size, scope='projection')
if topic_params.use_k_topics > 0:
# Masking sentence topic proportion vector
top_k_h_sent_values, top_k_h_sent_indices = tf.nn.top_k(self.last_h_sent, k=topic_params.use_k_topics, sorted=False, name='top_k_h_sent')
row_numbers_sent = tf.tile(tf.expand_dims(tf.range(0, self.batch_size_sent), 1), [1, topic_params.use_k_topics], name='row_numbers_sent')
full_indices_sent = tf.concat([tf.expand_dims(row_numbers_sent, -1), tf.expand_dims(top_k_h_sent_indices, -1)], axis=2)
full_indices_sent = tf.reshape(full_indices_sent, [-1, 2], name='full_indices_sent')
#mask_updates_sent = tf.ones([self.batch_size_sent * topic_params.use_k_topics], dtype=tf.float32, name='mask_updates_sent')
#new_mask_sent = tf.scatter_nd(full_indices_sent, mask_updates_sent, [self.batch_size_sent, self.n_topic], name='new_mask_sent')
#last_h_softmax_sent = tf.multiply(tf.nn.softmax(self.last_h_sent, axis=1), new_mask_sent, name='last_h_softmax_sent')
last_h_softmax_sent = tf.scatter_nd(full_indices_sent, tf.reshape(tf.nn.softmax(top_k_h_sent_values, axis=1), [-1]), [self.batch_size_sent, self.n_topic], name='last_h_softmax_sent')
else:
last_h_softmax_sent = tf.nn.softmax(self.last_h_sent, axis=2, name='last_h_softmax_sent')
self.last_h_topic_emb_sent = tf.matmul(last_h_softmax_sent, self.topic_embeddings, name='last_h_topic_emb_sent')
#self.last_h_topic_emb_sent = tf.concat([self.last_h_topic_emb_sent, self.last_h_sent], axis=2, name='last_h_topic_emb_sent')
#self.last_h_topic_emb_sent = tf.concat([self.last_h_topic_emb_sent, last_h_softmax_sent], axis=2, name='last_h_topic_emb_sent')
#self.last_h_topic_emb_sent = tf.reshape(self.last_h_topic_emb_sent, [self.input_batch_size_sent, self.input_batch_len_sent, self.vocab_size])
else:
print("Error: model_NVDM.py - Decoder")
sys.exit()
"""
#self.objective_TM = self.recons_loss + self.kld
#self.objective_TM = tf.add(self.recons_loss, self.kld, name='TM_loss_unnormed')
#self.final_loss = tf.add(self.recons_loss, self.kld, name='TM_loss_unnormed')
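        # Joint objective: (1 - beta) * (reconstruction + KL) + beta * supervised loss,
        # with the mixing weight beta taken from topic_params.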
self.unsupervised_loss = tf.add(self.recons_loss, self.kld, name='TM_loss_unnormed')
self.final_loss = tf.add((1-topic_params.beta)*self.unsupervised_loss, topic_params.beta*(self.supervised_loss), "TM_combined_loss")
self.objective_TM = tf.reduce_mean(self.final_loss)
"""
if topic_params.TM_uniqueness_loss:
## NVDM topic uniqueness loss
eye = tf.constant(np.eye(self.n_topic), dtype=tf.float32)
topicnorm = matrix / tf.sqrt(tf.reduce_sum(tf.square(self.decoding_matrix), 1, keepdims=True))
uniqueness = tf.reduce_max(tf.square(tf.matmul(topicnorm, tf.transpose(topicnorm)) - eye))
self.objective_TM += topic_params.alpha_uniqueness * uniqueness
"""
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
#fullvars = tf.trainable_variables()
#enc_vars = utils.variable_parser(fullvars, 'TM_encoder')
enc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='TM_encoder')
#dec_vars = utils.variable_parser(fullvars, 'TM_decoder')
dec_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='TM_decoder')
self.pretrain_saver = tf.train.Saver(enc_vars + dec_vars)
enc_grads = tf.gradients(self.objective_TM, enc_vars)
dec_grads = tf.gradients(self.objective_TM, dec_vars)
self.optim_enc = optimizer.apply_gradients(zip(enc_grads, enc_vars))
self.optim_dec = optimizer.apply_gradients(zip(dec_grads, dec_vars))
## Pretraining of NVDM-TM
def pretrain(self, dataset, topic_params, nvdm_datadir , session,
#training_epochs=1000, alternate_epochs=10):
#training_epochs=100, alternate_epochs=10):
training_epochs=20, alternate_epochs=10):
#training_epochs=1, alternate_epochs=1):
#log_dir = os.path.join(topic_params.model, 'logs_nvdm_pretrain')
#model_dir_ir_nvdm = os.path.join(topic_params.model, 'model_ir_nvdm_pretrain')
#model_dir_ppl_nvdm = os.path.join(topic_params.model, 'model_ppl_nvdm_pretrain')
log_dir = os.path.join(topic_params.output_dir, 'logs_nvdm_pretrain')
model_dir_ir_nvdm = os.path.join(topic_params.output_dir, 'model_ir_nvdm_pretrain')
model_dir_ppl_nvdm = os.path.join(topic_params.output_dir, 'model_ppl_nvdm_pretrain')
model_dir_f1_nvdm = os.path.join(topic_params.output_dir, 'model_f1_nvdm_pretrain')
#model_dir_supervised = os.path.join(topic_params.model, 'model_supervised_nvdm_pretrain')
if not os.path.isdir(log_dir):
os.mkdir(log_dir)
if not os.path.isdir(model_dir_ir_nvdm):
os.mkdir(model_dir_ir_nvdm)
        if not os.path.isdir(model_dir_ppl_nvdm):
            os.mkdir(model_dir_ppl_nvdm)
        if not os.path.isdir(model_dir_f1_nvdm):
            os.mkdir(model_dir_f1_nvdm)
#if not os.path.isdir(model_dir_supervised):
# os.mkdir(model_dir_supervised)
#train_url = os.path.join(topic_params.dataset, 'training_nvdm_docs_non_replicated.csv')
#dev_url = os.path.join(topic_params.dataset, 'validation_nvdm_docs_non_replicated.csv')
#test_url = os.path.join(topic_params.dataset, 'test_nvdm_docs_non_replicated.csv')
train_url = os.path.join(nvdm_datadir, 'training_nvdm_docs_non_replicated.csv')
dev_url = os.path.join(nvdm_datadir, 'validation_nvdm_docs_non_replicated.csv')
test_url = os.path.join(nvdm_datadir, 'test_nvdm_docs_non_replicated.csv')
train_set, train_count, train_ids, train_doc_ids = utils.data_set(train_url, topic_params)
test_set, test_count, test_ids, test_doc_ids = utils.data_set(test_url, topic_params)
dev_set, dev_count, dev_ids, dev_doc_ids = utils.data_set(dev_url, topic_params)
dev_batches = utils.create_batches(len(dev_set), self.batch_size, shuffle=False)
#dev_batches = utils.create_batches(len(dev_set), 512, shuffle=False)
test_batches = utils.create_batches(len(test_set), self.batch_size, shuffle=False)
#test_batches = utils.create_batches(len(test_set), 512, shuffle=False)
#training_labels = np.array(
# [[y] for y, _ in dataset.rows('training_nvdm_docs_non_replicated', num_epochs=1)]
#)
#validation_labels = np.array(
# [[y] for y, _ in dataset.rows('validation_nvdm_docs_non_replicated', num_epochs=1)]
#)
#test_labels = np.array(
# [[y] for y, _ in dataset.rows('test_nvdm_docs_non_replicated', num_epochs=1)]
#)
patience = topic_params.nvdm_patience
patience_count_ppl = 0
patience_count_f1 = 0
best_dev_ppl = np.inf
best_dev_f1 = -np.inf
best_val_nvdm_IR = -1.0
ppl_model = False
ir_model = False
f1_model = False
for epoch in range(training_epochs):
epoch_counter = epoch + 1
train_batches = utils.create_batches(len(train_set), self.batch_size, shuffle=True)
#train_batches = utils.create_batches(len(train_set), 512, shuffle=True)
#-------------------------------
# train
for switch in range(0, 2):
if switch == 0:
optim = self.optim_dec
print_mode = 'updating decoder'
else:
optim = self.optim_enc
print_mode = 'updating encoder'
for i in range(alternate_epochs):
print_ppx, print_ppx_perdoc, print_kld, print_sup_loss, print_macro_prec, print_macro_recall, print_macro_f1_score, print_acc = self.run_epoch(
train_batches,
train_set,
train_count,
topic_params,
session,
input_labels = train_ids,
optimizer=optim
)
print('| Epoch train: {:d} |'.format(epoch_counter),
print_mode, '{:d}'.format(i),
'| Corpus Perplexity: {:.5f}'.format(print_ppx), # perplexity for all docs
'| Per doc Perplexity: {:.5f}'.format(print_ppx_perdoc), # perplexity for per doc
'| KLD: {:.5}'.format(print_kld),
'| Supervised loss: {:.5f}'.format(print_sup_loss))
if epoch_counter >= 1 and epoch_counter % topic_params.nvdm_validation_ppl_freq == 0:
ppl_model = True
print_ppx, print_ppx_perdoc, print_kld, print_sup_loss, print_macro_prec, print_macro_recall, print_macro_f1_score, print_acc = self.run_epoch(
dev_batches,
dev_set,
dev_count,
topic_params,
session,
input_labels = dev_ids
)
if print_ppx_perdoc < best_dev_ppl:
#if print_ppx_perdoc <= best_dev_ppl:
best_dev_ppl = print_ppx_perdoc
print("Saving best model.")
self.pretrain_saver.save(session, model_dir_ppl_nvdm + '/model_ppl_nvdm_pretrain', global_step=1)
self.save_to_s3_TM(topic_params)
patience_count_ppl = 0
else:
patience_count_ppl += 1
print('| Epoch dev: {:d} |'.format(epoch_counter),
'| Corpus Perplexity: {:.9f} |'.format(print_ppx),
'| Per doc Perplexity: {:.5f} |'.format(print_ppx_perdoc),
'| KLD: {:.5} |'.format(print_kld),
'| Best dev PPL: {:.5} |'.format(best_dev_ppl))
with open(log_dir + "/logs_ppl_nvdm_pretrain.txt", "a") as f:
f.write('| Epoch Val: {:d} || Val Corpus PPL: {:.9f} || Val Per doc PPL: {:.5f} || Best Val PPL: {:.5} || KLD Val: {:.5} |\n'.format(epoch+1, print_ppx, print_ppx_perdoc, best_dev_ppl, print_kld))
if epoch_counter >= 1 and epoch_counter % topic_params.nvdm_validation_f1_freq == 0:
f1_model = True
print_ppx, print_ppx_perdoc, print_kld, print_sup_loss, print_macro_prec, print_macro_recall, print_macro_f1_score, print_acc = self.run_epoch(
dev_batches,
dev_set,
dev_count,
topic_params,
session,
input_labels = dev_ids
)
if print_macro_f1_score > best_dev_f1:
best_dev_f1 = print_macro_f1_score
print("Saving best model.")
self.pretrain_saver.save(session, model_dir_f1_nvdm + '/model_f1_nvdm_pretrain', global_step=1)
self.save_to_s3_TM(topic_params)
patience_count_f1 = 0
else:
patience_count_f1 += 1
print('| Epoch dev: {:d} |'.format(epoch_counter),
'| Macro F1 : {:.9f} |'.format(print_macro_f1_score),
'| Macro Prec: {:.5f} |'.format(print_macro_prec),
'| Macro Recall: {:.5} |'.format(print_macro_recall),
'| Best F1: {:.5} |'.format(best_dev_f1))
with open(log_dir + "/logs_f1_nvdm_pretrain.txt", "a") as f:
f.write('| Epoch Val: {:d} || Macro F1: {:.9f} || Macro Prec: {:.5f} || Macro Recall: {:.5} || Best Macro F1: {:.5} || Accuracy: {:.5} |\n'.format(epoch+1, print_macro_f1_score, print_macro_prec, print_macro_recall, best_dev_f1 , print_acc))
if epoch_counter >= 1 and epoch_counter % topic_params.nvdm_validation_ir_freq == 0:
ir_model = True
validation_vectors_nvdm = self.hidden_vectors(
#dataset.batches_nvdm_LM('validation_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label),
dataset.batches_nvdm_LM('validation_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, self.vocab_size, num_epochs=1, multilabel=topic_params.multilabel),
topic_params,
session
)
training_vectors_nvdm = self.hidden_vectors(
#dataset.batches_nvdm_LM('training_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label),
dataset.batches_nvdm_LM('training_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, self.vocab_size, num_epochs=1, multilabel=topic_params.multilabel),
topic_params,
session
)
val_nvdm_ir, _ = eval.evaluate(
training_vectors_nvdm,
validation_vectors_nvdm,
training_labels,
validation_labels,
recall=[0.02],
num_classes=topic_params.nvdm_num_classes,
multi_label=topic_params.multilabel
)
val_nvdm_ir = val_nvdm_ir[0]
# Saving model and Early stopping on IR
if val_nvdm_ir > best_val_nvdm_IR:
best_val_nvdm_IR = val_nvdm_ir
print('saving: {}'.format(model_dir_ir_nvdm))
self.pretrain_saver.save(session, model_dir_ir_nvdm + '/model_ir_nvdm_pretrain', global_step=1)
self.save_to_s3_TM(topic_params)
# patience_count = 0
#else:
# patience_count += 1
print("Epoch: %i, Val NVDM IR: %s, best val NVDM IR: %s\n" %
(epoch_counter, val_nvdm_ir, best_val_nvdm_IR))
# logging information
with open(log_dir + "/logs_ir_nvdm_pretrain.txt", "a") as f:
f.write("Epoch: %i, Val NVDM IR: %s, best val NVDM IR: %s\n" %
(epoch_counter, val_nvdm_ir, best_val_nvdm_IR))
if topic_params.validate_supervised_TM == "ppl":
if patience_count_ppl > patience:
print("Early stopping.")
break
elif topic_params.validate_supervised_TM == "f1":
if patience_count_f1 > patience:
print("Early stopping.")
break
if ppl_model:
print("Calculating Test PPL.")
self.pretrain_saver.restore(session, tf.train.latest_checkpoint(model_dir_ppl_nvdm))
print_ppx, print_ppx_perdoc, print_kld, print_sup_loss, print_macro_prec, print_macro_recall, print_macro_f1_score, print_acc= self.run_epoch(
test_batches,
test_set,
test_count,
topic_params,
session,
input_labels = test_ids
)
print('| Corpus Perplexity: {:.9f}'.format(print_ppx),
'| Per doc Perplexity: {:.5f}'.format(print_ppx_perdoc),
'| KLD: {:.5}'.format(print_kld))
with open(log_dir + "/logs_ppl_nvdm_pretrain.txt", "a") as f:
f.write('\n\nTest Corpus PPL: {:.9f} || Test Per doc PPL: {:.5f} || KLD Test: {:.5} |\n'.format(print_ppx, print_ppx_perdoc, print_kld))
if f1_model:
print("Calculating Test F1.")
self.pretrain_saver.restore(session, tf.train.latest_checkpoint(model_dir_f1_nvdm))
print_ppx, print_ppx_perdoc, print_kld, print_sup_loss, print_macro_prec, print_macro_recall, print_macro_f1_score, print_acc = self.run_epoch(
test_batches,
test_set,
test_count,
topic_params,
session,
input_labels = test_ids
)
print('| Macro F1: {:.9f}'.format(print_macro_f1_score),
'| Macro prec: {:.5f}'.format(print_macro_prec),
'| Macro recall : {:.5}'.format(print_macro_recall),
'| Acc : {:.5}'.format(print_acc)
)
with open(log_dir + "/logs_f1_nvdm_pretrain.txt", "a") as f:
f.write('\n\nTest Macro F1: {:.9f} || Test Macro prec : {:.5f} || Test Macro recall : {:.5} || Test Acc : {:.5} |\n'.format(print_macro_f1_score, print_macro_prec, print_macro_recall, print_acc ))
if ir_model:
print("Calculating Test IR.")
self.pretrain_saver.restore(session, tf.train.latest_checkpoint(model_dir_ir_nvdm))
test_vectors_nvdm = self.hidden_vectors(
#dataset.batches_nvdm_LM('test_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label),
dataset.batches_nvdm_LM('test_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, self.vocab_size, num_epochs=1, multilabel=topic_params.multilabel),
topic_params,
session
)
test_nvdm_ir, _ = eval.evaluate(
training_vectors_nvdm,
test_vectors_nvdm,
training_labels,
test_labels,
recall=[0.02],
num_classes=topic_params.nvdm_num_classes,
multi_label=topic_params.multilabel
)
test_nvdm_ir = test_nvdm_ir[0]
print("Epoch: %i, Test NVDM IR: %s\n" %
(epoch_counter, test_nvdm_ir))
# logging information
with open(log_dir + "/logs_ir_nvdm_pretrain.txt", "a") as f:
f.write("Epoch: %i, Test NVDM IR: %s\n" %
(epoch_counter, test_nvdm_ir))
def hidden_vectors(self, data, topic_params, session):
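# Collects the latent document vector (self.last_h) for every batch yielded by `data`;
# these vectors serve as document features for the IR evaluation in pretrain().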
vecs = []
for y, x, count, mask in data:
feed_dict = {
self.x.name: x,
self.mask.name: mask
#self.input_batch_size: x.shape[0]
}
vecs.extend(
session.run([self.last_h], feed_dict=feed_dict)[0]
)
return np.array(vecs)
def run_epoch(self, input_batches, input_set, input_count, topic_params, session, input_labels = None, optimizer=None):
loss_sum = 0.0
ppx_sum = 0.0
kld_sum = 0.0
supervised_loss_sum = 0.0
word_count = 0
doc_count = 0
doc_pred = []
doc_labels = []
for idx_batch in input_batches:
data_batch, count_batch, mask, label_batch = utils.fetch_data(
input_set, input_count, idx_batch, self.vocab_size, topic_params, labels = input_labels)
#import pdb; pdb.set_trace()
input_feed = {self.x.name: data_batch,
self.mask.name: mask,
self.label_ids.name: label_batch}
if not optimizer is None:
_, (loss, kld, supervised_loss, prob) = session.run((optimizer,
[self.unsupervised_loss, self.kld, self.supervised_loss, self.sup_prob]),
input_feed)
else:
loss, kld, supervised_loss, prob = session.run([self.unsupervised_loss, self.kld, self.supervised_loss, self.sup_prob],
input_feed)
if topic_params.multilabel:
prob_arr = np.asarray(prob)
multilabel_pred = np.where(prob_arr >= 0.5, 1, 0)
pred = np.ndarray.tolist(multilabel_pred)
else:
pred = np.argmax(prob, axis = 1)
assert len(pred) == len(label_batch) == len(mask)
for i in range(len(mask)):
if mask[i] != 0.0:
doc_pred.append(pred[i])
doc_labels.append(label_batch[i])
loss_sum += np.sum(loss)
kld_sum += np.sum(kld) / np.sum(mask)
supervised_loss_sum += np.sum(supervised_loss) / np.sum(mask)
word_count += np.sum(count_batch)
# to avoid nan error
count_batch = np.add(count_batch, 1e-12)
# per document loss
ppx_sum += np.sum(np.divide(loss, count_batch))
doc_count += np.sum(mask)
assert -1 not in doc_labels
if topic_params.multilabel:
doc_labels = np.asarray(doc_labels)
doc_pred = np.asarray(doc_pred)
print_macro_prec, print_macro_recall, print_macro_f1_score, _ = precision_recall_fscore_support(doc_labels, doc_pred, average = "macro")
#print_micro_prec, print_micro_recall, print_micro_f1_score, _ = precision_recall_fscore_support(doc_labels, doc_pred, average = "micro")
print_acc = accuracy_score(doc_labels, doc_pred)
print_sup_loss = supervised_loss_sum/len(input_batches)
print_ppx = np.exp(loss_sum / word_count)
print_ppx_perdoc = np.exp(ppx_sum / doc_count)
print_kld = kld_sum/len(input_batches)
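# print_ppx: corpus-level perplexity = exp(total loss / total word count)
# print_ppx_perdoc: per-document perplexity = exp(mean over documents of loss_d / word_count_d)
# print_kld / print_sup_loss: KL divergence and supervised loss averaged over batches
# (each batch normalised by its mask sum)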
return print_ppx, print_ppx_perdoc, print_kld, print_sup_loss, print_macro_prec, print_macro_recall, print_macro_f1_score, print_acc
"""
def topic_dist(self, input_batches, input_set, input_count, topic_params, session):
topic_dist = []
mask_list = []
for idx_batch in input_batches:
data_batch, count_batch, mask = utils.fetch_data(
input_set, input_count, idx_batch, self.vocab_size)
input_feed = {self.x.name: data_batch,
self.mask.name: mask}
doc_vec = session.run([self.doc_vec], input_feed)
topic_dist.extend(list(doc_vec[0]))
mask_list.extend(list(mask))
topic_dist_unique = []
for num, m in enumerate(mask_list):
if m!= 0.0:
topic_dist_unique.append(topic_dist[num])
topic_dist_unique = np.asarray(topic_dist_unique)
return topic_dist_unique, mask_list
"""
def topic_dist(self, input_batches, input_set, input_doc_ids , input_count, topic_params, session):
topic_dist = []
mask_list = []
doc_id_list = []
for idx_batch in input_batches:
data_batch, count_batch, mask = utils.fetch_data(
input_set, input_count, idx_batch, self.vocab_size, topic_params)
input_feed = {self.x.name: data_batch,
self.mask.name: mask}
doc_vec = session.run([self.doc_vec], input_feed)
topic_dist.extend(list(doc_vec[0]))
mask_list.extend(list(mask))
for idx in idx_batch:
if idx != -1:
doc_id_list.append(input_doc_ids[idx])
else:
doc_id_list.append(-1)
assert len(topic_dist) == len(doc_id_list)
topic_dist_unique = {}
for id, dist in zip(doc_id_list, topic_dist):
if id != -1:
topic_dist_unique[str(id)] = dist
return topic_dist_unique, mask_list
def save_to_s3_TM(self, topic_params):
pass
def run_epoch_v2(self, data, topic_params, session):
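# Note: this helper references a composite `model` object (with a .topic_model attribute)
# and `sys`, neither of which is defined in this class; it appears to be carried over from
# a joint TM+LM training script and is not invoked by pretrain() above.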
# train_y, train_x, train_count, train_mask = dataset.batches_nvdm_LM(training_data_filename_TM, topic_params.batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label)
# val_y, val_x, val_count, val_mask = dataset.batches_nvdm_LM(validation_data_filename_TM, topic_params.batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label)
# test_y, test_x, test_count, test_mask = dataset.batches_nvdm_LM(test_data_filename_TM, topic_params.batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label)
kld_sum = []
this_nvdm_loss_normed = []
this_nvdm_loss_unnormed = []
this_nvdm_words = []
for nvdm_y, nvdm_x, nvdm_count, nvdm_mask in data:
nvdm_feed_dict = {
model.topic_model.x.name: nvdm_x,
model.topic_model.mask.name: nvdm_mask#,
#model.topic_model.input_batch_size: nvdm_x.shape[0]
}
if topic_params.supervised:
sys.exit()
else:
loss, kld = session.run([model.topic_model.final_loss,
model.topic_model.kld],
feed_dict=nvdm_feed_dict)
nvdm_count = np.add(nvdm_count, 1e-12)
this_nvdm_loss_normed.extend(np.divide(loss, nvdm_count))
this_nvdm_loss_unnormed.extend(loss)
this_nvdm_words.append(np.sum(nvdm_count))
kld_sum.append(np.sum(kld) / np.sum(nvdm_mask))
total_nvdm_nll = np.mean(this_nvdm_loss_unnormed)
#total_nvdm_ppl = np.exp(np.sum(this_nvdm_loss_unnormed) / np.sum(this_val_nvdm_words))
total_nvdm_ppl = np.exp(np.mean(this_nvdm_loss_normed))
print_kld = np.mean(kld_sum)
return total_nvdm_nll, total_nvdm_ppl, print_kld
2024-01-10 | YatinChaudhary/TopicBERT | TopicBERT~topic_bert~nvdm~model_NVDM_yatin.py | """NVDM Tensorflow implementation by Yishu Miao"""
from __future__ import print_function
import numpy as np
import tensorflow as tf
import math
import os
import sys  # used by sys.exit() in the decoder error branch and run_epoch_v2 below
import model.utils as utils
from sklearn.preprocessing import MultiLabelBinarizer
import sklearn.metrics.pairwise as pw
from gensim.models import CoherenceModel
from gensim.corpora.dictionary import Dictionary
import model.evaluate as eval
import model.data_lstm as data
seed = 42
tf_op_seed = 1234
np.random.seed(seed)
tf.set_random_seed(seed)
#learning_rate = 5e-5
#batch_size = 64
#n_hidden = 256
n_topic = 150
n_sample = 1
non_linearity = tf.nn.tanh
class NVDM(object):
""" Neural Variational Document Model -- BOW VAE.
"""
def __init__(self, params, prior_embeddings=None, initializer_nvdm=None):
self.vocab_size = params.TM_vocab_length
self.n_hidden = params.hidden_size_TM
self.n_topic = n_topic
self.n_sample = n_sample
self.non_linearity = non_linearity
self.learning_rate = params.learning_rate
self.batch_size = params.batch_size
self.x = tf.placeholder(tf.float32, [None, self.vocab_size], name='x')
self.mask = tf.placeholder(tf.float32, [None], name='mask') # mask paddings
if params.use_sent_topic_rep:
self.x_sent = tf.placeholder(tf.float32, [None, None, self.vocab_size], name='x_sent')
if params.use_topic_embedding:
self.x_doc_mask = tf.placeholder(tf.float32, [None, self.vocab_size], name='x_doc_mask')
#self.input_batch_size = tf.placeholder(tf.int32, (), name='input_batch_size')
self.input_batch_size = tf.shape(self.x)[0]
if params.use_sent_topic_rep:
self.input_batch_size_sent = tf.shape(self.x_sent)[0]
self.input_batch_len_sent = tf.shape(self.x_sent)[1]
self.batch_size_sent = self.input_batch_size_sent * self.input_batch_len_sent
# encoder
with tf.variable_scope('TM_encoder', reuse=tf.AUTO_REUSE):
self.enc_vec = utils.mlp(self.x, [self.n_hidden], self.non_linearity, initializer=initializer_nvdm[0])
#self.enc_vec = utils.mlp(self.x, [self.n_hidden, self.n_hidden], self.non_linearity, initializer=initializer_nvdm[0])
#self.enc_vec = utils.mlp(self.x, [self.n_hidden, self.n_hidden], self.non_linearity)
self.mean = utils.nvdm_linear(self.enc_vec,
self.n_topic,
scope='mean',
matrix_initializer=initializer_nvdm[1][0],
bias_initializer=initializer_nvdm[1][1])
self.logsigm = utils.nvdm_linear(self.enc_vec,
self.n_topic,
bias_start_zero=True,
matrix_start_zero=True,
scope='logsigm',
matrix_initializer=initializer_nvdm[2][0],
bias_initializer=initializer_nvdm[2][1])
self.kld = -0.5 * tf.reduce_sum(1 - tf.square(self.mean) + 2 * self.logsigm - tf.exp(2 * self.logsigm), 1)
#self.kld = self.mask*self.kld # mask paddings
self.kld = tf.multiply(self.mask, self.kld, name='kld') # mask paddings
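# Closed-form KL divergence between the approximate posterior N(mean, exp(logsigm)^2)
# and the standard normal prior, summed over topic dimensions:
#   KL(q||p) = -0.5 * sum_k (1 + 2*logsigm_k - mean_k^2 - exp(2*logsigm_k))
# e.g. mean = 0 and logsigm = 0 (sigma = 1) gives KL = 0, as expected when q equals the prior.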
if params.use_sent_topic_rep:
self.x_sent_reshape = tf.reshape(self.x_sent, [-1, self.vocab_size])
self.enc_vec_sent = utils.mlp(self.x_sent_reshape, [self.n_hidden], self.non_linearity)
#self.enc_vec = utils.mlp(self.x, [self.n_hidden, self.n_hidden], self.non_linearity)
self.mean_sent = utils.nvdm_linear(self.enc_vec_sent, self.n_topic, scope='mean')
self.logsigm_sent = utils.nvdm_linear(self.enc_vec_sent,
self.n_topic,
bias_start_zero=True,
matrix_start_zero=True,
scope='logsigm')
if params.prior_emb_for_topics:
W_prior = tf.get_variable(
'embeddings_TM_prior',
dtype=tf.float32,
initializer=prior_embeddings,
trainable=False
)
"""
W_prior_proj = tf.get_variable(
'embeddings_TM_prior_proj',
[prior_embeddings.shape[1], self.n_topic],
dtype=tf.float32,
trainable=False
)
W_prior = tf.matmul(W_prior, W_prior_proj, name='W_prior_projected')
"""
with tf.variable_scope('TM_decoder', reuse=tf.AUTO_REUSE):
if self.n_sample == 1:
eps = tf.random_normal((self.input_batch_size, self.n_topic), mean=0.0, stddev=1.0, seed=seed)
#doc_vec = tf.mul(tf.exp(self.logsigm), eps) + self.mean
self.doc_vec = tf.add(tf.multiply(tf.exp(self.logsigm), eps), self.mean, name='doc_hidden')
self.last_h = self.doc_vec
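# Reparameterisation trick: a sample from N(mean, exp(logsigm)^2) is drawn as
# doc_vec = mean + exp(logsigm) * eps with eps ~ N(0, I), which keeps the sampling
# step differentiable with respect to the encoder parameters.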
logits_projected, self.decoding_matrix = utils.nvdm_linear(self.doc_vec,
self.vocab_size,
scope='projection',
get_matrix=True,
matrix_initializer=initializer_nvdm[3][0],
bias_initializer=initializer_nvdm[3][1])
logits = tf.nn.log_softmax(logits_projected)
self.recons_loss = -tf.reduce_sum(tf.multiply(logits, self.x), 1)
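# Reconstruction term: negative log-likelihood of the bag-of-words input under the
# softmax decoder, i.e. recons_loss = -sum_w count(w) * log p(w | doc_vec).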
if params.use_topic_embedding:
#self.last_h_topic_emb = utils.nvdm_linear(tf.nn.softmax(self.last_h, axis=1), self.vocab_size, scope='projection')
#self.top_k = tf.nn.top_k(self.decoding_matrix, k=params.use_k_topic_words, sorted=False)
topics_masked = tf.multiply(tf.expand_dims(self.x_doc_mask, axis=1), tf.expand_dims(self.decoding_matrix, axis=0), name='topics_masked')
self.top_k = tf.nn.top_k(topics_masked, k=params.use_k_topic_words, sorted=False)
if params.prior_emb_for_topics:
self.top_k_embeddings = tf.nn.embedding_lookup(W_prior, self.top_k.indices)
self.topic_emb_size = prior_embeddings.shape[1]
#self.topic_emb_size = prior_embeddings.shape[1] * params.use_k_topics
#self.topic_emb_size = prior_embeddings.shape[1] + self.n_topic
#self.topic_emb_size = self.n_topic
#self.topic_emb_size = self.n_topic * 2
else:
self.top_k_embeddings = tf.nn.embedding_lookup(tf.transpose(self.decoding_matrix), self.top_k.indices)
#self.topic_emb_size = self.n_topic
self.topic_emb_size = self.n_topic * 2
#self.top_k_embeddings = tf.multiply(tf.expand_dims(tf.nn.softmax(self.top_k.values, axis=1), axis=2), self.top_k_embeddings)
#self.temp_1 = tf.expand_dims(tf.nn.softmax(self.top_k.values, axis=2), axis=2)
#self.topic_embeddings = tf.squeeze(tf.matmul(self.temp_1, self.top_k_embeddings), axis=2, name='topic_embeddings')
#self.topic_embeddings = tf.reduce_sum(self.top_k_embeddings, axis=1, name='topic_embeddings')
#self.topic_embeddings = tf.reduce_mean(self.top_k_embeddings, axis=1, name='topic_embeddings')
self.topic_embeddings = tf.reduce_mean(self.top_k_embeddings, axis=2, name='topic_embeddings')
if params.use_k_topics > 0:
# Masking document topic proportion vector
top_k_h_values, top_k_h_indices = tf.nn.top_k(self.last_h, k=params.use_k_topics, sorted=False, name='top_k_h')
row_numbers = tf.tile(tf.expand_dims(tf.range(0, self.input_batch_size), 1), [1, params.use_k_topics], name='row_numbers')
full_indices = tf.concat([tf.expand_dims(row_numbers, -1), tf.expand_dims(top_k_h_indices, -1)], axis=2)
full_indices = tf.reshape(full_indices, [-1, 2], name='full_indices')
#mask_updates = tf.ones([self.input_batch_size * params.use_k_topics], dtype=tf.float32, name='mask_updates')
#new_mask = tf.scatter_nd(full_indices, mask_updates, [self.input_batch_size, self.n_topic], name='new_mask')
#last_h_softmax = tf.multiply(tf.nn.softmax(self.last_h, axis=1), new_mask, name='last_h_softmax')
last_h_softmax = tf.scatter_nd(
full_indices,
tf.reshape(tf.nn.softmax(top_k_h_values, axis=1), [-1]),
#tf.ones([self.input_batch_size * params.use_k_topics], dtype=tf.float32),
[self.input_batch_size, self.n_topic],
name='last_h_softmax'
)
else:
last_h_softmax = tf.nn.softmax(self.last_h, axis=1, name='last_h_softmax')
#last_h_softmax = self.last_h
#self.last_h_topic_emb = tf.matmul(last_h_softmax, self.topic_embeddings, name='last_h_topic_emb')
self.last_h_topic_emb = tf.squeeze(tf.matmul(tf.expand_dims(last_h_softmax, axis=1), self.topic_embeddings), axis=1, name='last_h_topic_emb')
#temp = tf.nn.embedding_lookup(self.topic_embeddings, top_k_h_indices)
#self.last_h_topic_emb = tf.reduce_sum(temp, axis=1, name='last_h_topic_emb')
#self.last_h_topic_emb = tf.reshape(temp, [self.input_batch_size, self.topic_emb_size], name='last_h_topic_emb')
#self.last_h_topic_emb = tf.concat([self.last_h_topic_emb, last_h_softmax], axis=1)
#self.last_h_topic_emb = tf.concat([self.last_h_topic_emb, self.last_h], axis=1)
else:
#eps = tf.random_normal((self.n_sample*self.batch_size, self.n_topic), mean=0.0, stddev=1.0)
eps = tf.random_normal((self.n_sample*self.input_batch_size, self.n_topic), mean=0.0, stddev=1.0, seed=seed)
eps_list = tf.split(eps, self.n_sample, 0)  # TF 1.x argument order: (value, num_splits, axis)
recons_loss_list = []
for i in range(self.n_sample):
if i > 0: tf.get_variable_scope().reuse_variables()
curr_eps = eps_list[i]
doc_vec = tf.multiply(tf.exp(self.logsigm), curr_eps) + self.mean
logits = tf.nn.log_softmax(utils.nvdm_linear(doc_vec, self.vocab_size, scope='projection'))
recons_loss_list.append(-tf.reduce_sum(tf.multiply(logits, self.x), 1))
self.recons_loss = tf.add_n(recons_loss_list) / self.n_sample
if params.use_sent_topic_rep:
if self.n_sample == 1:
eps_sent = tf.random_normal((self.batch_size_sent, self.n_topic), mean=0.0, stddev=1.0, seed=seed)
self.last_h_sent = tf.add(tf.multiply(tf.exp(self.logsigm_sent), eps_sent), self.mean_sent, name='sent_hidden')
self.last_h_sent = tf.reshape(self.last_h_sent, [self.input_batch_size_sent, self.input_batch_len_sent, self.n_topic])
if params.use_topic_embedding:
#self.last_h_topic_emb_sent = utils.nvdm_linear(tf.nn.softmax(self.last_h_sent, axis=1), self.vocab_size, scope='projection')
if params.use_k_topics > 0:
# Masking sentence topic proportion vector
top_k_h_sent_values, top_k_h_sent_indices = tf.nn.top_k(self.last_h_sent, k=params.use_k_topics, sorted=False, name='top_k_h_sent')
row_numbers_sent = tf.tile(tf.expand_dims(tf.range(0, self.batch_size_sent), 1), [1, params.use_k_topics], name='row_numbers_sent')
full_indices_sent = tf.concat([tf.expand_dims(row_numbers_sent, -1), tf.expand_dims(top_k_h_sent_indices, -1)], axis=2)
full_indices_sent = tf.reshape(full_indices_sent, [-1, 2], name='full_indices_sent')
#mask_updates_sent = tf.ones([self.batch_size_sent * params.use_k_topics], dtype=tf.float32, name='mask_updates_sent')
#new_mask_sent = tf.scatter_nd(full_indices_sent, mask_updates_sent, [self.batch_size_sent, self.n_topic], name='new_mask_sent')
#last_h_softmax_sent = tf.multiply(tf.nn.softmax(self.last_h_sent, axis=1), new_mask_sent, name='last_h_softmax_sent')
last_h_softmax_sent = tf.scatter_nd(full_indices_sent, tf.reshape(tf.nn.softmax(top_k_h_sent_values, axis=1), [-1]), [self.batch_size_sent, self.n_topic], name='last_h_softmax_sent')
else:
last_h_softmax_sent = tf.nn.softmax(self.last_h_sent, axis=2, name='last_h_softmax_sent')
self.last_h_topic_emb_sent = tf.matmul(last_h_softmax_sent, self.topic_embeddings, name='last_h_topic_emb_sent')
#self.last_h_topic_emb_sent = tf.concat([self.last_h_topic_emb_sent, self.last_h_sent], axis=2, name='last_h_topic_emb_sent')
#self.last_h_topic_emb_sent = tf.concat([self.last_h_topic_emb_sent, last_h_softmax_sent], axis=2, name='last_h_topic_emb_sent')
#self.last_h_topic_emb_sent = tf.reshape(self.last_h_topic_emb_sent, [self.input_batch_size_sent, self.input_batch_len_sent, self.vocab_size])
else:
print("Error: model_NVDM.py - Decoder")
sys.exit()
#self.objective_TM = self.recons_loss + self.kld
#self.objective_TM = tf.add(self.recons_loss, self.kld, name='TM_loss_unnormed')
self.final_loss = tf.add(self.recons_loss, self.kld, name='TM_loss_unnormed')
self.objective_TM = tf.reduce_mean(self.final_loss)
if params.TM_uniqueness_loss:
## NVDM topic uniqueness loss
eye = tf.constant(np.eye(self.n_topic), dtype=tf.float32)
topicnorm = self.decoding_matrix / tf.sqrt(tf.reduce_sum(tf.square(self.decoding_matrix), 1, keepdims=True))
uniqueness = tf.reduce_max(tf.square(tf.matmul(topicnorm, tf.transpose(topicnorm)) - eye))
self.objective_TM += params.alpha_uniqueness * uniqueness
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
#fullvars = tf.trainable_variables()
#enc_vars = utils.variable_parser(fullvars, 'TM_encoder')
enc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='TM_encoder')
#dec_vars = utils.variable_parser(fullvars, 'TM_decoder')
dec_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='TM_decoder')
enc_grads = tf.gradients(self.objective_TM, enc_vars)
dec_grads = tf.gradients(self.objective_TM, dec_vars)
self.optim_enc = optimizer.apply_gradients(zip(enc_grads, enc_vars))
self.optim_dec = optimizer.apply_gradients(zip(dec_grads, dec_vars))
## Pretraining of NVDM-TM
def pretrain(self, dataset, params, session,
#training_epochs=1000, alternate_epochs=10):
#training_epochs=100, alternate_epochs=10):
training_epochs=20, alternate_epochs=10):
#training_epochs=1, alternate_epochs=1):
log_dir = os.path.join(params.model, 'logs_nvdm_pretrain')
model_dir_ir_nvdm = os.path.join(params.model, 'model_ir_nvdm_pretrain')
model_dir_ppl_nvdm = os.path.join(params.model, 'model_ppl_nvdm_pretrain')
#model_dir_supervised = os.path.join(params.model, 'model_supervised_nvdm_pretrain')
if not os.path.isdir(log_dir):
os.mkdir(log_dir)
if not os.path.isdir(model_dir_ir_nvdm):
os.mkdir(model_dir_ir_nvdm)
if not os.path.isdir(model_dir_ppl_nvdm):
os.mkdir(model_dir_ppl_nvdm)
#if not os.path.isdir(model_dir_supervised):
# os.mkdir(model_dir_supervised)
train_url = os.path.join(params.dataset, 'training_nvdm_docs_non_replicated.csv')
dev_url = os.path.join(params.dataset, 'validation_nvdm_docs_non_replicated.csv')
test_url = os.path.join(params.dataset, 'test_nvdm_docs_non_replicated.csv')
train_set, train_count = utils.data_set(train_url)
test_set, test_count = utils.data_set(test_url)
dev_set, dev_count = utils.data_set(dev_url)
#dev_batches = utils.create_batches(len(dev_set), self.batch_size, shuffle=False)
dev_batches = utils.create_batches(len(dev_set), 512, shuffle=False)
#test_batches = utils.create_batches(len(test_set), self.batch_size, shuffle=False)
test_batches = utils.create_batches(len(test_set), 512, shuffle=False)
training_labels = np.array(
[[y] for y, _ in dataset.rows('training_nvdm_docs_non_replicated', num_epochs=1)]
)
validation_labels = np.array(
[[y] for y, _ in dataset.rows('validation_nvdm_docs_non_replicated', num_epochs=1)]
)
test_labels = np.array(
[[y] for y, _ in dataset.rows('test_nvdm_docs_non_replicated', num_epochs=1)]
)
patience = params.pretrain_patience
patience_count = 0
best_dev_ppl = np.inf
best_test_ppl = np.inf
best_val_nvdm_IR = -1.0
best_test_nvdm_IR = -1.0
enc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='TM_encoder')
dec_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='TM_decoder')
pretrain_saver = tf.train.Saver(enc_vars + dec_vars)
ppl_model = False
ir_model = False
for epoch in range(training_epochs):
epoch_counter = epoch + 1
#train_batches = utils.create_batches(len(train_set), self.batch_size, shuffle=True)
train_batches = utils.create_batches(len(train_set), 512, shuffle=True)
#-------------------------------
# train
for switch in range(0, 2):
if switch == 0:
optim = self.optim_dec
print_mode = 'updating decoder'
else:
optim = self.optim_enc
print_mode = 'updating encoder'
for i in range(alternate_epochs):
print_ppx, print_ppx_perdoc, print_kld = self.run_epoch(
train_batches,
train_set,
train_count,
params,
session,
optimizer=optim
)
print('| Epoch train: {:d} |'.format(epoch_counter),
print_mode, '{:d}'.format(i),
'| Corpus Perplexity: {:.5f}'.format(print_ppx), # perplexity for all docs
'| Per doc Perplexity: {:.5f}'.format(print_ppx_perdoc), # perplexity for per doc
'| KLD: {:.5}'.format(print_kld))
if epoch_counter >= 1 and epoch_counter % params.nvdm_validation_ppl_freq == 0:
ppl_model = True
print_ppx, print_ppx_perdoc, print_kld = self.run_epoch(
dev_batches,
dev_set,
dev_count,
params,
session
)
if print_ppx_perdoc < best_dev_ppl:
best_dev_ppl = print_ppx_perdoc
print("Saving best model.")
pretrain_saver.save(session, model_dir_ppl_nvdm + '/model_ppl_nvdm_pretrain', global_step=1)
patience_count = 0
else:
patience_count += 1
print('| Epoch dev: {:d} |'.format(epoch_counter),
'| Corpus Perplexity: {:.9f} |'.format(print_ppx),
'| Per doc Perplexity: {:.5f} |'.format(print_ppx_perdoc),
'| KLD: {:.5} |'.format(print_kld),
'| Best dev PPL: {:.5} |'.format(best_dev_ppl))
with open(log_dir + "/logs_ppl_nvdm_pretrain.txt", "a") as f:
f.write('| Epoch Val: {:d} || Val Corpus PPL: {:.9f} || Val Per doc PPL: {:.5f} || Best Val PPL: {:.5} || KLD Val: {:.5} |\n'.format(epoch+1, print_ppx, print_ppx_perdoc, best_dev_ppl, print_kld))
if epoch_counter >= 1 and epoch_counter % params.nvdm_validation_ir_freq == 0:
ir_model = True
validation_vectors_nvdm = self.hidden_vectors(
dataset.batches_nvdm_LM('validation_nvdm_docs_non_replicated', params.batch_size, params.TM_vocab_length, num_epochs=1, multilabel=params.multi_label),
params,
session
)
training_vectors_nvdm = self.hidden_vectors(
dataset.batches_nvdm_LM('training_nvdm_docs_non_replicated', params.batch_size, params.TM_vocab_length, num_epochs=1, multilabel=params.multi_label),
params,
session
)
val_nvdm_ir, _ = eval.evaluate(
training_vectors_nvdm,
validation_vectors_nvdm,
training_labels,
validation_labels,
recall=[0.02],
num_classes=params.num_classes,
multi_label=params.multi_label
)
val_nvdm_ir = val_nvdm_ir[0]
# Saving model and Early stopping on IR
if val_nvdm_ir > best_val_nvdm_IR:
best_val_nvdm_IR = val_nvdm_ir
print('saving: {}'.format(model_dir_ir_nvdm))
pretrain_saver.save(session, model_dir_ir_nvdm + '/model_ir_nvdm_pretrain', global_step=1)
# patience_count = 0
#else:
# patience_count += 1
print("Epoch: %i, Val NVDM IR: %s, best val NVDM IR: %s\n" %
(epoch_counter, val_nvdm_ir, best_val_nvdm_IR))
# logging information
with open(log_dir + "/logs_ir_nvdm_pretrain.txt", "a") as f:
f.write("Epoch: %i, Val NVDM IR: %s, best val NVDM IR: %s\n" %
(epoch_counter, val_nvdm_ir, best_val_nvdm_IR))
if patience_count > patience:
print("Early stopping.")
break
if ppl_model:
print("Calculating Test PPL.")
pretrain_saver.restore(session, tf.train.latest_checkpoint(model_dir_ppl_nvdm))
print_ppx, print_ppx_perdoc, print_kld = self.run_epoch(
test_batches,
test_set,
test_count,
params,
session
)
print('| Corpus Perplexity: {:.9f}'.format(print_ppx),
'| Per doc Perplexity: {:.5f}'.format(print_ppx_perdoc),
'| KLD: {:.5}'.format(print_kld))
with open(log_dir + "/logs_ppl_nvdm_pretrain.txt", "a") as f:
f.write('\n\nTest Corpus PPL: {:.9f} || Test Per doc PPL: {:.5f} || KLD Test: {:.5} |\n'.format(print_ppx, print_ppx_perdoc, print_kld))
if ir_model:
print("Calculating Test IR.")
pretrain_saver.restore(session, tf.train.latest_checkpoint(model_dir_ir_nvdm))
test_vectors_nvdm = self.hidden_vectors(
dataset.batches_nvdm_LM('test_nvdm_docs_non_replicated', params.batch_size, params.TM_vocab_length, num_epochs=1, multilabel=params.multi_label),
params,
session
)
test_nvdm_ir, _ = eval.evaluate(
training_vectors_nvdm,
test_vectors_nvdm,
training_labels,
test_labels,
recall=[0.02],
num_classes=params.num_classes,
multi_label=params.multi_label
)
test_nvdm_ir = test_nvdm_ir[0]
print("Epoch: %i, Test NVDM IR: %s\n" %
(epoch_counter, test_nvdm_ir))
# logging information
with open(log_dir + "/logs_ir_nvdm_pretrain.txt", "a") as f:
f.write("Epoch: %i, Test NVDM IR: %s\n" %
(epoch_counter, test_nvdm_ir))
def hidden_vectors(self, data, params, session):
vecs = []
for y, x, count, mask in data:
feed_dict = {
self.x.name: x,
self.mask.name: mask#,
#self.input_batch_size: x.shape[0]
}
vecs.extend(
session.run([self.last_h], feed_dict=feed_dict)[0]
)
return np.array(vecs)
def run_epoch(self, input_batches, input_set, input_count, params, session, optimizer=None):
loss_sum = 0.0
ppx_sum = 0.0
kld_sum = 0.0
word_count = 0
doc_count = 0
for idx_batch in input_batches:
data_batch, count_batch, mask = utils.fetch_data(
input_set, input_count, idx_batch, self.vocab_size)
#import pdb; pdb.set_trace()
input_feed = {self.x.name: data_batch, self.mask.name: mask}#,
#self.input_batch_size: data_batch.shape[0]}
if not optimizer is None:
_, (loss, kld) = session.run((optimizer,
[self.final_loss, self.kld]),
input_feed)
else:
loss, kld = session.run([self.final_loss, self.kld],
input_feed)
#import pdb; pdb.set_trace()
loss_sum += np.sum(loss)
kld_sum += np.sum(kld) / np.sum(mask)
word_count += np.sum(count_batch)
# to avoid nan error
count_batch = np.add(count_batch, 1e-12)
# per document loss
ppx_sum += np.sum(np.divide(loss, count_batch))
doc_count += np.sum(mask)
print_ppx = np.exp(loss_sum / word_count)
print_ppx_perdoc = np.exp(ppx_sum / doc_count)
print_kld = kld_sum/len(input_batches)
return print_ppx, print_ppx_perdoc, print_kld
def run_epoch_v2(self, data, params, session):
# train_y, train_x, train_count, train_mask = dataset.batches_nvdm_LM(training_data_filename_TM, params.batch_size, params.TM_vocab_length, num_epochs=1, multilabel=params.multi_label)
# val_y, val_x, val_count, val_mask = dataset.batches_nvdm_LM(validation_data_filename_TM, params.batch_size, params.TM_vocab_length, num_epochs=1, multilabel=params.multi_label)
# test_y, test_x, test_count, test_mask = dataset.batches_nvdm_LM(test_data_filename_TM, params.batch_size, params.TM_vocab_length, num_epochs=1, multilabel=params.multi_label)
kld_sum = []
this_nvdm_loss_normed = []
this_nvdm_loss_unnormed = []
this_nvdm_words = []
for nvdm_y, nvdm_x, nvdm_count, nvdm_mask in data:
nvdm_feed_dict = {
model.topic_model.x.name: nvdm_x,
model.topic_model.mask.name: nvdm_mask#,
#model.topic_model.input_batch_size: nvdm_x.shape[0]
}
if params.supervised:
sys.exit()
else:
loss, kld = session.run([model.topic_model.final_loss,
model.topic_model.kld],
feed_dict=nvdm_feed_dict)
nvdm_count = np.add(nvdm_count, 1e-12)
this_nvdm_loss_normed.extend(np.divide(loss, nvdm_count))
this_nvdm_loss_unnormed.extend(loss)
this_nvdm_words.append(np.sum(nvdm_count))
kld_sum.append(np.sum(kld) / np.sum(nvdm_mask))
total_nvdm_nll = np.mean(this_nvdm_loss_unnormed)
#total_nvdm_ppl = np.exp(np.sum(this_nvdm_loss_unnormed) / np.sum(this_val_nvdm_words))
total_nvdm_ppl = np.exp(np.mean(this_nvdm_loss_normed))
print_kld = np.mean(kld_sum)
return total_nvdm_nll, total_nvdm_ppl, print_kld
2024-01-10 | YatinChaudhary/TopicBERT | TopicDistilBERT~src~model_TM~model_NVDM_TF2.py | from __future__ import print_function
import os, sys, csv
import numpy as np
import tensorflow.compat.v1 as tf1
import math, random
from collections import Counter
from sklearn.preprocessing import MultiLabelBinarizer
import sklearn.metrics.pairwise as pw
from gensim.models import CoherenceModel
from gensim.corpora.dictionary import Dictionary
#tf1.disable_v2_behavior()
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
seed = 42
tf_op_seed = 1234
random.seed(seed)
np.random.seed(seed)
#tf1.set_random_seed(seed)
def create_initializer(initializer_range=0.02):
return tf1.truncated_normal_initializer(stddev=initializer_range, seed=tf_op_seed)
def format_doc(doc):
new_doc_tokens = []
counts = Counter(doc.split())
for index, count in counts.items():
new_doc_tokens.append(str(index) + ":" + str(count))
new_doc = " ".join(new_doc_tokens)
return new_doc
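# Example (added for illustration): format_doc("7 7 12") returns "7:2 12:1",
# i.e. a raw token-id string is converted into sparse "id:count" pairs.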
def data_set(data_url):
data = []
word_count = []
fin = open(data_url)
csv_reader = csv.reader(fin, delimiter=",")
#while True:
# line = fin.readline()
for index, line in enumerate(csv_reader):
if not line:
break
line = format_doc(line[1].strip())
id_freqs = line.split()
doc = {}
count = 0
#for id_freq in id_freqs[1:]:
for id_freq in id_freqs:
items = id_freq.split(':')
# python starts from 0
#doc[int(items[0])-1] = int(items[1])
doc[int(items[0])] = int(items[1])
count += int(items[1])
if count > 0:
data.append(doc)
word_count.append(count)
fin.close()
return data, word_count
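# Expected input: a CSV whose column index 1 holds a document as a space-separated string
# of integer token ids; returns one {token_id: count} dict per document together with the
# document's total word count. Documents with zero counts are skipped.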
def get_initializers(scope_name, vars_dict):
matrix_var_name = scope_name + "/Matrix:0"
bias_var_name = scope_name + "/Bias:0"
if matrix_var_name in vars_dict:
matrix_initializer = vars_dict[matrix_var_name]
print("Matrix initialized for {}".format(scope_name))
else:
matrix_initializer = None
if bias_var_name in vars_dict:
bias_initializer = vars_dict[bias_var_name]
print("Bias initialized for {}".format(scope_name))
else:
bias_initializer = None
return matrix_initializer, bias_initializer
class NVDM(object):
"""
Neural Variational Document Model -- BOW VAE.
"""
def __init__(self, params, non_linearity=tf1.nn.sigmoid):
self.vocab_size = params.TM_vocab_length
self.n_hidden = params.hidden_size_TM
self.n_topic = params.n_topic
self.n_sample = params.n_sample
self.learning_rate = params.learning_rate
self.non_linearity = non_linearity
#self.x = tf1.placeholder(tf1.float32, [None, self.vocab_size], name='x')
#self.mask = tf1.placeholder(tf1.float32, [None], name='mask') # mask paddings
input_size = self.vocab_size
## pretrained_weights
enc_var_values, dec_var_values = {}, {}  # fall back to fresh initializers when no pretrained model is given
if params.TM_pretrained_model_path:
with tf1.Session() as sess:
saver_ir = tf1.train.import_meta_graph(os.path.join(params.TM_pretrained_model_path, "model_ppl_nvdm_pretrain", "model_ppl_nvdm_pretrain-1.meta"))
saver_ir.restore(sess, os.path.join(params.TM_pretrained_model_path, "model_ppl_nvdm_pretrain", "model_ppl_nvdm_pretrain-1"))
enc_var_names = [var.name for var in tf1.get_collection(tf1.GraphKeys.TRAINABLE_VARIABLES, scope='TM_encoder')]
enc_var_values = {var_name: sess.run(var_name) for var_name in enc_var_names}
dec_var_names = [var.name for var in tf1.get_collection(tf1.GraphKeys.TRAINABLE_VARIABLES, scope='TM_decoder')]
dec_var_values = {var_name: sess.run(var_name) for var_name in dec_var_names}
#with open("input_matrix.npy", "wb") as f:
# np.save(f, enc_var_values["TM_encoder/Linear/l0/Matrix:0"])
#with open("output_matrix.npy", "wb") as f:
# np.save(f, dec_var_values["TM_decoder/projection/Matrix:0"])
## encoder parameters
self.encoder_params = []
with tf1.variable_scope('TM_encoder', reuse=tf1.AUTO_REUSE):
# mlp parameters
num_mlp_layers = [self.n_hidden]
with tf1.variable_scope('Linear', reuse=tf1.AUTO_REUSE):
self.mlp_params = []
for l, hidden_size in enumerate(num_mlp_layers):
matrix_initializer, bias_initializer = get_initializers("TM_encoder/Linear/" + "l" + str(l), enc_var_values)
self.mlp_params.append(self.nvdm_linear_params(input_size,
hidden_size,
scope='l'+str(l),
matrix_initializer=None,
bias_initializer=None))
input_size = hidden_size
self.encoder_params.extend(self.mlp_params[-1])
# mean parameters
matrix_initializer, bias_initializer = get_initializers("TM_encoder/mean", enc_var_values)
self.mean_params = self.nvdm_linear_params(input_size,
self.n_topic,
scope="mean",
matrix_initializer=matrix_initializer,
bias_initializer=bias_initializer)
self.encoder_params.extend(self.mean_params)
# sigma parameters
matrix_initializer, bias_initializer = get_initializers("TM_encoder/logsigm", enc_var_values)
self.logsigm_params = self.nvdm_linear_params(input_size,
self.n_topic,
scope="logsigm",
bias_start_zero=True,
matrix_start_zero=True,
matrix_initializer=matrix_initializer,
bias_initializer=bias_initializer)
self.encoder_params.extend(self.logsigm_params)
## decoder params
with tf1.variable_scope('TM_decoder', reuse=tf1.AUTO_REUSE):
matrix_initializer, bias_initializer = get_initializers("TM_decoder/projection", dec_var_values)
self.decoder_params = self.nvdm_linear_params(self.n_topic,
self.vocab_size,
scope='projection',
matrix_initializer=matrix_initializer,
bias_initializer=bias_initializer)
self.decoder_params = list(self.decoder_params)
## optimizer
self.optimizer = tf1.train.AdamOptimizer(learning_rate=self.learning_rate)
@tf1.function
def forward(self, input, mask):
## encoder
# mlp computation
enc_vec = input
for layer_params in self.mlp_params:
enc_vec = self.non_linearity(tf1.matmul(enc_vec, layer_params[0]) + layer_params[1])
# mean computation
mean = tf1.matmul(enc_vec, self.mean_params[0]) + self.mean_params[1]
# sigma computation
logsigm = tf1.matmul(enc_vec, self.logsigm_params[0]) + self.logsigm_params[1]
# KLD loss
kld = -0.5 * tf1.reduce_sum(1 - tf1.square(mean) + 2 * logsigm - tf1.exp(2 * logsigm), 1)
kld = tf1.multiply(mask, kld, name='kld') # mask paddings
## decoder
input_batch_size = tf1.shape(input)[0]
if self.n_sample == 1:
eps = tf1.random_normal((input_batch_size, self.n_topic), mean=0.0, stddev=1.0, seed=seed)
doc_vec = tf1.add(tf1.multiply(tf1.exp(logsigm), eps), mean, name='doc_hidden')
logits = tf1.matmul(doc_vec, self.decoder_params[0]) + self.decoder_params[1]
logits = tf1.nn.log_softmax(logits)
recons_loss = - tf1.reduce_sum(tf1.multiply(logits, input), 1)
else:
eps = tf1.random_normal((self.n_sample*input_batch_size, self.n_topic), mean=0.0, stddev=1.0, seed=seed)
eps_list = tf1.split(eps, self.n_sample, 0)
recons_loss_list = []
doc_vec_list = []
for i in range(self.n_sample):
if i > 0: tf1.get_variable_scope().reuse_variables()
curr_eps = eps_list[i]
doc_vec = tf1.add(tf1.multiply(tf1.exp(logsigm), curr_eps), mean)
doc_vec_list.append(doc_vec)
logits = tf1.matmul(doc_vec, self.decoder_params[0]) + self.decoder_params[1]
logits = tf1.nn.log_softmax(logits)
recons_loss_list.append(-tf1.reduce_sum(tf1.multiply(logits, input), 1))  # use the forward() argument; self.x is not defined in this class
doc_vec = tf1.add_n(doc_vec_list) / self.n_sample
recons_loss = tf1.add_n(recons_loss_list) / self.n_sample
#self.objective_TM = self.recons_loss + self.kld
#self.objective_TM = tf1.add(self.recons_loss, self.kld, name='TM_loss_unnormed')
final_loss = tf1.add(recons_loss, kld, name='TM_loss_unnormed')
objective_TM = tf1.reduce_mean(final_loss)
"""
enc_grads = tf1.gradients(objective_TM, self.enc_vars)
dec_grads = tf1.gradients(objective_TM, self.dec_vars)
self.optim_enc = optimizer.apply_gradients(zip(enc_grads, enc_vars))
self.optim_dec = optimizer.apply_gradients(zip(dec_grads, dec_vars))
"""
return doc_vec, objective_TM
def nvdm_linear_params(
self,
input_size,
output_size,
no_bias=False,
bias_start_zero=False,
matrix_start_zero=False,
scope=None,
get_matrix=False,
matrix_initializer=None,
bias_initializer=None):
with tf1.variable_scope(scope or 'Linear', reuse=tf1.AUTO_REUSE):
if matrix_start_zero:
matrix_initializer = tf1.constant_initializer(0)
matrix = tf1.get_variable('Matrix', [input_size, output_size],
initializer=matrix_initializer)
else:
if matrix_initializer is None:
matrix_initializer = tf1.glorot_uniform_initializer(seed=tf_op_seed)
matrix = tf1.get_variable('Matrix', [input_size, output_size],
initializer=matrix_initializer)
else:
matrix = tf1.get_variable('Matrix',
initializer=matrix_initializer)
if bias_start_zero:
bias_initializer = tf1.constant_initializer(0)
bias = tf1.get_variable('Bias', [output_size],
initializer=bias_initializer)
else:
if bias_initializer is None:
bias_initializer = tf1.glorot_uniform_initializer(seed=tf_op_seed)
bias = tf1.get_variable('Bias', [output_size],
initializer=bias_initializer)
else:
bias = tf1.get_variable('Bias',
initializer=bias_initializer)
return matrix, bias
def fetch_data(self, data, count, idx_batch):
batch_size = len(idx_batch)
data_batch = np.zeros((batch_size, self.vocab_size), dtype=np.float32)
count_batch = np.zeros(batch_size, dtype=np.int32)
mask = np.zeros(batch_size, dtype=np.float32)
indices = []
values = []
for i, doc_id in enumerate(idx_batch):
if doc_id != -1:
for word_id, freq in data[doc_id].items():
data_batch[i, word_id] = freq
count_batch[i] = count[doc_id]
mask[i]=1.0
return data_batch, count_batch, mask
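# --- Hedged usage sketch (added for illustration; not part of the original repository) ---
# A minimal outline of how this compat-v1 NVDM class might be driven. The attribute names
# on `params` mirror exactly what __init__ reads above; the SimpleNamespace container, the
# concrete values and the CSV path are assumptions. The class builds its variables through
# tf1.get_variable / tf1.variable_scope, so running it under plain TF2 eager execution may
# additionally require the commented-out tf1.disable_v2_behavior() call near the top of this file.
#
# import types
# params = types.SimpleNamespace(TM_vocab_length=2000, hidden_size_TM=256, n_topic=50,
#                                n_sample=1, learning_rate=1e-3, TM_pretrained_model_path="")
# nvdm = NVDM(params)
# docs, counts = data_set("training_nvdm_docs_non_replicated.csv")   # assumed file path
# batch, count_batch, mask = nvdm.fetch_data(docs, counts, idx_batch=list(range(8)))
# doc_vec, loss = nvdm.forward(tf1.constant(batch, dtype=tf1.float32),
#                              tf1.constant(mask, dtype=tf1.float32))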
2024-01-10 | YatinChaudhary/TopicBERT | TopicBERT~topic_bert~nvdm~model_NVDM_supervised.py | """NVDM Tensorflow implementation by Yishu Miao"""
from __future__ import print_function
import numpy as np
import tensorflow as tf
import math
import os
from nvdm import utils
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import accuracy_score
#import model.utils as utils
#from sklearn.preprocessing import MultiLabelBinarizer
#import sklearn.metrics.pairwise as pw
#from gensim.models import CoherenceModel
#from gensim.corpora.dictionary import Dictionary
#import model.evaluate as eval
#import model.data_lstm as data
seed = 42
tf_op_seed = 1234
np.random.seed(seed)
tf.set_random_seed(seed)
#learning_rate = 5e-5
#batch_size = 64
#n_hidden = 256
#fixed_topic_params
#n_topic = 150
#n_sample = 1
#non_linearity = tf.nn.tanh
non_linearity = tf.nn.sigmoid
######
class NVDM(object):
""" Neural Variational Document Model -- BOW VAE.
"""
#def __init__(self, topic_params, prior_embeddings=None, initializer_nvdm=None):
def __init__(self, topic_params, x, mask, topic_vocab_size, label_ids, n_labels, prior_embeddings=None, initializer_nvdm=None):
#self.vocab_size = topic_params.TM_vocab_length
self.vocab_size = topic_vocab_size
self.n_hidden = topic_params.hidden_size_TM
self.n_topic = topic_params.n_topic
self.n_sample = topic_params.n_sample
self.non_linearity = non_linearity
self.learning_rate = topic_params.nvdm_learning_rate
self.batch_size = topic_params.nvdm_batch_size
self.x = x
self.mask = mask
self.label_ids = label_ids
self.n_labels = n_labels
#self.x = tf.placeholder(tf.float32, [None, self.vocab_size], name='x')
#self.mask = tf.placeholder(tf.float32, [None], name='mask') # mask paddings
#if topic_params.use_sent_topic_rep:
#self.x_sent = tf.placeholder(tf.float32, [None, None, self.vocab_size], name='x_sent')
#if topic_params.use_topic_embedding:
# self.x_doc_mask = tf.placeholder(tf.float32, [None, self.vocab_size], name='x_doc_mask')
#self.input_batch_size = tf.placeholder(tf.int32, (), name='input_batch_size')
self.input_batch_size = tf.shape(self.x)[0]
#if topic_params.use_sent_topic_rep:
# self.input_batch_size_sent = tf.shape(self.x_sent)[0]
# self.input_batch_len_sent = tf.shape(self.x_sent)[1]
# self.batch_size_sent = self.input_batch_size_sent * self.input_batch_len_sent
# encoder
with tf.variable_scope('TM_encoder', reuse=tf.AUTO_REUSE):
self.enc_vec = utils.mlp(self.x, [self.n_hidden], self.non_linearity, initializer=initializer_nvdm[0])
#self.enc_vec = utils.mlp(self.x, [self.n_hidden, self.n_hidden], self.non_linearity, initializer=initializer_nvdm[0])
#self.enc_vec = utils.mlp(self.x, [self.n_hidden, self.n_hidden], self.non_linearity)
self.mean = utils.nvdm_linear(self.enc_vec,
self.n_topic,
scope='mean',
matrix_initializer=initializer_nvdm[1][0],
bias_initializer=initializer_nvdm[1][1])
self.logsigm = utils.nvdm_linear(self.enc_vec,
self.n_topic,
bias_start_zero=True,
matrix_start_zero=True,
scope='logsigm',
matrix_initializer=initializer_nvdm[2][0],
bias_initializer=initializer_nvdm[2][1])
self.kld = -0.5 * tf.reduce_sum(1 - tf.square(self.mean) + 2 * self.logsigm - tf.exp(2 * self.logsigm), 1)
#self.kld = self.mask*self.kld # mask paddings
self.kld = tf.multiply(self.mask, self.kld, name='kld') # mask paddings
#if topic_params.use_sent_topic_rep:
# self.x_sent_reshape = tf.reshape(self.x_sent, [-1, self.vocab_size])
# self.enc_vec_sent = utils.mlp(self.x_sent_reshape, [self.n_hidden], self.non_linearity)
# #self.enc_vec = utils.mlp(self.x, [self.n_hidden, self.n_hidden], self.non_linearity)
# self.mean_sent = utils.nvdm_linear(self.enc_vec_sent, self.n_topic, scope='mean')
# self.logsigm_sent = utils.nvdm_linear(self.enc_vec_sent,
# self.n_topic,
# bias_start_zero=True,
# matrix_start_zero=True,
# scope='logsigm')
#if topic_params.prior_emb_for_topics:
# W_prior = tf.get_variable(
# 'embeddings_TM_prior',
# dtype=tf.float32,
# initializer=prior_embeddings,
# trainable=False
# )
"""
W_prior_proj = tf.get_variable(
'embeddings_TM_prior_proj',
[prior_embeddings.shape[1], self.n_topic],
dtype=tf.float32,
trainable=False
)
W_prior = tf.matmul(W_prior, W_prior_proj, name='W_prior_projected')
"""
with tf.variable_scope('TM_decoder', reuse=tf.AUTO_REUSE):
if self.n_sample == 1:
eps = tf.random_normal((self.input_batch_size, self.n_topic), mean=0.0, stddev=1.0, seed=seed)
#doc_vec = tf.mul(tf.exp(self.logsigm), eps) + self.mean
## Hidden representation to be used in BERT
self.doc_vec = tf.add(tf.multiply(tf.exp(self.logsigm), eps), self.mean, name='doc_hidden')
self.last_h = self.doc_vec
logits_projected, self.decoding_matrix = utils.nvdm_linear(self.doc_vec,
self.vocab_size,
scope='projection',
get_matrix=True,
matrix_initializer=initializer_nvdm[3][0],
bias_initializer=initializer_nvdm[3][1])
logits = tf.nn.log_softmax(logits_projected)
self.recons_loss = -tf.reduce_sum(tf.multiply(logits, self.x), 1)
sup_logits = utils.nvdm_linear(self.doc_vec, self.n_labels, scope='supervised')
if topic_params.multilabel:
self.sup_prob = tf.nn.sigmoid(sup_logits)
self.supervised_loss = tf.multiply(self.mask, tf.reduce_sum(tf.losses.sigmoid_cross_entropy(self.label_ids, sup_logits , reduction="none"), axis=-1))
else:
self.sup_prob = tf.nn.softmax(sup_logits, axis=-1)
log_prob = tf.nn.log_softmax(sup_logits)
self.one_hot_labels = tf.one_hot(self.label_ids, depth=n_labels, on_value = 1.0, off_value = 0.0, dtype=tf.float32)
self.supervised_loss = -tf.reduce_sum(tf.multiply(log_prob, self.one_hot_labels), 1)
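# Supervised head on top of the document vector: sigmoid cross-entropy per label for
# multi-label data (masked over padded batch entries), otherwise softmax cross-entropy
# against the one-hot gold label.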
"""
if topic_params.use_topic_embedding:
#self.last_h_topic_emb = utils.nvdm_linear(tf.nn.softmax(self.last_h, axis=1), self.vocab_size, scope='projection')
#self.top_k = tf.nn.top_k(self.decoding_matrix, k=topic_params.use_k_topic_words, sorted=False)
topics_masked = tf.multiply(tf.expand_dims(self.x_doc_mask, axis=1), tf.expand_dims(self.decoding_matrix, axis=0), name='topics_masked')
self.top_k = tf.nn.top_k(topics_masked, k=topic_params.use_k_topic_words, sorted=False)
if topic_params.prior_emb_for_topics:
self.top_k_embeddings = tf.nn.embedding_lookup(W_prior, self.top_k.indices)
self.topic_emb_size = prior_embeddings.shape[1]
#self.topic_emb_size = prior_embeddings.shape[1] * topic_params.use_k_topics
#self.topic_emb_size = prior_embeddings.shape[1] + self.n_topic
#self.topic_emb_size = self.n_topic
#self.topic_emb_size = self.n_topic * 2
else:
self.top_k_embeddings = tf.nn.embedding_lookup(tf.transpose(self.decoding_matrix), self.top_k.indices)
#self.topic_emb_size = self.n_topic
self.topic_emb_size = self.n_topic * 2
#self.top_k_embeddings = tf.multiply(tf.expand_dims(tf.nn.softmax(self.top_k.values, axis=1), axis=2), self.top_k_embeddings)
#self.temp_1 = tf.expand_dims(tf.nn.softmax(self.top_k.values, axis=2), axis=2)
#self.topic_embeddings = tf.squeeze(tf.matmul(self.temp_1, self.top_k_embeddings), axis=2, name='topic_embeddings')
#self.topic_embeddings = tf.reduce_sum(self.top_k_embeddings, axis=1, name='topic_embeddings')
#self.topic_embeddings = tf.reduce_mean(self.top_k_embeddings, axis=1, name='topic_embeddings')
self.topic_embeddings = tf.reduce_mean(self.top_k_embeddings, axis=2, name='topic_embeddings')
if topic_params.use_k_topics > 0:
# Masking document topic proportion vector
top_k_h_values, top_k_h_indices = tf.nn.top_k(self.last_h, k=topic_params.use_k_topics, sorted=False, name='top_k_h')
row_numbers = tf.tile(tf.expand_dims(tf.range(0, self.input_batch_size), 1), [1, topic_params.use_k_topics], name='row_numbers')
full_indices = tf.concat([tf.expand_dims(row_numbers, -1), tf.expand_dims(top_k_h_indices, -1)], axis=2)
full_indices = tf.reshape(full_indices, [-1, 2], name='full_indices')
#mask_updates = tf.ones([self.input_batch_size * topic_params.use_k_topics], dtype=tf.float32, name='mask_updates')
#new_mask = tf.scatter_nd(full_indices, mask_updates, [self.input_batch_size, self.n_topic], name='new_mask')
#last_h_softmax = tf.multiply(tf.nn.softmax(self.last_h, axis=1), new_mask, name='last_h_softmax')
last_h_softmax = tf.scatter_nd(
full_indices,
tf.reshape(tf.nn.softmax(top_k_h_values, axis=1), [-1]),
#tf.ones([self.input_batch_size * topic_params.use_k_topics], dtype=tf.float32),
[self.input_batch_size, self.n_topic],
name='last_h_softmax'
)
else:
last_h_softmax = tf.nn.softmax(self.last_h, axis=1, name='last_h_softmax')
#last_h_softmax = self.last_h
#self.last_h_topic_emb = tf.matmul(last_h_softmax, self.topic_embeddings, name='last_h_topic_emb')
self.last_h_topic_emb = tf.squeeze(tf.matmul(tf.expand_dims(last_h_softmax, axis=1), self.topic_embeddings), axis=1, name='last_h_topic_emb')
#temp = tf.nn.embedding_lookup(self.topic_embeddings, top_k_h_indices)
#self.last_h_topic_emb = tf.reduce_sum(temp, axis=1, name='last_h_topic_emb')
#self.last_h_topic_emb = tf.reshape(temp, [self.input_batch_size, self.topic_emb_size], name='last_h_topic_emb')
#self.last_h_topic_emb = tf.concat([self.last_h_topic_emb, last_h_softmax], axis=1)
#self.last_h_topic_emb = tf.concat([self.last_h_topic_emb, self.last_h], axis=1)
"""
else:
#eps = tf.random_normal((self.n_sample*self.batch_size, self.n_topic), mean=0.0, stddev=1.0)
"""
eps = tf.random_normal((self.n_sample*self.input_batch_size, self.n_topic), mean=0.0, stddev=1.0, seed=seed)
eps_list = tf.split(eps, self.n_sample, 0)
recons_loss_list = []
for i in range(self.n_sample):
if i > 0: tf.get_variable_scope().reuse_variables()
curr_eps = eps_list[i]
doc_vec = tf.multiply(tf.exp(self.logsigm), curr_eps) + self.mean
logits = tf.nn.log_softmax(utils.nvdm_linear(doc_vec, self.vocab_size, scope='projection'))
recons_loss_list.append(-tf.reduce_sum(tf.multiply(logits, self.x), 1))
self.recons_loss = tf.add_n(recons_loss_list) / self.n_sample
"""
eps = tf.random_normal((self.n_sample*self.input_batch_size, self.n_topic), mean=0.0, stddev=1.0, seed=seed)
eps_list = tf.split(eps, self.n_sample, 0)
recons_loss_list = []
doc_vec_list = []
for i in range(self.n_sample):
if i > 0: tf.get_variable_scope().reuse_variables()
curr_eps = eps_list[i]
doc_vec = tf.add(tf.multiply(tf.exp(self.logsigm), curr_eps), self.mean)
doc_vec_list.append(doc_vec)
logits = tf.nn.log_softmax(utils.nvdm_linear(doc_vec, self.vocab_size, scope='projection'))
recons_loss_list.append(-tf.reduce_sum(tf.multiply(logits, self.x), 1))
self.recons_loss = tf.add_n(recons_loss_list) / self.n_sample
self.doc_vec = tf.add_n(doc_vec_list) / self.n_sample
self.last_h = self.doc_vec
sup_logits = utils.nvdm_linear(self.doc_vec, self.n_labels, scope='supervised')
if topic_params.multilabel:
self.sup_prob = tf.nn.sigmoid(sup_logits)
self.supervised_loss = tf.multiply(self.mask, tf.reduce_sum(tf.losses.sigmoid_cross_entropy(self.label_ids, sup_logits , reduction="none"), axis=-1))
else:
self.sup_prob = tf.nn.softmax(sup_logits, axis=-1)
log_prob = tf.nn.log_softmax(sup_logits)
self.one_hot_labels = tf.one_hot(self.label_ids, depth=n_labels, on_value = 1.0, off_value = 0.0, dtype=tf.float32)
self.supervised_loss = -tf.reduce_sum(tf.multiply(log_prob, self.one_hot_labels), 1)
""""
if topic_params.use_sent_topic_rep:
if self.n_sample == 1:
eps_sent = tf.random_normal((self.batch_size_sent, self.n_topic), mean=0.0, stddev=1.0, seed=seed)
self.last_h_sent = tf.add(tf.multiply(tf.exp(self.logsigm_sent), eps_sent), self.mean_sent, name='sent_hidden')
self.last_h_sent = tf.reshape(self.last_h_sent, [self.input_batch_size_sent, self.input_batch_len_sent, self.n_topic])
if topic_params.use_topic_embedding:
#self.last_h_topic_emb_sent = utils.nvdm_linear(tf.nn.softmax(self.last_h_sent, axis=1), self.vocab_size, scope='projection')
if topic_params.use_k_topics > 0:
# Masking sentence topic proportion vector
top_k_h_sent_values, top_k_h_sent_indices = tf.nn.top_k(self.last_h_sent, k=topic_params.use_k_topics, sorted=False, name='top_k_h_sent')
row_numbers_sent = tf.tile(tf.expand_dims(tf.range(0, self.batch_size_sent), 1), [1, topic_params.use_k_topics], name='row_numbers_sent')
full_indices_sent = tf.concat([tf.expand_dims(row_numbers_sent, -1), tf.expand_dims(top_k_h_sent_indices, -1)], axis=2)
full_indices_sent = tf.reshape(full_indices_sent, [-1, 2], name='full_indices_sent')
#mask_updates_sent = tf.ones([self.batch_size_sent * topic_params.use_k_topics], dtype=tf.float32, name='mask_updates_sent')
#new_mask_sent = tf.scatter_nd(full_indices_sent, mask_updates_sent, [self.batch_size_sent, self.n_topic], name='new_mask_sent')
#last_h_softmax_sent = tf.multiply(tf.nn.softmax(self.last_h_sent, axis=1), new_mask_sent, name='last_h_softmax_sent')
last_h_softmax_sent = tf.scatter_nd(full_indices_sent, tf.reshape(tf.nn.softmax(top_k_h_sent_values, axis=1), [-1]), [self.batch_size_sent, self.n_topic], name='last_h_softmax_sent')
else:
last_h_softmax_sent = tf.nn.softmax(self.last_h_sent, axis=2, name='last_h_softmax_sent')
self.last_h_topic_emb_sent = tf.matmul(last_h_softmax_sent, self.topic_embeddings, name='last_h_topic_emb_sent')
#self.last_h_topic_emb_sent = tf.concat([self.last_h_topic_emb_sent, self.last_h_sent], axis=2, name='last_h_topic_emb_sent')
#self.last_h_topic_emb_sent = tf.concat([self.last_h_topic_emb_sent, last_h_softmax_sent], axis=2, name='last_h_topic_emb_sent')
#self.last_h_topic_emb_sent = tf.reshape(self.last_h_topic_emb_sent, [self.input_batch_size_sent, self.input_batch_len_sent, self.vocab_size])
else:
print("Error: model_NVDM.py - Decoder")
sys.exit()
"""
#self.objective_TM = self.recons_loss + self.kld
#self.objective_TM = tf.add(self.recons_loss, self.kld, name='TM_loss_unnormed')
self.unsupervised_loss = tf.add(self.recons_loss, self.kld, name='TM_loss_unnormed')
self.final_loss = tf.add((1-topic_params.beta)*self.unsupervised_loss, topic_params.beta*(self.supervised_loss), "TM_combined_loss")
self.objective_TM = tf.reduce_mean(self.final_loss)
"""
if topic_params.TM_uniqueness_loss:
## NVDM topic uniqueness loss
eye = tf.constant(np.eye(self.n_topic), dtype=tf.float32)
topicnorm = matrix / tf.sqrt(tf.reduce_sum(tf.square(self.decoding_matrix), 1, keepdims=True))
uniqueness = tf.reduce_max(tf.square(tf.matmul(topicnorm, tf.transpose(topicnorm)) - eye))
self.objective_TM += topic_params.alpha_uniqueness * uniqueness
"""
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
#fullvars = tf.trainable_variables()
#enc_vars = utils.variable_parser(fullvars, 'TM_encoder')
enc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='TM_encoder')
#dec_vars = utils.variable_parser(fullvars, 'TM_decoder')
dec_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='TM_decoder')
self.pretrain_saver = tf.train.Saver(enc_vars + dec_vars)
enc_grads = tf.gradients(self.objective_TM, enc_vars)
dec_grads = tf.gradients(self.objective_TM, dec_vars)
self.optim_enc = optimizer.apply_gradients(zip(enc_grads, enc_vars))
self.optim_dec = optimizer.apply_gradients(zip(dec_grads, dec_vars))
## Pretraining of NVDM-TM
def pretrain(self, dataset, topic_params, nvdm_datadir , session,
#training_epochs=1000, alternate_epochs=10):
#training_epochs=100, alternate_epochs=10):
training_epochs=20, alternate_epochs=10):
#training_epochs=1, alternate_epochs=1):
#log_dir = os.path.join(topic_params.model, 'logs_nvdm_pretrain')
#model_dir_ir_nvdm = os.path.join(topic_params.model, 'model_ir_nvdm_pretrain')
#model_dir_ppl_nvdm = os.path.join(topic_params.model, 'model_ppl_nvdm_pretrain')
log_dir = os.path.join(topic_params.output_dir, 'logs_nvdm_pretrain')
model_dir_ir_nvdm = os.path.join(topic_params.output_dir, 'model_ir_nvdm_pretrain')
model_dir_ppl_nvdm = os.path.join(topic_params.output_dir, 'model_ppl_nvdm_pretrain')
model_dir_f1_nvdm = os.path.join(topic_params.output_dir, 'model_f1_nvdm_pretrain')
#model_dir_supervised = os.path.join(topic_params.model, 'model_supervised_nvdm_pretrain')
if not os.path.isdir(log_dir):
os.mkdir(log_dir)
if not os.path.isdir(model_dir_ir_nvdm):
os.mkdir(model_dir_ir_nvdm)
        if not os.path.isdir(model_dir_ppl_nvdm):
            os.mkdir(model_dir_ppl_nvdm)
        if not os.path.isdir(model_dir_f1_nvdm):
            os.mkdir(model_dir_f1_nvdm)
#if not os.path.isdir(model_dir_supervised):
# os.mkdir(model_dir_supervised)
#train_url = os.path.join(topic_params.dataset, 'training_nvdm_docs_non_replicated.csv')
#dev_url = os.path.join(topic_params.dataset, 'validation_nvdm_docs_non_replicated.csv')
#test_url = os.path.join(topic_params.dataset, 'test_nvdm_docs_non_replicated.csv')
train_url = os.path.join(nvdm_datadir, 'training_nvdm_docs_non_replicated.csv')
dev_url = os.path.join(nvdm_datadir, 'validation_nvdm_docs_non_replicated.csv')
test_url = os.path.join(nvdm_datadir, 'test_nvdm_docs_non_replicated.csv')
train_set, train_count, train_ids, train_doc_ids = utils.data_set(train_url, topic_params)
test_set, test_count, test_ids, test_doc_ids = utils.data_set(test_url, topic_params)
dev_set, dev_count, dev_ids, dev_doc_ids = utils.data_set(dev_url, topic_params)
dev_batches = utils.create_batches(len(dev_set), self.batch_size, shuffle=False)
#dev_batches = utils.create_batches(len(dev_set), 512, shuffle=False)
test_batches = utils.create_batches(len(test_set), self.batch_size, shuffle=False)
#test_batches = utils.create_batches(len(test_set), 512, shuffle=False)
        # Document labels used by the IR evaluation (eval.evaluate) further below.
        training_labels = np.array(
            [[y] for y, _ in dataset.rows('training_nvdm_docs_non_replicated', num_epochs=1)]
        )
        validation_labels = np.array(
            [[y] for y, _ in dataset.rows('validation_nvdm_docs_non_replicated', num_epochs=1)]
        )
        test_labels = np.array(
            [[y] for y, _ in dataset.rows('test_nvdm_docs_non_replicated', num_epochs=1)]
        )
patience = topic_params.nvdm_patience
patience_count_ppl = 0
patience_count_f1 = 0
best_dev_ppl = np.inf
best_dev_f1 = -np.inf
best_val_nvdm_IR = -1.0
enc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='TM_encoder')
dec_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='TM_decoder')
self.pretrain_saver = tf.train.Saver(enc_vars + dec_vars)
ppl_model = False
ir_model = False
f1_model = False
for epoch in range(training_epochs):
epoch_counter = epoch + 1
train_batches = utils.create_batches(len(train_set), self.batch_size, shuffle=True)
#train_batches = utils.create_batches(len(train_set), 512, shuffle=True)
#-------------------------------
# train
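            # Alternate between decoder-only (switch 0) and encoder-only (switch 1) updates for a few inner epochs each.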
for switch in range(0, 2):
if switch == 0:
optim = self.optim_dec
print_mode = 'updating decoder'
else:
optim = self.optim_enc
print_mode = 'updating encoder'
for i in range(alternate_epochs):
print_ppx, print_ppx_perdoc, print_kld, print_sup_loss, print_macro_prec, print_macro_recall, print_macro_f1_score, print_acc = self.run_epoch(
train_batches,
train_set,
train_count,
topic_params,
session,
input_labels = train_ids,
optimizer=optim
)
print('| Epoch train: {:d} |'.format(epoch_counter),
print_mode, '{:d}'.format(i),
'| Corpus Perplexity: {:.5f}'.format(print_ppx), # perplexity for all docs
'| Per doc Perplexity: {:.5f}'.format(print_ppx_perdoc), # perplexity for per doc
'| KLD: {:.5}'.format(print_kld),
'| Supervised loss: {:.5f}'.format(print_sup_loss)) ## print supervised loss
if epoch_counter >= 1 and epoch_counter % topic_params.nvdm_validation_ppl_freq == 0:
ppl_model = True
print_ppx, print_ppx_perdoc, print_kld, print_sup_loss, print_macro_prec, print_macro_recall, print_macro_f1_score, print_acc = self.run_epoch(
dev_batches,
dev_set,
dev_count,
topic_params,
session,
input_labels = dev_ids
)
if print_ppx_perdoc < best_dev_ppl:
#if print_ppx_perdoc <= best_dev_ppl:
best_dev_ppl = print_ppx_perdoc
print("Saving best model.")
self.pretrain_saver.save(session, model_dir_ppl_nvdm + '/model_ppl_nvdm_pretrain', global_step=1)
self.save_to_s3_TM(topic_params)
patience_count_ppl = 0
else:
patience_count_ppl += 1
print('| Epoch dev: {:d} |'.format(epoch_counter),
'| Corpus Perplexity: {:.9f} |'.format(print_ppx),
'| Per doc Perplexity: {:.5f} |'.format(print_ppx_perdoc),
'| KLD: {:.5} |'.format(print_kld),
'| Best dev PPL: {:.5} |'.format(best_dev_ppl))
with open(log_dir + "/logs_ppl_nvdm_pretrain.txt", "a") as f:
f.write('| Epoch Val: {:d} || Val Corpus PPL: {:.9f} || Val Per doc PPL: {:.5f} || Best Val PPL: {:.5} || KLD Val: {:.5} |\n'.format(epoch+1, print_ppx, print_ppx_perdoc, best_dev_ppl, print_kld))
if epoch_counter >= 1 and epoch_counter % topic_params.nvdm_validation_f1_freq == 0:
f1_model = True
print_ppx, print_ppx_perdoc, print_kld, print_sup_loss, print_macro_prec, print_macro_recall, print_macro_f1_score, print_acc = self.run_epoch(
dev_batches,
dev_set,
dev_count,
topic_params,
session,
input_labels = dev_ids
)
if print_macro_f1_score > best_dev_f1:
best_dev_f1 = print_macro_f1_score
print("Saving best model.")
self.pretrain_saver.save(session, model_dir_f1_nvdm + '/model_f1_nvdm_pretrain', global_step=1)
self.save_to_s3_TM(topic_params)
patience_count_f1 = 0
else:
patience_count_f1 += 1
print('| Epoch dev: {:d} |'.format(epoch_counter),
'| Macro F1 : {:.9f} |'.format(print_macro_f1_score),
'| Macro Prec: {:.5f} |'.format(print_macro_prec),
'| Macro Recall: {:.5} |'.format(print_macro_recall),
'| Best F1: {:.5} |'.format(best_dev_f1))
with open(log_dir + "/logs_f1_nvdm_pretrain.txt", "a") as f:
f.write('| Epoch Val: {:d} || Macro F1: {:.9f} || Macro Prec: {:.5f} || Macro Recall: {:.5} || Best Macro F1: {:.5} || Accuracy: {:.5} |\n'.format(epoch+1, print_macro_f1_score, print_macro_prec, print_macro_recall, best_dev_f1 , print_acc))
if epoch_counter >= 1 and epoch_counter % topic_params.nvdm_validation_ir_freq == 0:
ir_model = True
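                # IR validation: encode the training and validation documents and score label-based retrieval at a recall fraction of 0.02 (uses training_labels / validation_labels defined above).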
validation_vectors_nvdm = self.hidden_vectors(
#dataset.batches_nvdm_LM('validation_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label),
dataset.batches_nvdm_LM('validation_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, self.vocab_size, num_epochs=1, multilabel=topic_params.multilabel),
topic_params,
session
)
training_vectors_nvdm = self.hidden_vectors(
#dataset.batches_nvdm_LM('training_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label),
dataset.batches_nvdm_LM('training_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, self.vocab_size, num_epochs=1, multilabel=topic_params.multilabel),
topic_params,
session
)
val_nvdm_ir, _ = eval.evaluate(
training_vectors_nvdm,
validation_vectors_nvdm,
training_labels,
validation_labels,
recall=[0.02],
num_classes=topic_params.nvdm_num_classes,
multi_label=topic_params.multilabel
)
val_nvdm_ir = val_nvdm_ir[0]
# Saving model and Early stopping on IR
if val_nvdm_ir > best_val_nvdm_IR:
best_val_nvdm_IR = val_nvdm_ir
print('saving: {}'.format(model_dir_ir_nvdm))
self.pretrain_saver.save(session, model_dir_ir_nvdm + '/model_ir_nvdm_pretrain', global_step=1)
self.save_to_s3_TM(topic_params)
# patience_count = 0
#else:
# patience_count += 1
print("Epoch: %i, Val NVDM IR: %s, best val NVDM IR: %s\n" %
(epoch_counter, val_nvdm_ir, best_val_nvdm_IR))
# logging information
with open(log_dir + "/logs_ir_nvdm_pretrain.txt", "a") as f:
f.write("Epoch: %i, Val NVDM IR: %s, best val NVDM IR: %s\n" %
(epoch_counter, val_nvdm_ir, best_val_nvdm_IR))
if topic_params.validate_supervised_TM == "ppl":
if patience_count_ppl > patience:
print("Early stopping.")
break
elif topic_params.validate_supervised_TM == "f1":
if patience_count_f1 > patience:
print("Early stopping.")
break
if ppl_model:
print("Calculating Test PPL.")
self.pretrain_saver.restore(session, tf.train.latest_checkpoint(model_dir_ppl_nvdm))
print_ppx, print_ppx_perdoc, print_kld, print_sup_loss, print_macro_prec, print_macro_recall, print_macro_f1_score, print_acc= self.run_epoch(
test_batches,
test_set,
test_count,
topic_params,
session,
input_labels = test_ids
)
print('| Corpus Perplexity: {:.9f}'.format(print_ppx),
'| Per doc Perplexity: {:.5f}'.format(print_ppx_perdoc),
'| KLD: {:.5}'.format(print_kld))
with open(log_dir + "/logs_ppl_nvdm_pretrain.txt", "a") as f:
f.write('\n\nTest Corpus PPL: {:.9f} || Test Per doc PPL: {:.5f} || KLD Test: {:.5} |\n'.format(print_ppx, print_ppx_perdoc, print_kld))
if f1_model:
print("Calculating Test F1.")
self.pretrain_saver.restore(session, tf.train.latest_checkpoint(model_dir_f1_nvdm))
print_ppx, print_ppx_perdoc, print_kld, print_sup_loss, print_macro_prec, print_macro_recall, print_macro_f1_score, print_acc = self.run_epoch(
test_batches,
test_set,
test_count,
topic_params,
session,
input_labels = test_ids
)
print('| Macro F1: {:.9f}'.format(print_macro_f1_score),
'| Macro prec: {:.5f}'.format(print_macro_prec),
'| Macro recall : {:.5}'.format(print_macro_recall),
'| Acc : {:.5}'.format(print_acc)
)
with open(log_dir + "/logs_f1_nvdm_pretrain.txt", "a") as f:
f.write('\n\nTest Macro F1: {:.9f} || Test Macro prec : {:.5f} || Test Macro recall : {:.5} || Test Acc : {:.5} |\n'.format(print_macro_f1_score, print_macro_prec, print_macro_recall, print_acc ))
if ir_model:
print("Calculating Test IR.")
self.pretrain_saver.restore(session, tf.train.latest_checkpoint(model_dir_ir_nvdm))
test_vectors_nvdm = self.hidden_vectors(
#dataset.batches_nvdm_LM('test_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label),
dataset.batches_nvdm_LM('test_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, self.vocab_size, num_epochs=1, multilabel=topic_params.multilabel),
topic_params,
session
)
test_nvdm_ir, _ = eval.evaluate(
training_vectors_nvdm,
test_vectors_nvdm,
training_labels,
test_labels,
recall=[0.02],
num_classes=topic_params.nvdm_num_classes,
multi_label=topic_params.multilabel
)
test_nvdm_ir = test_nvdm_ir[0]
print("Epoch: %i, Test NVDM IR: %s\n" %
(epoch_counter, test_nvdm_ir))
# logging information
with open(log_dir + "/logs_ir_nvdm_pretrain.txt", "a") as f:
f.write("Epoch: %i, Test NVDM IR: %s\n" %
(epoch_counter, test_nvdm_ir))
def hidden_vectors(self, data, topic_params, session):
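        # Encode batches into their latent document vectors (last_h), e.g. for retrieval evaluation.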
vecs = []
for y, x, count, mask in data:
feed_dict = {
self.x.name: x,
self.mask.name: mask
#self.input_batch_size: x.shape[0]
}
vecs.extend(
session.run([self.last_h], feed_dict=feed_dict)[0]
)
return np.array(vecs)
"""
def topic_dist(self, input_batches, input_set, input_count, topic_params, session):
topic_dist = []
mask_list = []
for idx_batch in input_batches:
data_batch, count_batch, mask = utils.fetch_data(
input_set, input_count, idx_batch, self.vocab_size)
input_feed = {self.x.name: data_batch,
self.mask.name: mask}
doc_vec = session.run([self.doc_vec], input_feed)
topic_dist.extend(list(doc_vec[0]))
mask_list.extend(list(mask))
topic_dist_unique = []
for num, m in enumerate(mask_list):
if m!= 0.0:
topic_dist_unique.append(topic_dist[num])
topic_dist_unique = np.asarray(topic_dist_unique)
return topic_dist_unique, mask_list
"""
def topic_dist(self, input_batches, input_set, input_doc_ids , input_count, topic_params, session):
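        # Collect a {doc_id: topic-proportion vector} mapping, skipping padded entries (idx == -1).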
topic_dist = []
mask_list = []
doc_id_list = []
for idx_batch in input_batches:
data_batch, count_batch, mask = utils.fetch_data(
input_set, input_count, idx_batch, self.vocab_size, topic_params)
input_feed = {self.x.name: data_batch,
self.mask.name: mask}
doc_vec = session.run([self.doc_vec], input_feed)
topic_dist.extend(list(doc_vec[0]))
mask_list.extend(list(mask))
for idx in idx_batch:
if idx != -1:
doc_id_list.append(input_doc_ids[idx])
else:
doc_id_list.append(-1)
assert len(topic_dist) == len(doc_id_list)
topic_dist_unique = {}
for id, dist in zip(doc_id_list, topic_dist):
if id != -1:
topic_dist_unique[str(id)] = dist
return topic_dist_unique, mask_list
def save_to_s3_TM(self, topic_params):
pass
def run_epoch(self, input_batches, input_set, input_count, topic_params, session, input_labels = None, optimizer=None):
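        # One pass over the batches: trains when an optimizer is passed, otherwise only evaluates perplexity, KLD and classification metrics.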
loss_sum = 0.0
ppx_sum = 0.0
kld_sum = 0.0
supervised_loss_sum = 0.0
word_count = 0
doc_count = 0
doc_pred = []
doc_labels = []
for idx_batch in input_batches:
data_batch, count_batch, mask, label_batch = utils.fetch_data(
input_set, input_count, idx_batch, self.vocab_size,topic_params , labels = input_labels)
#import pdb; pdb.set_trace()
input_feed = {self.x.name: data_batch,
self.mask.name: mask,
self.label_ids.name: label_batch}
if not optimizer is None:
_, (loss, kld, supervised_loss, prob) = session.run((optimizer,
[self.unsupervised_loss, self.kld, self.supervised_loss, self.sup_prob]),
input_feed)
else:
loss, kld, supervised_loss, prob = session.run([self.unsupervised_loss, self.kld, self.supervised_loss, self.sup_prob],
input_feed)
if topic_params.multilabel:
prob_arr = np.asarray(prob)
multilabel_pred = np.where(prob_arr >= 0.5, 1, 0)
pred = np.ndarray.tolist(multilabel_pred)
else:
pred = np.argmax(prob, axis = 1)
assert len(pred) == len(label_batch) == len(mask)
for i in range(len(mask)):
if mask[i] != 0.0:
doc_pred.append(pred[i])
doc_labels.append(label_batch[i])
loss_sum += np.sum(loss)
kld_sum += np.sum(kld) / np.sum(mask)
supervised_loss_sum += np.sum(supervised_loss) / np.sum(mask)
word_count += np.sum(count_batch)
# to avoid nan error
count_batch = np.add(count_batch, 1e-12)
# per document loss
ppx_sum += np.sum(np.divide(loss, count_batch))
doc_count += np.sum(mask)
assert -1 not in doc_labels
if topic_params.multilabel:
doc_labels = np.asarray(doc_labels)
doc_pred = np.asarray(doc_pred)
print_macro_prec, print_macro_recall, print_macro_f1_score, _ = precision_recall_fscore_support(doc_labels, doc_pred, average = "macro")
#print_micro_prec, print_micro_recall, print_micro_f1_score, _ = precision_recall_fscore_support(doc_labels, doc_pred, average = "micro")
print_acc = accuracy_score(doc_labels, doc_pred)
print_sup_loss = supervised_loss_sum/len(input_batches)
print_ppx = np.exp(loss_sum / word_count)
print_ppx_perdoc = np.exp(ppx_sum / doc_count)
print_kld = kld_sum/len(input_batches)
return print_ppx, print_ppx_perdoc, print_kld, print_sup_loss, print_macro_prec, print_macro_recall, print_macro_f1_score, print_acc
def run_epoch_v2(self, data, topic_params, session):
# train_y, train_x, train_count, train_mask = dataset.batches_nvdm_LM(training_data_filename_TM, topic_params.batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label)
# val_y, val_x, val_count, val_mask = dataset.batches_nvdm_LM(validation_data_filename_TM, topic_params.batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label)
# test_y, test_x, test_count, test_mask = dataset.batches_nvdm_LM(test_data_filename_TM, topic_params.batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label)
kld_sum = []
this_nvdm_loss_normed = []
this_nvdm_loss_unnormed = []
this_nvdm_words = []
for nvdm_y, nvdm_x, nvdm_count, nvdm_mask in data:
nvdm_feed_dict = {
model.topic_model.x.name: nvdm_x,
model.topic_model.mask.name: nvdm_mask#,
#model.topic_model.input_batch_size: nvdm_x.shape[0]
}
if topic_params.supervised:
sys.exit()
else:
loss, kld = session.run([model.topic_model.final_loss,
model.topic_model.kld],
feed_dict=nvdm_feed_dict)
nvdm_count = np.add(nvdm_count, 1e-12)
this_nvdm_loss_normed.extend(np.divide(loss, nvdm_count))
this_nvdm_loss_unnormed.extend(loss)
this_nvdm_words.append(np.sum(nvdm_count))
kld_sum.append(np.sum(kld) / np.sum(nvdm_mask))
total_nvdm_nll = np.mean(this_nvdm_loss_unnormed)
#total_nvdm_ppl = np.exp(np.sum(this_nvdm_loss_unnormed) / np.sum(this_val_nvdm_words))
total_nvdm_ppl = np.exp(np.mean(this_nvdm_loss_normed))
print_kld = np.mean(kld_sum)
return total_nvdm_nll, total_nvdm_ppl, print_kld
| [] |
2024-01-10 | YatinChaudhary/TopicBERT | TopicBERT~topic_bert~nvdm~model_NVDM.py | """NVDM Tensorflow implementation by Yishu Miao"""
from __future__ import print_function
import numpy as np
import tensorflow as tf
import math
import os
import sys
from nvdm import utils
#import model.utils as utils
#from sklearn.preprocessing import MultiLabelBinarizer
#import sklearn.metrics.pairwise as pw
#from gensim.models import CoherenceModel
#from gensim.corpora.dictionary import Dictionary
#import model.evaluate as eval
#import model.data_lstm as data
#seed = 42
#tf_op_seed = 1234
#np.random.seed(seed)
#tf.set_random_seed(seed)
seed = 42
tf.set_random_seed(seed)
np.random.seed(seed)
tf_op_seed = 42
#learning_rate = 5e-5
#batch_size = 64
#n_hidden = 256
#fixed_topic_params
#n_topic = 150
#n_sample = 1
#non_linearity = tf.nn.tanh
non_linearity = tf.nn.sigmoid
######
class NVDM(object):
""" Neural Variational Document Model -- BOW VAE.
"""
#def __init__(self, topic_params, prior_embeddings=None, initializer_nvdm=None):
def __init__(self, topic_params, x, mask , topic_vocab_size, prior_embeddings=None, initializer_nvdm=None):
#self.vocab_size = topic_params.TM_vocab_length
self.vocab_size = topic_vocab_size
self.n_hidden = topic_params.hidden_size_TM
self.n_topic = topic_params.n_topic
self.n_sample = topic_params.n_sample
self.non_linearity = non_linearity
self.learning_rate = topic_params.nvdm_learning_rate
self.batch_size = topic_params.nvdm_batch_size
self.x = x
self.mask = mask
#self.x = tf.placeholder(tf.float32, [None, self.vocab_size], name='x')
#self.mask = tf.placeholder(tf.float32, [None], name='mask') # mask paddings
#if topic_params.use_sent_topic_rep:
#self.x_sent = tf.placeholder(tf.float32, [None, None, self.vocab_size], name='x_sent')
#if topic_params.use_topic_embedding:
# self.x_doc_mask = tf.placeholder(tf.float32, [None, self.vocab_size], name='x_doc_mask')
#self.input_batch_size = tf.placeholder(tf.int32, (), name='input_batch_size')
self.input_batch_size = tf.shape(self.x)[0]
#if topic_params.use_sent_topic_rep:
# self.input_batch_size_sent = tf.shape(self.x_sent)[0]
# self.input_batch_len_sent = tf.shape(self.x_sent)[1]
# self.batch_size_sent = self.input_batch_size_sent * self.input_batch_len_sent
# encoder
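        # The encoder maps the bag-of-words input to the mean and log-variance of a diagonal Gaussian over the topic space.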
with tf.variable_scope('TM_encoder', reuse=tf.AUTO_REUSE):
self.enc_vec = utils.mlp(self.x, [self.n_hidden], self.non_linearity, initializer=initializer_nvdm[0])
#self.enc_vec = utils.mlp(self.x, [self.n_hidden, self.n_hidden], self.non_linearity, initializer=initializer_nvdm[0])
#self.enc_vec = utils.mlp(self.x, [self.n_hidden, self.n_hidden], self.non_linearity)
self.mean = utils.nvdm_linear(self.enc_vec,
self.n_topic,
scope='mean',
matrix_initializer=initializer_nvdm[1][0],
bias_initializer=initializer_nvdm[1][1])
self.logsigm = utils.nvdm_linear(self.enc_vec,
self.n_topic,
bias_start_zero=True,
matrix_start_zero=True,
scope='logsigm',
matrix_initializer=initializer_nvdm[2][0],
bias_initializer=initializer_nvdm[2][1])
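            # KL divergence between the posterior N(mean, exp(logsigm)^2) and the standard normal prior, summed over topics per document.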
self.kld = -0.5 * tf.reduce_sum(1 - tf.square(self.mean) + 2 * self.logsigm - tf.exp(2 * self.logsigm), 1)
#self.kld = self.mask*self.kld # mask paddings
self.kld = tf.multiply(self.mask, self.kld, name='kld') # mask paddings
#if topic_params.use_sent_topic_rep:
# self.x_sent_reshape = tf.reshape(self.x_sent, [-1, self.vocab_size])
# self.enc_vec_sent = utils.mlp(self.x_sent_reshape, [self.n_hidden], self.non_linearity)
# #self.enc_vec = utils.mlp(self.x, [self.n_hidden, self.n_hidden], self.non_linearity)
# self.mean_sent = utils.nvdm_linear(self.enc_vec_sent, self.n_topic, scope='mean')
# self.logsigm_sent = utils.nvdm_linear(self.enc_vec_sent,
# self.n_topic,
# bias_start_zero=True,
# matrix_start_zero=True,
# scope='logsigm')
#if topic_params.prior_emb_for_topics:
# W_prior = tf.get_variable(
# 'embeddings_TM_prior',
# dtype=tf.float32,
# initializer=prior_embeddings,
# trainable=False
# )
"""
W_prior_proj = tf.get_variable(
'embeddings_TM_prior_proj',
[prior_embeddings.shape[1], self.n_topic],
dtype=tf.float32,
trainable=False
)
W_prior = tf.matmul(W_prior, W_prior_proj, name='W_prior_projected')
"""
with tf.variable_scope('TM_decoder', reuse=tf.AUTO_REUSE):
if self.n_sample == 1:
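                # Reparameterization trick: doc_vec = mean + exp(logsigm) * eps with eps ~ N(0, I).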
eps = tf.random_normal((self.input_batch_size, self.n_topic), mean=0.0, stddev=1.0, seed=seed)
#doc_vec = tf.mul(tf.exp(self.logsigm), eps) + self.mean
## Hidden representation to be used in BERT
self.doc_vec = tf.add(tf.multiply(tf.exp(self.logsigm), eps), self.mean, name='doc_hidden')
self.last_h = self.doc_vec
logits_projected, self.decoding_matrix = utils.nvdm_linear(self.doc_vec,
self.vocab_size,
scope='projection',
get_matrix=True,
matrix_initializer=initializer_nvdm[3][0],
bias_initializer=initializer_nvdm[3][1])
logits = tf.nn.log_softmax(logits_projected)
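                # Multinomial reconstruction loss: negative log-likelihood of the observed word counts under the decoded word distribution.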
self.recons_loss = -tf.reduce_sum(tf.multiply(logits, self.x), 1)
"""
if topic_params.use_topic_embedding:
#self.last_h_topic_emb = utils.nvdm_linear(tf.nn.softmax(self.last_h, axis=1), self.vocab_size, scope='projection')
#self.top_k = tf.nn.top_k(self.decoding_matrix, k=topic_params.use_k_topic_words, sorted=False)
topics_masked = tf.multiply(tf.expand_dims(self.x_doc_mask, axis=1), tf.expand_dims(self.decoding_matrix, axis=0), name='topics_masked')
self.top_k = tf.nn.top_k(topics_masked, k=topic_params.use_k_topic_words, sorted=False)
if topic_params.prior_emb_for_topics:
self.top_k_embeddings = tf.nn.embedding_lookup(W_prior, self.top_k.indices)
self.topic_emb_size = prior_embeddings.shape[1]
#self.topic_emb_size = prior_embeddings.shape[1] * topic_params.use_k_topics
#self.topic_emb_size = prior_embeddings.shape[1] + self.n_topic
#self.topic_emb_size = self.n_topic
#self.topic_emb_size = self.n_topic * 2
else:
self.top_k_embeddings = tf.nn.embedding_lookup(tf.transpose(self.decoding_matrix), self.top_k.indices)
#self.topic_emb_size = self.n_topic
self.topic_emb_size = self.n_topic * 2
#self.top_k_embeddings = tf.multiply(tf.expand_dims(tf.nn.softmax(self.top_k.values, axis=1), axis=2), self.top_k_embeddings)
#self.temp_1 = tf.expand_dims(tf.nn.softmax(self.top_k.values, axis=2), axis=2)
#self.topic_embeddings = tf.squeeze(tf.matmul(self.temp_1, self.top_k_embeddings), axis=2, name='topic_embeddings')
#self.topic_embeddings = tf.reduce_sum(self.top_k_embeddings, axis=1, name='topic_embeddings')
#self.topic_embeddings = tf.reduce_mean(self.top_k_embeddings, axis=1, name='topic_embeddings')
self.topic_embeddings = tf.reduce_mean(self.top_k_embeddings, axis=2, name='topic_embeddings')
if topic_params.use_k_topics > 0:
# Masking document topic proportion vector
top_k_h_values, top_k_h_indices = tf.nn.top_k(self.last_h, k=topic_params.use_k_topics, sorted=False, name='top_k_h')
row_numbers = tf.tile(tf.expand_dims(tf.range(0, self.input_batch_size), 1), [1, topic_params.use_k_topics], name='row_numbers')
full_indices = tf.concat([tf.expand_dims(row_numbers, -1), tf.expand_dims(top_k_h_indices, -1)], axis=2)
full_indices = tf.reshape(full_indices, [-1, 2], name='full_indices')
#mask_updates = tf.ones([self.input_batch_size * topic_params.use_k_topics], dtype=tf.float32, name='mask_updates')
#new_mask = tf.scatter_nd(full_indices, mask_updates, [self.input_batch_size, self.n_topic], name='new_mask')
#last_h_softmax = tf.multiply(tf.nn.softmax(self.last_h, axis=1), new_mask, name='last_h_softmax')
last_h_softmax = tf.scatter_nd(
full_indices,
tf.reshape(tf.nn.softmax(top_k_h_values, axis=1), [-1]),
#tf.ones([self.input_batch_size * topic_params.use_k_topics], dtype=tf.float32),
[self.input_batch_size, self.n_topic],
name='last_h_softmax'
)
else:
last_h_softmax = tf.nn.softmax(self.last_h, axis=1, name='last_h_softmax')
#last_h_softmax = self.last_h
#self.last_h_topic_emb = tf.matmul(last_h_softmax, self.topic_embeddings, name='last_h_topic_emb')
self.last_h_topic_emb = tf.squeeze(tf.matmul(tf.expand_dims(last_h_softmax, axis=1), self.topic_embeddings), axis=1, name='last_h_topic_emb')
#temp = tf.nn.embedding_lookup(self.topic_embeddings, top_k_h_indices)
#self.last_h_topic_emb = tf.reduce_sum(temp, axis=1, name='last_h_topic_emb')
#self.last_h_topic_emb = tf.reshape(temp, [self.input_batch_size, self.topic_emb_size], name='last_h_topic_emb')
#self.last_h_topic_emb = tf.concat([self.last_h_topic_emb, last_h_softmax], axis=1)
#self.last_h_topic_emb = tf.concat([self.last_h_topic_emb, self.last_h], axis=1)
"""
else:
#eps = tf.random_normal((self.n_sample*self.batch_size, self.n_topic), mean=0.0, stddev=1.0)
"""
eps = tf.random_normal((self.n_sample*self.input_batch_size, self.n_topic), mean=0.0, stddev=1.0, seed=seed)
eps_list = tf.split(eps, self.n_sample, 0)
recons_loss_list = []
for i in range(self.n_sample):
if i > 0: tf.get_variable_scope().reuse_variables()
curr_eps = eps_list[i]
doc_vec = tf.multiply(tf.exp(self.logsigm), curr_eps) + self.mean
logits = tf.nn.log_softmax(utils.nvdm_linear(doc_vec, self.vocab_size, scope='projection'))
recons_loss_list.append(-tf.reduce_sum(tf.multiply(logits, self.x), 1))
self.recons_loss = tf.add_n(recons_loss_list) / self.n_sample
"""
eps = tf.random_normal((self.n_sample*self.input_batch_size, self.n_topic), mean=0.0, stddev=1.0, seed=seed)
eps_list = tf.split(eps, self.n_sample, 0)
recons_loss_list = []
doc_vec_list = []
for i in range(self.n_sample):
if i > 0: tf.get_variable_scope().reuse_variables()
curr_eps = eps_list[i]
doc_vec = tf.add(tf.multiply(tf.exp(self.logsigm), curr_eps), self.mean)
doc_vec_list.append(doc_vec)
logits = tf.nn.log_softmax(utils.nvdm_linear(doc_vec, self.vocab_size, scope='projection'))
recons_loss_list.append(-tf.reduce_sum(tf.multiply(logits, self.x), 1))
self.recons_loss = tf.add_n(recons_loss_list) / self.n_sample
self.doc_vec = tf.add_n(doc_vec_list) / self.n_sample
self.last_h = self.doc_vec
""""
if topic_params.use_sent_topic_rep:
if self.n_sample == 1:
eps_sent = tf.random_normal((self.batch_size_sent, self.n_topic), mean=0.0, stddev=1.0, seed=seed)
self.last_h_sent = tf.add(tf.multiply(tf.exp(self.logsigm_sent), eps_sent), self.mean_sent, name='sent_hidden')
self.last_h_sent = tf.reshape(self.last_h_sent, [self.input_batch_size_sent, self.input_batch_len_sent, self.n_topic])
if topic_params.use_topic_embedding:
#self.last_h_topic_emb_sent = utils.nvdm_linear(tf.nn.softmax(self.last_h_sent, axis=1), self.vocab_size, scope='projection')
if topic_params.use_k_topics > 0:
# Masking sentence topic proportion vector
top_k_h_sent_values, top_k_h_sent_indices = tf.nn.top_k(self.last_h_sent, k=topic_params.use_k_topics, sorted=False, name='top_k_h_sent')
row_numbers_sent = tf.tile(tf.expand_dims(tf.range(0, self.batch_size_sent), 1), [1, topic_params.use_k_topics], name='row_numbers_sent')
full_indices_sent = tf.concat([tf.expand_dims(row_numbers_sent, -1), tf.expand_dims(top_k_h_sent_indices, -1)], axis=2)
full_indices_sent = tf.reshape(full_indices_sent, [-1, 2], name='full_indices_sent')
#mask_updates_sent = tf.ones([self.batch_size_sent * topic_params.use_k_topics], dtype=tf.float32, name='mask_updates_sent')
#new_mask_sent = tf.scatter_nd(full_indices_sent, mask_updates_sent, [self.batch_size_sent, self.n_topic], name='new_mask_sent')
#last_h_softmax_sent = tf.multiply(tf.nn.softmax(self.last_h_sent, axis=1), new_mask_sent, name='last_h_softmax_sent')
last_h_softmax_sent = tf.scatter_nd(full_indices_sent, tf.reshape(tf.nn.softmax(top_k_h_sent_values, axis=1), [-1]), [self.batch_size_sent, self.n_topic], name='last_h_softmax_sent')
else:
last_h_softmax_sent = tf.nn.softmax(self.last_h_sent, axis=2, name='last_h_softmax_sent')
self.last_h_topic_emb_sent = tf.matmul(last_h_softmax_sent, self.topic_embeddings, name='last_h_topic_emb_sent')
#self.last_h_topic_emb_sent = tf.concat([self.last_h_topic_emb_sent, self.last_h_sent], axis=2, name='last_h_topic_emb_sent')
#self.last_h_topic_emb_sent = tf.concat([self.last_h_topic_emb_sent, last_h_softmax_sent], axis=2, name='last_h_topic_emb_sent')
#self.last_h_topic_emb_sent = tf.reshape(self.last_h_topic_emb_sent, [self.input_batch_size_sent, self.input_batch_len_sent, self.vocab_size])
else:
print("Error: model_NVDM.py - Decoder")
sys.exit()
"""
#self.objective_TM = self.recons_loss + self.kld
#self.objective_TM = tf.add(self.recons_loss, self.kld, name='TM_loss_unnormed')
self.final_loss = tf.add(self.recons_loss, self.kld, name='TM_loss_unnormed')
self.objective_TM = tf.reduce_mean(self.final_loss)
"""
if topic_params.TM_uniqueness_loss:
## NVDM topic uniqueness loss
eye = tf.constant(np.eye(self.n_topic), dtype=tf.float32)
topicnorm = matrix / tf.sqrt(tf.reduce_sum(tf.square(self.decoding_matrix), 1, keepdims=True))
uniqueness = tf.reduce_max(tf.square(tf.matmul(topicnorm, tf.transpose(topicnorm)) - eye))
self.objective_TM += topic_params.alpha_uniqueness * uniqueness
"""
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
#fullvars = tf.trainable_variables()
#enc_vars = utils.variable_parser(fullvars, 'TM_encoder')
enc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='TM_encoder')
#dec_vars = utils.variable_parser(fullvars, 'TM_decoder')
dec_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='TM_decoder')
self.pretrain_saver = tf.train.Saver(enc_vars + dec_vars)
enc_grads = tf.gradients(self.objective_TM, enc_vars)
dec_grads = tf.gradients(self.objective_TM, dec_vars)
self.optim_enc = optimizer.apply_gradients(zip(enc_grads, enc_vars))
self.optim_dec = optimizer.apply_gradients(zip(dec_grads, dec_vars))
## Pretraining of NVDM-TM
def pretrain(self, dataset, topic_params, nvdm_datadir , session,
#training_epochs=1000, alternate_epochs=10):
#training_epochs=100, alternate_epochs=10):
training_epochs=20, alternate_epochs=10):
#training_epochs=1, alternate_epochs=1):
#log_dir = os.path.join(topic_params.model, 'logs_nvdm_pretrain')
#model_dir_ir_nvdm = os.path.join(topic_params.model, 'model_ir_nvdm_pretrain')
#model_dir_ppl_nvdm = os.path.join(topic_params.model, 'model_ppl_nvdm_pretrain')
log_dir = os.path.join(topic_params.output_dir, 'logs_nvdm_pretrain')
model_dir_ir_nvdm = os.path.join(topic_params.output_dir, 'model_ir_nvdm_pretrain')
model_dir_ppl_nvdm = os.path.join(topic_params.output_dir, 'model_ppl_nvdm_pretrain')
#model_dir_supervised = os.path.join(topic_params.model, 'model_supervised_nvdm_pretrain')
if not os.path.isdir(log_dir):
os.mkdir(log_dir)
if not os.path.isdir(model_dir_ir_nvdm):
os.mkdir(model_dir_ir_nvdm)
if not os.path.isdir(model_dir_ppl_nvdm):
os.mkdir(model_dir_ppl_nvdm)
#if not os.path.isdir(model_dir_supervised):
# os.mkdir(model_dir_supervised)
#train_url = os.path.join(topic_params.dataset, 'training_nvdm_docs_non_replicated.csv')
#dev_url = os.path.join(topic_params.dataset, 'validation_nvdm_docs_non_replicated.csv')
#test_url = os.path.join(topic_params.dataset, 'test_nvdm_docs_non_replicated.csv')
train_url = os.path.join(nvdm_datadir, 'training_nvdm_docs_non_replicated.csv')
dev_url = os.path.join(nvdm_datadir, 'validation_nvdm_docs_non_replicated.csv')
test_url = os.path.join(nvdm_datadir, 'test_nvdm_docs_non_replicated.csv')
train_set, train_count, train_labels, train_doc_ids = utils.data_set(train_url, topic_params)
test_set, test_count, test_labels, test_doc_ids = utils.data_set(test_url, topic_params)
dev_set, dev_count, dev_labels, dev_doc_ids = utils.data_set(dev_url, topic_params)
dev_batches = utils.create_batches(len(dev_set), self.batch_size, shuffle=False)
#dev_batches = utils.create_batches(len(dev_set), 512, shuffle=False)
test_batches = utils.create_batches(len(test_set), self.batch_size, shuffle=False)
#test_batches = utils.create_batches(len(test_set), 512, shuffle=False)
        # Document labels used by the IR evaluation (eval.evaluate) further below.
        training_labels = np.array(
            [[y] for y, _ in dataset.rows('training_nvdm_docs_non_replicated', num_epochs=1)]
        )
        validation_labels = np.array(
            [[y] for y, _ in dataset.rows('validation_nvdm_docs_non_replicated', num_epochs=1)]
        )
        test_labels = np.array(
            [[y] for y, _ in dataset.rows('test_nvdm_docs_non_replicated', num_epochs=1)]
        )
patience = topic_params.nvdm_patience
patience_count = 0
best_dev_ppl = np.inf
best_test_ppl = np.inf
best_val_nvdm_IR = -1.0
best_test_nvdm_IR = -1.0
enc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='TM_encoder')
dec_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='TM_decoder')
self.pretrain_saver = tf.train.Saver(enc_vars + dec_vars)
ppl_model = False
ir_model = False
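        # Pretraining loop: alternate decoder/encoder updates, validate perplexity (and optionally IR), and checkpoint the best models.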
for epoch in range(training_epochs):
epoch_counter = epoch + 1
train_batches = utils.create_batches(len(train_set), self.batch_size, shuffle=True)
#train_batches = utils.create_batches(len(train_set), 512, shuffle=True)
#-------------------------------
# train
for switch in range(0, 2):
if switch == 0:
optim = self.optim_dec
print_mode = 'updating decoder'
else:
optim = self.optim_enc
print_mode = 'updating encoder'
for i in range(alternate_epochs):
print_ppx, print_ppx_perdoc, print_kld = self.run_epoch(
train_batches,
train_set,
train_count,
topic_params,
session,
optimizer=optim
)
print('| Epoch train: {:d} |'.format(epoch_counter),
print_mode, '{:d}'.format(i),
'| Corpus Perplexity: {:.5f}'.format(print_ppx), # perplexity for all docs
'| Per doc Perplexity: {:.5f}'.format(print_ppx_perdoc), # perplexity for per doc
'| KLD: {:.5}'.format(print_kld))
if epoch_counter >= 1 and epoch_counter % topic_params.nvdm_validation_ppl_freq == 0:
ppl_model = True
print_ppx, print_ppx_perdoc, print_kld = self.run_epoch(
dev_batches,
dev_set,
dev_count,
topic_params,
session
)
if print_ppx_perdoc < best_dev_ppl:
#if print_ppx_perdoc <= best_dev_ppl:
best_dev_ppl = print_ppx_perdoc
print("Saving best model.")
self.pretrain_saver.save(session, model_dir_ppl_nvdm + '/model_ppl_nvdm_pretrain', global_step=1)
self.save_to_s3_TM(topic_params)
patience_count = 0
else:
patience_count += 1
print('| Epoch dev: {:d} |'.format(epoch_counter),
'| Corpus Perplexity: {:.9f} |'.format(print_ppx),
'| Per doc Perplexity: {:.5f} |'.format(print_ppx_perdoc),
'| KLD: {:.5} |'.format(print_kld),
'| Best dev PPL: {:.5} |'.format(best_dev_ppl))
with open(log_dir + "/logs_ppl_nvdm_pretrain.txt", "a") as f:
f.write('| Epoch Val: {:d} || Val Corpus PPL: {:.9f} || Val Per doc PPL: {:.5f} || Best Val PPL: {:.5} || KLD Val: {:.5} |\n'.format(epoch+1, print_ppx, print_ppx_perdoc, best_dev_ppl, print_kld))
if epoch_counter >= 1 and epoch_counter % topic_params.nvdm_validation_ir_freq == 0:
ir_model = True
validation_vectors_nvdm = self.hidden_vectors(
#dataset.batches_nvdm_LM('validation_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label),
dataset.batches_nvdm_LM('validation_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, self.vocab_size, num_epochs=1, multilabel=topic_params.multilabel),
topic_params,
session
)
training_vectors_nvdm = self.hidden_vectors(
#dataset.batches_nvdm_LM('training_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label),
dataset.batches_nvdm_LM('training_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, self.vocab_size, num_epochs=1, multilabel=topic_params.multilabel),
topic_params,
session
)
val_nvdm_ir, _ = eval.evaluate(
training_vectors_nvdm,
validation_vectors_nvdm,
training_labels,
validation_labels,
recall=[0.02],
num_classes=topic_params.nvdm_num_classes,
multi_label=topic_params.multilabel
)
val_nvdm_ir = val_nvdm_ir[0]
# Saving model and Early stopping on IR
if val_nvdm_ir > best_val_nvdm_IR:
best_val_nvdm_IR = val_nvdm_ir
print('saving: {}'.format(model_dir_ir_nvdm))
self.pretrain_saver.save(session, model_dir_ir_nvdm + '/model_ir_nvdm_pretrain', global_step=1)
self.save_to_s3_TM(topic_params)
# patience_count = 0
#else:
# patience_count += 1
print("Epoch: %i, Val NVDM IR: %s, best val NVDM IR: %s\n" %
(epoch_counter, val_nvdm_ir, best_val_nvdm_IR))
# logging information
with open(log_dir + "/logs_ir_nvdm_pretrain.txt", "a") as f:
f.write("Epoch: %i, Val NVDM IR: %s, best val NVDM IR: %s\n" %
(epoch_counter, val_nvdm_ir, best_val_nvdm_IR))
if patience_count > patience:
print("Early stopping.")
break
if ppl_model:
print("Calculating Test PPL.")
self.pretrain_saver.restore(session, tf.train.latest_checkpoint(model_dir_ppl_nvdm))
print_ppx, print_ppx_perdoc, print_kld = self.run_epoch(
test_batches,
test_set,
test_count,
topic_params,
session
)
print('| Corpus Perplexity: {:.9f}'.format(print_ppx),
'| Per doc Perplexity: {:.5f}'.format(print_ppx_perdoc),
'| KLD: {:.5}'.format(print_kld))
with open(log_dir + "/logs_ppl_nvdm_pretrain.txt", "a") as f:
f.write('\n\nTest Corpus PPL: {:.9f} || Test Per doc PPL: {:.5f} || KLD Test: {:.5} |\n'.format(print_ppx, print_ppx_perdoc, print_kld))
if ir_model:
print("Calculating Test IR.")
self.pretrain_saver.restore(session, tf.train.latest_checkpoint(model_dir_ir_nvdm))
test_vectors_nvdm = self.hidden_vectors(
#dataset.batches_nvdm_LM('test_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label),
dataset.batches_nvdm_LM('test_nvdm_docs_non_replicated', topic_params.nvdm_batch_size, self.vocab_size, num_epochs=1, multilabel=topic_params.multilabel),
topic_params,
session
)
test_nvdm_ir, _ = eval.evaluate(
training_vectors_nvdm,
test_vectors_nvdm,
training_labels,
test_labels,
recall=[0.02],
num_classes=topic_params.nvdm_num_classes,
multi_label=topic_params.multilabel
)
test_nvdm_ir = test_nvdm_ir[0]
print("Epoch: %i, Test NVDM IR: %s\n" %
(epoch_counter, test_nvdm_ir))
# logging information
with open(log_dir + "/logs_ir_nvdm_pretrain.txt", "a") as f:
f.write("Epoch: %i, Test NVDM IR: %s\n" %
(epoch_counter, test_nvdm_ir))
def hidden_vectors(self, data, topic_params, session):
vecs = []
for y, x, count, mask in data:
feed_dict = {
self.x.name: x,
self.mask.name: mask
#self.input_batch_size: x.shape[0]
}
vecs.extend(
session.run([self.last_h], feed_dict=feed_dict)[0]
)
return np.array(vecs)
def topic_dist(self, input_batches, input_set, input_doc_ids , input_count, topic_params, session):
topic_dist = []
mask_list = []
doc_id_list = []
for idx_batch in input_batches:
data_batch, count_batch, mask = utils.fetch_data(
input_set, input_count, idx_batch, self.vocab_size, topic_params)
input_feed = {self.x.name: data_batch,
self.mask.name: mask}
doc_vec = session.run([self.doc_vec], input_feed)
topic_dist.extend(list(doc_vec[0]))
mask_list.extend(list(mask))
for idx in idx_batch:
if idx != -1:
doc_id_list.append(input_doc_ids[idx])
else:
doc_id_list.append(-1)
assert len(topic_dist) == len(doc_id_list)
topic_dist_unique = {}
for id, dist in zip(doc_id_list, topic_dist):
if id != -1:
topic_dist_unique[str(id)] = dist
"""
topic_dist_unique = []
for num, m in enumerate(mask_list):
if m!= 0.0:
topic_dist_unique.append(topic_dist[num])
topic_dist_unique = np.asarray(topic_dist_unique)
"""
return topic_dist_unique, mask_list
def save_to_s3_TM(self, topic_params):
pass
def run_epoch(self, input_batches, input_set, input_count, topic_params, session, optimizer=None):
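        # One pass over the batches: trains when an optimizer is passed, otherwise only evaluates perplexity and KLD.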
loss_sum = 0.0
ppx_sum = 0.0
kld_sum = 0.0
word_count = 0
doc_count = 0
for idx_batch in input_batches:
data_batch, count_batch, mask = utils.fetch_data(
input_set, input_count, idx_batch, self.vocab_size, topic_params)
#import pdb; pdb.set_trace()
input_feed = {self.x.name: data_batch,
self.mask.name: mask}#,
#self.input_batch_size: data_batch.shape[0]
#}
if not optimizer is None:
_, (loss, kld) = session.run((optimizer,
[self.final_loss, self.kld]),
input_feed)
else:
loss, kld = session.run([self.final_loss, self.kld],
input_feed)
loss_sum += np.sum(loss)
kld_sum += np.sum(kld) / np.sum(mask)
word_count += np.sum(count_batch)
# to avoid nan error
count_batch = np.add(count_batch, 1e-12)
# per document loss
ppx_sum += np.sum(np.divide(loss, count_batch))
doc_count += np.sum(mask)
print_ppx = np.exp(loss_sum / word_count)
print_ppx_perdoc = np.exp(ppx_sum / doc_count)
print_kld = kld_sum/len(input_batches)
return print_ppx, print_ppx_perdoc, print_kld
def run_epoch_v2(self, data, topic_params, session):
# train_y, train_x, train_count, train_mask = dataset.batches_nvdm_LM(training_data_filename_TM, topic_params.batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label)
# val_y, val_x, val_count, val_mask = dataset.batches_nvdm_LM(validation_data_filename_TM, topic_params.batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label)
# test_y, test_x, test_count, test_mask = dataset.batches_nvdm_LM(test_data_filename_TM, topic_params.batch_size, topic_params.TM_vocab_length, num_epochs=1, multilabel=topic_params.multi_label)
kld_sum = []
this_nvdm_loss_normed = []
this_nvdm_loss_unnormed = []
this_nvdm_words = []
for nvdm_y, nvdm_x, nvdm_count, nvdm_mask in data:
nvdm_feed_dict = {
model.topic_model.x.name: nvdm_x,
model.topic_model.mask.name: nvdm_mask#,
#model.topic_model.input_batch_size: nvdm_x.shape[0]
}
if topic_params.supervised:
sys.exit()
else:
loss, kld = session.run([model.topic_model.final_loss,
model.topic_model.kld],
feed_dict=nvdm_feed_dict)
nvdm_count = np.add(nvdm_count, 1e-12)
this_nvdm_loss_normed.extend(np.divide(loss, nvdm_count))
this_nvdm_loss_unnormed.extend(loss)
this_nvdm_words.append(np.sum(nvdm_count))
kld_sum.append(np.sum(kld) / np.sum(nvdm_mask))
total_nvdm_nll = np.mean(this_nvdm_loss_unnormed)
#total_nvdm_ppl = np.exp(np.sum(this_nvdm_loss_unnormed) / np.sum(this_val_nvdm_words))
total_nvdm_ppl = np.exp(np.mean(this_nvdm_loss_normed))
print_kld = np.mean(kld_sum)
return total_nvdm_nll, total_nvdm_ppl, print_kld
| [] |
2024-01-10 | afonsobspinto/Master-Informatics-and-Computer-Engineering | ICT~src~topic_modeling~topic_modeling.py | import os
import pickle
import re
from datetime import datetime
from pprint import pprint
import gensim
import gensim.corpora as corpora
import pandas as pd
from gensim.models import CoherenceModel
from gensim.utils import simple_preprocess
import spacy
import pyLDAvis
import pyLDAvis.gensim
import matplotlib.pyplot as plt
from settings import MALLET_PATH, MODELS_PATH
from utils import ENGLISH_STOPWORDS, log
import matplotlib.colors as mcolors
from wordcloud import WordCloud
def sent_to_words(sentences):
for sentence in sentences:
yield gensim.utils.simple_preprocess(str(sentence), deacc=True) # deacc=True removes punctuations
def remove_stopwords(texts):
return [[word for word in simple_preprocess(str(doc)) if word not in ENGLISH_STOPWORDS] for doc in texts]
class TopicModeling:
def __init__(self, df, original_path):
self.df = df
self.original_path = original_path
self.data = df.drop_duplicates().tweet.values.tolist()
self.data_words = list(sent_to_words(self.data))
self._generate_models()
self._save_path()
self.lda = None
self.mod = None
self.df_topic_keywords = None
def _save_path(self):
self.id = re.sub(r'-| |:|\.', '_', str(datetime.now()))
self.save_path = f"{MODELS_PATH}/{self.id}"
os.makedirs(self.save_path)
def _generate_models(self):
data_words_nostops = remove_stopwords(self.data_words)
data_words_bigrams = self._make_bigrams(data_words_nostops)
self.data_lemmatized = self._lemmatization(data_words_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
self.id2word = corpora.Dictionary(self.data_lemmatized)
texts = self.data_lemmatized
self.corpus = [self.id2word.doc2bow(text) for text in texts]
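        # corpus holds each document as (token id, count) pairs, the bag-of-words format gensim's LDA models expect.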
def model(self, method="mallet", num_topics=6, save=False):
log(f"Modeling with {num_topics} num_topics")
if method == "mallet":
self.mod = self._lda_mallet(num_topics)
else:
self.mod = self._lda_model(num_topics)
if save:
self.save_lda()
def _lda_mallet(self, num_topics):
# Download File: http://mallet.cs.umass.edu/dist/mallet-2.0.8.zip
self.lda = gensim.models.wrappers.LdaMallet(MALLET_PATH, corpus=self.corpus,
num_topics=num_topics, id2word=self.id2word)
return gensim.models.wrappers.ldamallet.malletmodel2ldamodel(self.lda)
def _lda_model(self, num_topics):
self.lda = gensim.models.ldamodel.LdaModel(corpus=self.corpus,
id2word=self.id2word,
num_topics=num_topics,
random_state=100,
update_every=1,
chunksize=100,
passes=10,
alpha='auto',
per_word_topics=True)
return self.lda
def get_coherence(self):
        # c_v topic coherence measures how interpretable the topics are: higher is better.
coherence_model_lda = CoherenceModel(model=self.lda, texts=self.data_lemmatized,
dictionary=self.id2word, coherence='c_v')
coherence_lda = coherence_model_lda.get_coherence()
return coherence_lda
def _make_bigrams(self, texts):
bigram = gensim.models.Phrases(self.data_words, min_count=5, threshold=100) # higher threshold fewer phrases.
bigram_mod = gensim.models.phrases.Phraser(bigram)
return [bigram_mod[doc] for doc in texts]
@staticmethod
def _lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
texts_out = []
nlp = spacy.load('en', disable=['parser', 'ner'])
for sent in texts:
doc = nlp(" ".join(sent))
texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
return texts_out
def visualize(self, num_topics):
if self.mod and self.lda:
pprint(self.lda.print_topics())
ldavis_data_filepath = os.path.join(self.save_path + '/ldavis_prepared_' + str(num_topics)
+ "_" + self.id)
ldavis_prepared = pyLDAvis.gensim.prepare(self.mod, self.corpus, self.id2word)
with open(ldavis_data_filepath, 'wb') as f:
log("Dumping pyLDAvis")
pickle.dump(ldavis_prepared, f)
log("Saving pyLDAvis html")
pyLDAvis.save_html(ldavis_prepared, ldavis_data_filepath + '.html')
def compute_best_model(self, stop, start=2, step=3, show=True):
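        # Try topic counts in range(start, stop, step) and keep the model with the highest coherence score.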
log("Computing best model")
coherence_values = []
model_list = []
for num_topics in range(start, stop, step):
self.model(num_topics=num_topics)
model_list.append(self.lda)
coherence_values.append(self.get_coherence())
best_index = coherence_values.index(max(coherence_values))
num_topics = range(start, stop, step)[best_index]
self.lda = model_list[best_index]
if show:
self.save_plot_coherence_scores(stop, start, step, coherence_values)
self.print_coherence_values(stop, start, step, coherence_values)
self.visualize(num_topics)
self.save_lda()
return num_topics
def save_lda(self):
log("Saving lda")
self.lda.save(f"{self.save_path}/lda.model")
def save_plot_coherence_scores(self, stop, start, step, coherence_values):
x = range(start, stop, step)
plt.plot(x, coherence_values)
plt.xlabel("Num Topics")
plt.ylabel("Coherence score")
plt.legend("coherence_values", loc='best')
plt.savefig(f"{self.save_path}/{start}_{stop}_{step}.png")
@staticmethod
def print_coherence_values(stop, start, step, coherence_values):
x = range(start, stop, step)
for m, cv in zip(x, coherence_values):
print("Num Topics =", m, " has Coherence Value of", round(cv, 4))
def format_topics_sentences(self):
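        # Build one row per document: dominant topic, its contribution, its keywords, the original text and the tweet id.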
topics_df = pd.DataFrame()
# Get main topic in each document
for i, row in enumerate(self.lda[self.corpus]):
row = sorted(row, key=lambda x: (x[1]), reverse=True)
# Get the Dominant topic, Perc Contribution and Keywords for each document
for j, (topic_num, prop_topic) in enumerate(row):
if j == 0: # => dominant topic
wp = self.lda.show_topic(topic_num)
topic_keywords = ", ".join([word for word, prop in wp])
topics_df = topics_df.append(pd.Series([int(topic_num), round(prop_topic, 4), topic_keywords]),
ignore_index=True)
else:
break
# Add original text to the end of the output
contents_ids = self._get_ids()
contents = pd.Series(self.data)
topics_df = pd.concat([topics_df, contents], axis=1)
topics_df = pd.concat([topics_df, contents_ids], axis=1)
topics_df.columns = ['Dominant_Topic', 'Perc_Contribution', 'Topic', 'Text', 'id']
return topics_df
def _get_ids(self):
cols = ['id', 'tweet', 'user', 'date']
original_data = pd.read_csv(self.original_path, names=cols)
data = pd.merge(original_data, self.df, on="id").drop_duplicates().id.values.tolist()
return pd.Series(data)
def save_dominant_topics_per_sentence(self):
log("Dominant topics per sentence")
df_topic_keywords = self.get_topic_keywords_table()
df_dominant_topic = df_topic_keywords.reset_index()
df_dominant_topic.to_csv(f"{self.save_path}/dominant_topics_per_sentence.csv", index=False)
log("Dominant topics per sentence saved")
def save_representative_sentence_per_topic(self):
log("Representative sentence per topic")
df_topic_keywords = self.get_topic_keywords_table()
topics_sorteddf_mallet = pd.DataFrame()
stopics_outdf_grpd = df_topic_keywords.groupby('Dominant_Topic')
for i, grp in stopics_outdf_grpd:
topics_sorteddf_mallet = pd.concat([topics_sorteddf_mallet,
grp.sort_values(['Perc_Contribution'], ascending=[0]).head(1)], axis=0)
topics_sorteddf_mallet.reset_index(drop=True, inplace=True)
topics_sorteddf_mallet.to_csv(f"{self.save_path}/representative_sentence_per_topic.csv", index=False)
log("Representative sentence per topic saved")
def get_topic_keywords_table(self):
if self.df_topic_keywords is None:
self.df_topic_keywords = self.format_topics_sentences()
return self.df_topic_keywords
def save_word_cloud(self, num_topics):
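        # Draw word clouds of each topic's top weighted words, six topics per saved figure.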
pages = int(num_topics / 6)
topics = self.mod.show_topics(formatted=False, num_topics=num_topics)
index = 0
for i in range(0, pages):
cols = [color for name, color in mcolors.TABLEAU_COLORS.items()]
cloud = WordCloud(stopwords=ENGLISH_STOPWORDS,
background_color='white',
width=2500,
height=1800,
max_words=10,
colormap='tab10',
color_func=lambda *args, **kwargs: cols[i],
prefer_horizontal=1.0)
fig, axes = plt.subplots(3, 2, figsize=(10, 10), sharex=True, sharey=True)
for j, ax in enumerate(axes.flatten()):
fig.add_subplot(ax)
topic_words = dict(topics[index][1])
to_del = []
for key, value in topic_words.items():
if value == 0.0:
to_del.append(key)
for k in to_del:
del topic_words[k]
cloud.generate_from_frequencies(topic_words, max_font_size=300)
plt.gca().imshow(cloud)
plt.gca().set_title('Topic ' + str(index), fontdict=dict(size=16))
plt.gca().axis('off')
index += 1
plt.subplots_adjust(wspace=0, hspace=0)
plt.axis('off')
plt.margins(x=0, y=0)
plt.tight_layout()
plt.savefig(f"{self.save_path}/wordcloud{i}.png")
| [] |
2024-01-10 | nueramic/customer_feedback_platform | src~ai_recognition~chat_gpt_requests.py | import json
from sqlalchemy.engine import Engine
import pandas as pd
import os
import openai
from datetime import datetime
import toml
class AnalyzeFeedback:
def __init__(self, pg_conn: Engine, openai_api_key: str):
openai.api_key = openai_api_key
self.pg_conn = pg_conn
self.id_feedback = ''
self.chat_response = ''
self.gpt_config = toml.load('gpt_message_config.toml')
def analyze_feedback(self,
id_feedback: str,
text_feedback: str,
                         rating: int = 1,
                         max_rating: int = 1
):
"""
        :param id_feedback: unique identifier of the feedback record
        :param text_feedback: free-text feedback (truncated to 5000 characters before it is sent to GPT)
        :param rating: rating given by the customer
        :param max_rating: maximum possible rating on the scale
        :return: None; the model response is stored in self.chat_response
"""
message = f"""
{self.gpt_config['gpt_role']['instruction']}
отзыв: \n {text_feedback[:5000]}
оценка: {rating} / {max_rating}
"""
messages = [{'role': 'assistant', 'content': message}]
chat = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=messages, temperature=0.2
)
self.id_feedback: str = id_feedback
self.chat_response: str = chat.choices[0].message.content
def save_to_table(self):
"""
insert into table gpt response
"""
try:
resp_json = json.dumps(json.loads(self.chat_response), ensure_ascii=False)
df = pd.DataFrame(
{
'id_feedback': [self.id_feedback],
'json_gpt_resp_content': [resp_json],
'dtime_updated': [datetime.now()]
})
df.to_sql('ai_responses', self.pg_conn, schema='prod', index=False, if_exists='append')
return resp_json
except Exception as e:
print(e)
| [
"f\"\"\"\n {self.gpt_config['gpt_role']['instruction']}\n отзыв: \\n {text_feedback[:5000]} \n оценка: {rating} / {max_rating}\n "
] |
2024-01-10 | Richard-Gidi/app | Price_Estimator.py | #!/usr/bin/env python
# coding: utf-8
##IMPORTING RELEVANT VARIABLES
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
import seaborn as sns
plt.style.use('ggplot')
import warnings
warnings.filterwarnings('ignore')
import plotly.express as px
import streamlit as st
import openai
from datetime import date, timedelta
from streamlit_option_menu import option_menu
import plotly.graph_objects as go
import plotly.express as px
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.statespace.sarimax import SARIMAX
st.set_option('deprecation.showPyplotGlobalUse', False)
# Calculate the start and end dates
#end_date_ = date.today()
#start_date_ = end_date_ - timedelta(days=1)
# Format the dates as strings in "YYYY-MM-DD" format
#start_date_str_ = start_date_.strftime("%Y-%m-%d")
#end_date_str_ = end_date_.strftime("%Y-%m-%d")
# Set up OpenAI API credentials
openai.api_key = st.secrets["auth_key"]
#!/usr/bin/env python
# coding: utf-8
def upload_file():
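    # Expects an Excel workbook; the first sheet is read with its column headers on the second row (header=1).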
uploaded_file = st.file_uploader("Choose a file")
if uploaded_file is not None:
data = pd.read_excel(uploaded_file, sheet_name=0, parse_dates=True, header=1)
#data = data.drop(columns=['Unnamed: 5', 'Unnamed: 6'])
data = data.dropna()
return data
def visualize_data(data):
st.subheader("Data Visualization")
columns = list(data.columns)
plt.rcParams["figure.figsize"] = [18, 10]
plt.rcParams["figure.autolayout"] = True
selected_columns = st.multiselect("Select columns to visualize", columns)
if len(selected_columns) > 0:
chart_type = st.selectbox("Select chart type", ["Line Plot", "Bar Plot", "Scatter Plot"])
if chart_type == "Bar Plot":
for column in selected_columns:
plt.bar(data.index, data[column], label=column)
plt.xlabel("Date")
plt.ylabel("Price")
plt.legend()
st.pyplot()
elif chart_type == "Line Plot":
fig = px.line(data, x="Date", y=selected_columns)
st.plotly_chart(fig)
elif chart_type == "Scatter Plot":
fig = px.scatter(data, x="Date", y=selected_columns)
st.plotly_chart(fig)
# Perform time series forecasting on selected_columns
for column in selected_columns:
# Split the data into train and test sets
train_data = data[column].iloc[:-15]
test_data = data[column].iloc[-15:]
# Define exogenous variables if available
exog_train = None # Modify this with your exogenous variables for the training set
exog_test = None # Modify this with your exogenous variables for the test set
# Convert the index to a DatetimeIndex
train_data.index = pd.to_datetime(train_data.index)
test_data.index = pd.to_datetime(test_data.index)
# Fit a SARIMA model
model = SARIMAX(train_data, order=(0, 0, 0), seasonal_order=(1, 0, 0, 12), exog=exog_train)
model_fit = model.fit()
# Forecast future values
forecast = model_fit.get_forecast(steps=15, exog=exog_test)
# Extract the forecasted values and confidence intervals
forecast_values = forecast.predicted_mean
confidence_intervals = forecast.conf_int()
# Convert confidence_intervals to DataFrame
confidence_intervals_df = pd.DataFrame(confidence_intervals, index=test_data.index)
# Plot the forecast
plt.plot(test_data.index, test_data, label="Actual")
plt.plot(test_data.index, forecast_values, label="Forecast")
plt.fill_between(test_data.index, confidence_intervals_df.iloc[:, 0], confidence_intervals_df.iloc[:, 1], alpha=0.3)
plt.xlabel("Date")
plt.ylabel("Price")
plt.legend()
st.pyplot()
# Function to handle user queries using ChatGPT
def handle_chatbot(query, data):
# ChatGPT API call
response = openai.Completion.create(
engine="text-davinci-003",
prompt=query,
max_tokens=50,
n=1,
stop=None,
temperature=0.7,
presence_penalty=0.2,
frequency_penalty=0.0,
)
return response.choices[0].text.strip()
def main():
st.set_page_config(page_title='Price Estimator')
st.sidebar.title("Main Menu")
selected = st.sidebar.selectbox("Select Option", ["Welcome", "Upload", "Estimator","Visualize",'Chatbot'])
if selected == 'Welcome':
st.write("# Welcome to Gidi's Price Estimator!👋")
st.markdown("""This web app was developed by Gidi Richard to help estimate oil prices for a coming window
given the price of the current window.""")
elif selected == "Visualize":
st.subheader('Visualize Data')
data = upload_file()
if data is None:
st.warning('Please upload a file first.')
return
visualize_data(data)
elif selected == 'Upload':
st.subheader('Upload Data')
data = upload_file()
if data is not None:
st.success('File uploaded successfully!')
elif selected == 'Estimator':
st.subheader('Price Estimator')
data = upload_file()
if data is None:
st.warning('Please upload a file first.')
return
data['date'] = pd.to_datetime(data['Date'])
st.subheader('OLD PRICING WINDOW')
start_date = st.date_input(label='Starting Date', format="YYYY-MM-DD").strftime('%Y-%m-%d')
end_date = st.date_input(label='End Date', format="YYYY-MM-DD").strftime('%Y-%m-%d')
date_range = data.loc[(data['date'] >= start_date) & (data['date'] <= end_date)]
df_columns = date_range[['Gasoline', 'Naphtha', 'Gasoil', 'LPG']]
data1 = df_columns.mean()
data1 = data1.reset_index()
data1 = data1.rename(columns={'index': 'Product', 0: 'Average'})
st.subheader('NEW PRICING WINDOW')
start_date2 = st.date_input(label='New Starting Date', format="YYYY-MM-DD").strftime('%Y-%m-%d')
end_date2 = st.date_input(label='New Ending Date', format="YYYY-MM-DD").strftime('%Y-%m-%d')
date_range2 = data.loc[(data['date'] >= start_date2) & (data['date'] <= end_date2)]
df_columns2 = date_range2[['Gasoline', 'Naphtha', 'Gasoil', 'LPG']]
data2 = df_columns2.mean()
data2 = data2.reset_index()
data2 = data2.rename(columns={'index': 'Product', 0: 'New Average'})
result = pd.concat([data1, data2], axis=1)
new_data = result.loc[:, ~result.T.duplicated(keep='first')]
new = new_data.T
new = new.reset_index()
new = new.drop('index', axis=1)
new = new.rename(columns={1: 'Naphtha', 0: 'Gasoline', 2: 'Gasoil', 3: 'LPG'})
new = new.drop(0)
final = new.pct_change().dropna()
st.subheader('CALCULATOR')
product = st.selectbox('Select Product', options=final.columns)
price = st.number_input(label='Current Price')
calculate_conversion = st.checkbox('Calculate GHS/L conversion')
if calculate_conversion:
volume_gasoil = 1180
volume_gasoline = 1300
volume_naphtha = 1351.35
volume_lpg = 1724.14
volume = None
if product == 'Gasoil':
volume = volume_gasoil
elif product == 'Gasoline':
volume = volume_gasoline
elif product == 'Naphtha':
volume = volume_naphtha
elif product == 'LPG':
volume = volume_lpg
else:
volume = 1.0
fx_rate = st.number_input(label='FX Rate')
if fx_rate is not None and volume is not None:
ghs_per_liter = ((new[product].values[1] + 80) / volume) * fx_rate
st.write(f'The GHS/L conversion for {product} is {ghs_per_liter:.2f}')
submit = st.button('Submit')
if submit:
percentage_change = final[product].values[0]
if product == 'Gasoil':
estimated_price = (percentage_change * price) + price
else:
estimated_price = (percentage_change * price) + price
st.write(f'The estimated price of {product} is Ghc {estimated_price:.2f}')
if percentage_change < 0:
st.write(f'The price of {product} has reduced by a percentage of {percentage_change * 100:.2f}')
else:
st.write(f'The price of {product} has increased by a percentage of {percentage_change * 100:.2f}')
elif selected == 'Chatbot':
st.subheader('Chatbot')
data = upload_file()
if data is None:
st.warning('Please upload a file first.')
return
query = st.text_input("Ask a question")
if query:
response = handle_chatbot(query, data)
st.write("Chatbot: ", response)
if __name__ == '__main__':
main()
| [] |
2024-01-10 | ustayready/DirectAI | direct.py | import openai
import argparse
import sys
import os
from prompt_toolkit import prompt
from prompt_toolkit.completion import WordCompleter
parser = argparse.ArgumentParser(description='Directly query ChatGPT using the API')
parser.add_argument('--key', type=str, help='OpenAI API key')
openai.api_key = os.getenv('OPENAI_API_KEY')
def main(args):
if not openai.api_key and not args.key:
parser.error('--key or OPENAI_API_KEY environment variable is required.')
elif not openai.api_key:
openai.api_key = args.key
main_prompt()
def main_prompt():
text = prompt('> ')
if text == 'quit':
sys.exit(1)
elif text == 'clear':
os.system('clear')
main_prompt()
else:
query_chatgpt(text)
def query_chatgpt(query):
prompt = query
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=0.5,
max_tokens=500,
top_p=1,
frequency_penalty=0.0,
presence_penalty=0.0,
stream=False,
)
response = response.choices[0]['text'].strip()
print(f'\n{response}\n')
main_prompt()
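# Example session sketch (shell commands are illustrative):
#   $ export OPENAI_API_KEY=sk-...
#   $ python direct.py
#   > what is a rainbow table?
#   (prints the completion, then prompts again; type 'quit' to exit or 'clear' to clear the screen)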
if __name__ == '__main__':
args = parser.parse_args()
main(args)
| [] |
2024-01-10 | CogComp/Zero_Shot_Schema_Induction | gpt2_flat.py | import os
import openai
#openai.api_key = "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"
#openai.api_key = "sk-tP9LtUEWkDAn9AuhdZuohNGjZnMjWEX2b7NBzPeP"
openai.api_key = "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"
import requests
import json
def SRL(text):
headers = {'Content-type':'application/json'}
SRL_response = requests.post('http://dickens.seas.upenn.edu:4039/annotate', json={"sentence": text}, headers=headers)
if SRL_response.status_code != 200:
print("SRL_response:", SRL_response.status_code)
try:
SRL_output = json.loads(SRL_response.text)
predicates = []
for view in SRL_output['views']:
if view['viewName'] in ['SRL_ONTONOTES', 'SRL_NOM_ALL']:
for constituent in view['viewData'][0]['constituents']:
if constituent['label'] == 'Predicate':
predicate = {}
predicate['predicate'] = constituent['properties']['predicate']
predicate['SenseNumber'] = constituent['properties']['SenseNumber']
predicate['sense'] = constituent['properties']['sense']
predicate['viewName'] = view['viewName']
predicates.append(predicate)
else:
predicates[-1][constituent['label']] = ' '.join(SRL_output['tokens'][constituent['start']:constituent['end']])
return predicates
except:
return []
import json
import requests
API_TOKEN = "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"
API_URL = "https://api-inference.huggingface.co/models/gpt2"
headers = {"Authorization": f"Bearer {API_TOKEN}"}
def query(payload):
data = json.dumps(payload)
response = requests.request("POST", API_URL, headers=headers, data=data)
return json.loads(response.content.decode("utf-8"))
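# Illustrative call (hypothetical prompt) against the Hugging Face Inference API; on success
# the response is a list of {"generated_text": ...} dicts:
#   out = query({"inputs": "Write a news headline about flooding, \"",
#                "parameters": {"max_length": 64, "num_return_sequences": 2}})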
def call_gpt2(prompt, event, n, temperature, max_length, presence_penalty, headline = None, subtopic = 0):
if not prompt:
if subtopic:
prompt="Subtopics of " + event + " are:\n\n1."
else:
if headline:
prompt="Write a news story titled \"" + headline + "\""
print("--- Generating text for '" + headline + "' ...")
else:
prompt="Write a news headline about " + event + ", \""
print("--- Generating headlines for '" + event + "' ...")
print("--- prompt:", prompt)
data = query(
{
"inputs": prompt,
"parameters": {"max_length": max_length,
"num_return_sequences": n,
},
}
)
return_text = []
for gt in data:
try:
return_text.append(gt['generated_text'].replace(prompt, ''))
except:
continue
return return_text, None
import nltk
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
class schema:
def __init__(self, name, hierarchy_num, generator, headline_num = 1, news_per_headline = 15, HowTo_num = 15):
self.name = name
self.hierarchy_num = hierarchy_num
self.generator = generator # call_openai_api
self.temperature = 0.9
self.stop = None
self.presence_penalty = 0.1
self.headline_num = headline_num
self.news_per_headline = news_per_headline
self.HowTo_num = HowTo_num
self.hierarchy = {name: {'subtopics': [], 'text': {}}}
def subtopic_gen(self, topic):
texts, response = self.generator(None, topic, 1, self.temperature, 64, self.presence_penalty, headline = None, subtopic = 1)
print("printing within subtopic_gen():", texts[0])
predicates = SRL(texts[0].replace('\n', ' '))
subtopics = set()
for predicate in predicates:
if len(subtopics) <= 4 and predicate['predicate'] not in stop_words:
if 'ARG1' in predicate.keys():
subtopics.add(predicate['predicate'] + ' ' + predicate['ARG1'] + " (" + topic + ")")
else:
subtopics.add(predicate['predicate'] + " (" + topic + ")")
return subtopics
def text_gen_helper(self, event, mode):
# mode 1: direct generation for steps
# mode 2: news-style text generation
# mode 3: how-to article generation
if mode == 1:
prompt = "Write essential steps for " + event + ":\n\n1."
texts, response = self.generator(prompt, event, 1, self.temperature, 256, self.presence_penalty)
return texts
if mode == 2:
news = []
headlines, response = self.generator(None, event, self.headline_num, self.temperature, 64, self.presence_penalty)
for headline in headlines:
end = headline.find("\"")
headline = headline[:end]
texts, response = self.generator(None, event, self.news_per_headline, self.temperature, 256, self.presence_penalty, headline)
for text in texts:
news.append(headline + ' ' + text)
return news
if mode == 3:
prompt = "How to make " + event
texts, response = self.generator(prompt, event, self.HowTo_num, self.temperature, 256, self.presence_penalty)
return texts
def text_gen(self, event):
return {'steps': self.text_gen_helper(event, 1),
'news': self.text_gen_helper(event, 2),
'HowTo': self.text_gen_helper(event, 3)}
def learning_corpus_gen(self):
if self.hierarchy_num >= 1:
self.hierarchy[self.name]['text'] = self.text_gen(self.name)
if self.hierarchy_num >= 2:
subtopics = self.subtopic_gen(self.name)
for subtopic in subtopics:
print("%%% subtopic of", self.name, ":", subtopic)
st_dict = {'subtopics': []}
st_dict['text'] = self.text_gen(subtopic)
self.hierarchy[self.name]['subtopics'].append({subtopic: st_dict})
if self.hierarchy_num == 3:
subsubtopics = self.subtopic_gen(subtopic)
for subsubtopic in subsubtopics:
sub_st_dict = {'subtopics': []}
sub_st_dict['text'] = self.text_gen(subsubtopic)
self.hierarchy[self.name]['subtopics'][-1][subtopic]['subtopics'].append({subsubtopic: sub_st_dict})
def print_hierarchy(self):
for i in self.hierarchy.keys():
print(i)
for subtopic in self.hierarchy[i]['subtopics']:
for j in subtopic.keys():
print(j)
for subsubtopic in subtopic[j]['subtopics']:
for k in subsubtopic.keys():
print(k)
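# Illustrative instantiation (mirrors the loop below): build a flat one-level hierarchy for a
# single scenario and generate its learning corpus with GPT-2.
#   s = schema("Kidnapping", 1, call_gpt2)
#   s.learning_corpus_gen()
#   texts = s.hierarchy["Kidnapping"]["text"]["news"] + s.hierarchy["Kidnapping"]["text"]["HowTo"]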
from os import listdir
from os.path import isfile, join
dir_name = "/shared/kairos/Data/LDC2020E25_KAIROS_Schema_Learning_Corpus_Phase_1_Complex_Event_Annotation_V4/docs/ce_profile"
onlyfiles = [f for f in listdir(dir_name) if isfile(join(dir_name, f)) and f[-4:] == ".txt"]
scenarios = ['Bombing Attacks', 'Pandemic Outbreak', 'Civil Unrest', 'International Conflict', 'Disaster and Rescue', 'Terrorism Attacks', 'Election', 'Sports Games', 'Kidnapping', 'Business Change', 'Mass Shooting']
for f in onlyfiles:
scenarios.append(" ".join(f.split("_")[2:-1]))
print(len(scenarios))
model = 'gpt2'
hier = 1
generated_text = {}
import pickle
'''
with open("generated_text/2022-06-10.pkl", 'wb') as f:
for scenario in scenarios:
s = schema(scenario, hier, call_gpt2)
s.learning_corpus_gen()
generated_text[scenario] = s.hierarchy[scenario]['text']['news']
generated_text[scenario] += s.hierarchy[scenario]['text']['HowTo']
pickle.dump(generated_text, f)
'''
with open('generated_text/2022-06-10.pkl', 'rb') as f:
gt = pickle.load(f)
print(len(gt))
f_11 = open('generated_text/2022-06-11.pkl', 'wb')
print(set(scenarios).difference(set(list(gt.keys()))))
for scenario in scenarios:
if scenario in gt.keys():
continue
else:
print(scenario)
s = schema(scenario, hier, call_gpt2)
s.learning_corpus_gen()
generated_text[scenario] = s.hierarchy[scenario]['text']['news']
generated_text[scenario] += s.hierarchy[scenario]['text']['HowTo']
pickle.dump(generated_text, f_11)
f_11.close()
| [
"Write essential steps for PLACEHOLDER:\n\n1.",
"How to make PLACEHOLDER",
"Write a news story titled \"PLACEHOLDER\"",
"Write a news headline about PLACEHOLDER, \"",
"Subtopics of PLACEHOLDER are:\n\n1."
] |
2024-01-10 | CogComp/Zero_Shot_Schema_Induction | Information_Extractor.py | import requests
import os
from os import listdir
from os.path import isfile, join
import json
import argparse
from timeit import default_timer as timer
import time
from datetime import timedelta
from pprint import pprint
import multiprocessing
from multiprocessing import Pool, cpu_count, Manager, Process
import pickle
import networkx as nx
import matplotlib.pyplot as plt
from sentence_transformers import SentenceTransformer, util
import openai
import string
import re
import os.path
from os import path
# Read the list of phrasal verbs
with open("complete-pv/Complete-PV-list.txt") as f:
lines = f.readlines()
phrasal_verbs = {}
verbs = set()
for line in lines:
if re.search('.[A-Z].', line.strip()):
if not re.search('.[A-Z][A-Z].', line.strip()):
end = re.search('.[A-Z].', line.strip()).start()
tmp_line = line[0:end]
words = tmp_line.strip().split(" ")
else:
words = line.strip().split(" ")
if len(words) > 1 and len(words) < 4:
if words[0][0].isupper() and words[-1][-1] not in string.punctuation and words[-1][0] not in string.punctuation:
lower_words = []
for word in words:
lower_words.append(word.lower())
if lower_words[0] not in phrasal_verbs.keys():
phrasal_verbs[lower_words[0]] = {" ".join(lower_words)}
else:
phrasal_verbs[lower_words[0]].add(" ".join(lower_words))
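# Illustrative result (actual contents depend on the PV list file): phrasal_verbs maps a head
# verb to its multi-word forms, e.g. phrasal_verbs.get("give") might contain
# {"give up", "give in", "give out"}.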
# This is a sentence-transformers model: It maps sentences & paragraphs to a 384 dimensional dense vector space and can be used for tasks like clustering or semantic search.
model = SentenceTransformer('all-MiniLM-L6-v2')
#manager = Manager()
#IE_output = manager.list()
# if not specified, start and end denote the word id at the doc level
# "start_sent_level" denotes the start word id at the sentence level
def view_map_update(output):
count = 0
view_map = {}
for view in output['views']:
view_map[view['viewName']] = count
count += 1
return view_map
def sent_id_getter(token_id, SRL_output):
i = -1
for sEP in SRL_output['sentences']['sentenceEndPositions']:
i += 1
if token_id < sEP:
return i
#raise ValueError("Cannot find sent_id.")
return i + 1 # NER tokenizer may differ from SRL tokenizer
def read_doc(fname):
tag_list = []
with open(fname) as f:
lines = f.readlines()
for line in lines:
tag_list.append(line.split(' - ')[0])
return tag_list
def CP_getter(sentence):
# Constituency Parsing
headers = {'Content-type':'application/json'}
CP_response = requests.post('http://127.0.0.1:6003/annotate', json={"text": sentence}, headers=headers)
if CP_response.status_code != 200:
print("CP_response:", CP_response.status_code)
result = json.loads(CP_response.text)
return result
def relation_preparer(SRL_output):
new_output = {'corpusId': SRL_output['corpusId'],
'id': SRL_output['id'],
'sentences': SRL_output['sentences'],
'text': SRL_output['text'],
'tokens': SRL_output['tokens'],
'views': []
}
for view in SRL_output['views']:
my_view = {}
if view['viewName'] == 'Event_extraction':
my_view['viewName'] = view['viewName']
my_view['viewData'] = [{'viewType': 'edu.illinois.cs.cogcomp.core.datastructures.textannotation.PredicateArgumentView',
'viewName': 'event_extraction',
'generator': 'cogcomp_kairos_event_ie_v1.0',
'score': 1.0,
'constituents': view['viewData'][0]['constituents'],
'relations': view['viewData'][0]['relations'],
}]
new_output['views'].append(my_view)
return new_output
def temporal_getter(SRL_output, onepass = 1):
headers = {'Content-type':'application/json'}
#if onepass:
if True:
temporal_service = 'http://localhost:6009/annotate'
#else:
# temporal_service = 'http://dickens.seas.upenn.edu:4024/annotate'
print("Calling service from " + temporal_service)
temporal_response = requests.post(temporal_service, json=SRL_output, headers=headers)
if temporal_response.status_code != 200:
print("temporal_response:", temporal_response.status_code)
try:
result = json.loads(temporal_response.text)
return result
except:
return None
def subevent_getter(SRL_output):
headers = {'Content-type':'application/json'}
subevent_response = requests.post('http://localhost:6004/annotate', json=SRL_output, headers=headers)
if subevent_response.status_code != 200:
print("subevent_response:", subevent_response.status_code)
try:
result = json.loads(subevent_response.text)
return result
except:
return None
def coref_getter(SRL_output):
# Note: coref service is not provided in this repo
headers = {'Content-type':'application/json'}
coref_response = requests.post('http://localhost:8888/annotate', json=SRL_output, headers=headers)
if coref_response.status_code != 200:
print("coref_response:", coref_response.status_code)
try:
result = json.loads(coref_response.text)
return result
except:
return None
def extract_head_noun(children):
Clause_Level = read_doc('CP_Clause_Level.txt')
Phrase_Level = read_doc('CP_Phrase_Level.txt')
Word_Level = read_doc('CP_Word_Level.txt')
num_c = len(children)
child_index = -1
for child in children:
child_index += 1
if child['nodeType'] in Word_Level:
if child['nodeType'] in ['NN', 'NNS', 'NNP', 'NNPS', 'PRP', 'PRP$']:
next_index = child_index+1
if next_index < num_c:
if children[next_index]['nodeType'] not in ['NN', 'NNS', 'NNP', 'NNPS', 'PRP', 'PRP$']:
return child['word']
else:
while children[next_index]['nodeType'] in ['NN', 'NNS', 'NNP', 'NNPS', 'PRP', 'PRP$']:
next_index += 1
if next_index >= num_c:
break
return children[next_index-1]['word']
else:
return child['word']
elif child['nodeType'] in Phrase_Level:
if 'NP' in child['attributes']:
# we are not interested in the extraction of any nouns in the query,
# but only those that appear within the NP component,
# e.g., NP -> NP + VP (VP -> POS + NP), you cannot let the function search within VP
return extract_head_noun(child['children'])
elif child['nodeType'] in Clause_Level:
return extract_head_noun(child['children'])
else:
#print("extract_head_noun:", child['nodeType'], "is not in any list")
#print("child:", child)
pass
def similar(string1, string2):
if string2 in string1 and len(string1) - len(string2) <= 2:
#print("similar:", string1, string2)
return True
else:
return False
def find(children, query):
# return value is a dict or None
for child in children:
if child['word'] == query or similar(child['word'], query):
return child
else:
if 'children' in child.keys():
result = find(child['children'], query)
if type(result) == dict:
return result
return None
def head_word_extractor(CP_result, query):
children = CP_result['hierplane_tree']['root']['children']
target_child = find(children, query)
try:
if 'children' in target_child.keys(): # target_child can be None, so it might have no keys
return extract_head_noun(target_child['children'])
else:
return target_child['word']
except:
#print("Did not find '", query, "' in Constituency Parsing result")
return None
def entity_info_getter(query, sent_id, entities):
if sent_id in entities:
for entity in entities[sent_id]:
if query in entity['mention']:
return entity['label'], ' '.join(entity['mention']), entity['start'], entity['end']
else:
#print("NER module detected no entity in the {i}-th sentence".format(i=sent_id))
return None
def event_extractor(text, text_id, NOM=True):
if text == '':
return {}
headers = {'Content-type':'application/json'}
SRL_response = requests.post('http://dickens.seas.upenn.edu:4039/annotate', json={"sentence": text}, headers=headers)
if SRL_response.status_code != 200:
print("SRL_response:", SRL_response.status_code)
try:
SRL_output = json.loads(SRL_response.text)
except:
return {}
token_num = len(SRL_output['tokens'])
if token_num not in SRL_output['sentences']['sentenceEndPositions']:
SRL_output['sentences']['sentenceEndPositions'].append(token_num)
print("SRL done")
headers = {'Content-type':'application/json'}
NER_response = requests.post('http://dickens.seas.upenn.edu:4022/ner/', json={"task": "kairos_ner","text" : text}, headers=headers)
if NER_response.status_code != 200:
print("NER_response:", NER_response.status_code)
try:
NER_output = json.loads(NER_response.text)
NER_view_map = view_map_update(NER_output)
print("NER done")
except:
print("NER result empty")
assert 0 == 1
entities = {}
for mention in NER_output['views'][NER_view_map['NER_CONLL']]['viewData'][0]['constituents']:
sent_id = sent_id_getter(mention['start'], SRL_output)
# TODO: Check whether SRL tokenizer is the same as NER's
entity = {'mention': NER_output['tokens'][mention['start']:mention['end']], \
'label': mention['label'], \
'start': mention['start'], \
'end': mention['end'], \
'sentence_id': sent_id, \
}
if sent_id in entities.keys():
entities[sent_id].append(entity)
else:
entities[sent_id] = [entity]
'''Append NER results to SRL'''
SRL_output['views'].append(NER_output['views'][NER_view_map['NER_CONLL']])
SRL_view_map = view_map_update(SRL_output)
#print(SRL_view_map)
CP_output = []
pEP = 0
for sEP in SRL_output['sentences']['sentenceEndPositions']:
this_sentence = " ".join(SRL_output['tokens'][pEP:sEP])
pEP = sEP
CP_output.append(CP_getter(this_sentence))
if SRL_output['sentences']['sentenceEndPositions'][-1] < len(SRL_output['tokens']):
this_sentence = " ".join(SRL_output['tokens'][SRL_output['sentences']['sentenceEndPositions'][-1]:])
CP_output.append(CP_getter(this_sentence))
print("CP done")
Events = []
argument_ids = []
if NOM:
source = ['SRL_ONTONOTES', 'SRL_NOM']
else:
source = ['SRL_ONTONOTES']
for viewName in source:
for mention in SRL_output['views'][SRL_view_map[viewName]]['viewData'][0]['constituents']:
sent_id = sent_id_getter(mention['start'], SRL_output)
mention_id_docLevel = str(text_id) + '_' + str(sent_id) + '_' + str(mention['start'])
if mention['label'] == 'Predicate':
if sent_id == 0:
start = mention['start']
end = mention['end']
else:
start = mention['start'] - SRL_output['sentences']['sentenceEndPositions'][sent_id-1] # event start position in the sentence = event start position in the document - offset
end = mention['end'] - SRL_output['sentences']['sentenceEndPositions'][sent_id-1]
event_id = str(text_id) + '_' + str(sent_id) + '_' + str(start)
predicate = ''
if mention['properties']['predicate'] in phrasal_verbs.keys() and mention['start'] < len(SRL_output['tokens']) - 2:
next_token = SRL_output['tokens'][mention['start'] + 1]
token_after_next = SRL_output['tokens'][mention['start'] + 2]
potential_pv_1 = " ".join([mention['properties']['predicate'], next_token, token_after_next])
#print(potential_pv_1)
potential_pv_2 = " ".join([mention['properties']['predicate'], next_token])
#print(potential_pv_2)
if potential_pv_2 in phrasal_verbs[mention['properties']['predicate']]:
predicate = potential_pv_2
print(predicate)
if potential_pv_1 in phrasal_verbs[mention['properties']['predicate']]:
predicate = potential_pv_1
print(predicate)
if predicate == '':
predicate = mention['properties']['predicate']
else:
predicate = mention['properties']['predicate']
try:
assert mention['start'] != None
assert mention['end'] != None
Events.append({'event_id': event_id, \
'event_id_docLevel': mention_id_docLevel, \
'start': mention['start'], \
'end': mention['end'], \
'start_sent_level': start, \
'end_sent_level': end, \
'properties': {'predicate': [mention['properties']['predicate']], \
'SenseNumber': '01', \
'sentence_id': sent_id
}, \
'label': predicate
})
except:
print("mention with None start or end:", mention)
pass
else:
start = mention['start'] # document level position
end = mention['end']
query = ' '.join(SRL_output['tokens'][start:end]).strip()
ENTITY_INFO = entity_info_getter(query, sent_id, entities)
if mention['label'] in Events[-1]['properties'].keys():
count = 1
for label in Events[-1]['properties'].keys():
if '_' in label and label.split('_')[0] == mention['label']:
count += 1
arg_label = mention['label'] + '_' + str(count)
else:
arg_label = mention['label']
if ENTITY_INFO:
# the argument found by SRL is directly an entity detected by NER
Events[-1]['properties'][arg_label] = {'entityType': ENTITY_INFO[0], \
'mention': ENTITY_INFO[1], \
'start': ENTITY_INFO[2], \
'end': ENTITY_INFO[3], \
'argument_id': str(text_id) + '_' + str(sent_id) + '_' + str(ENTITY_INFO[2]), \
}
argument_ids.append(str(text_id) + '_' + str(sent_id) + '_' + str(ENTITY_INFO[2]))
else:
# the argument found by SRL might be a phrase / part of clause, hence head word extraction is needed
head_word = head_word_extractor(CP_output[sent_id], query)
if head_word:
ENTITY_INFO = entity_info_getter(head_word, sent_id, entities)
if ENTITY_INFO:
# if the head word is a substring in any entity mention detected by NER
Events[-1]['properties'][arg_label] = {'entityType': ENTITY_INFO[0], \
'mention': ENTITY_INFO[1], \
'start': ENTITY_INFO[2], \
'end': ENTITY_INFO[3], \
'argument_id': str(text_id) + '_' + str(sent_id) + '_' + str(ENTITY_INFO[2]), \
}
argument_ids.append(str(text_id) + '_' + str(sent_id) + '_' + str(ENTITY_INFO[2]))
else:
Events[-1]['properties'][arg_label] = {'mention': head_word, 'entityType': 'NA', 'argument_id': mention_id_docLevel} # actually not exactly describing its position
argument_ids.append(mention_id_docLevel)
else:
Events[-1]['properties'][arg_label] = {'mention': query, 'entityType': 'NA', 'argument_id': mention_id_docLevel}
argument_ids.append(mention_id_docLevel)
print("head word extraction done")
"""
Can directly go to the Events_final if ignoring event typing (line 441, before '''Append Event Typing Results to SRL''')
#Events_with_arg = [event for event in Events if len(event['properties']) > 3]
#Events_non_nom = [event for event in Events_with_arg if event['event_id_docLevel'] not in argument_ids]
#print("Removal of nominal events that serve as arguments of other events")
#for event in Events_non_nom:
for event in Events:
sent_id = int(event['event_id'].split('_')[1]) # 0-th: text_id 1-st: sent_id 2-nd: event_start_position_in_sentence
if sent_id < len(SRL_output['sentences']['sentenceEndPositions']):
sEP = SRL_output['sentences']['sentenceEndPositions'][sent_id] # sEP: sentence End Position
if sent_id == 0:
tokens = SRL_output['tokens'][0:sEP]
else:
pEP = SRL_output['sentences']['sentenceEndPositions'][sent_id-1] # pEP: previous sentence End Position
tokens = SRL_output['tokens'][pEP:sEP]
else:
pEP = SRL_output['sentences']['sentenceEndPositions'][-1]
tokens = SRL_output['tokens'][pEP:]
event_sent = " ".join(tokens)
if event_sent[-1] != '.':
event_sent = event_sent + '.'
headers = {'Content-type':'application/json'}
#ET_response = requests.post('http://dickens.seas.upenn.edu:4036/annotate', json={"tokens": tokens, "target_token_position": [event['start_sent_level'], event['end_sent_level']]}, headers=headers)
ET_response = requests.post('http://leguin.seas.upenn.edu:4023/annotate', json={"text": event_sent}, headers=headers)
if ET_response.status_code != 200:
print("ET_response:", ET_response.status_code)
try:
ET_output = json.loads(ET_response.text)
for view in ET_output['views']:
if view['viewName'] == 'Event_extraction':
for constituent in view['viewData'][0]['constituents']:
if constituent['start'] == event['start_sent_level']:
event['label'] = constituent['label']
#try:
# event['label'] = ET_output['predicted_type']
except:
event['label'] = "NA"
print("-------------------------------- Event Typing result: NA! --------------------------------")
print("the sentence is: " + event_sent)
print("the event is: " + event['properties']['predicate'][0])
Events_non_reporting = [event for event in Events if event['label'] not in ['NA', 'Reporting', 'Statement'] and event['properties']['predicate'][0] not in ["be", "have", "can", "could", "may", "might", "must", "ought", "shall", "will", "would", "say", "nee", "need", "do", "happen", "occur"]]
print("event typing done, removed 'be', Reporting, Statement, NA events")
print("event num:", len(Events_non_reporting))
#print(Events[0])
# remove repeated events
event_types = []
Events_final = []
for event in Events_non_reporting:
if event['label'] not in event_types:
Events_final.append(event)
event_types.append(event['label'])
print("num of events with different types:", len(Events_final))
"""
Events_final = [event for event in Events if event['label'] not in ["be", "have", "can", "could", "may", "might", "must", "ought", "shall", "will", "would", "say", "nee", "need", "do", "happen", "occur"]]
'''Append Event Typing Results to SRL'''
Event_Extraction = {'viewName': 'Event_extraction', \
'viewData': [{'viewType': 'edu.illinois.cs.cogcomp.core.datastructures.textannotation.PredicateArgumentView', \
'viewName': 'event_extraction', \
'generator': 'Event_ONTONOTES+NOM_MAVEN_Entity_CONLL02+03', \
'score': 1.0, \
'constituents': Events_final, \
'relations': []
}]
}
#pprint(Events_final)
SRL_output['views'].append(Event_Extraction)
print("event extraction done")
#IE_output.append(SRL_output)
print("------- The {i}-th piece of generated text processing complete! -------".format(i=text_id))
return SRL_output
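# Illustrative usage (hypothetical sentence): run the SRL/NER/constituency pipeline on one piece
# of text and get back the annotated output dict with the appended Event_extraction view.
#   annotation = event_extractor("Police arrested two suspects after the bombing.", 0)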
def call_nlpcloud_API(prompt, token):
# Deprecated function
headers = {"Authorization": "Token " + token}
generation_response = requests.post('https://api.nlpcloud.io/v1/gpu/gpt-j/generation', \
json={"text": prompt, \
"min_length": 50, \
"max_length": 256, \
"temperature": 0.9, \
"top_p": 0.8, \
}, \
headers=headers
)
if generation_response.status_code != 200:
print("generation_response:", generation_response.status_code)
return generation_response
def headline_generator(event, news):
# TODO: test this function
event = event.lower()
if news:
prompt = "The headline of the news about " + event + " was '"
else:
#prompt = "The title for 'How to make " + event + " possible' is '"
return "How to make " + event + " possible"
response = call_nlpcloud_API(prompt)
len_hp = len(prompt)
generated_text = json.loads(response.text)['generated_text'][len_hp:]
end_of_headline = generated_text.find("'")
if end_of_headline:
return generated_text[0:end_of_headline]
else:
return event
def print_event(event_extraction_results, f_out, NA_event=True):
# event_extraction_results: list
for event in event_extraction_results:
#To_print = "Event: '{mention}' ({label}, {event_id})\t".format(event_id=event['event_id_docLevel'], mention=event['properties']['predicate'][0], label=event['label'])
To_print = "Event: '{mention}' ({event_id})\t".format(event_id=event['event_id_docLevel'], mention=event['label'])
for key in event['properties'].keys():
if key not in ["predicate", "sentence_id", "SenseNumber"]:
To_print += "{arg}: '{mention}' ({entityType}, {argument_id})\t".format(arg=key, mention=event['properties'][key]['mention'], entityType=event['properties'][key]['entityType'], argument_id=event['properties'][key]['argument_id'])
if NA_event: # printing info for events with type "NA"
print(To_print, file = f_out)
else:
if event['label'] != 'NA':
print(To_print, file = f_out)
def schema_induction(prompt, call_n, f_out, gt_input = False, gt_output = False, debugging = 1, temporal = True, print_events = True, subevent = True, coref = False):
IE_output = []
if gt_input:
generated_text = gt_input
else:
if debugging:
with open('parrot.pkl', 'rb') as f:
generated_text = pickle.load(f)
generated_text = generated_text[0:debugging]
else:
generated_text = []
print("\tGenerating text")
for i in range(call_n):
response = call_nlpcloud_API(prompt)
generated_text.append(json.loads(response.text)['generated_text'])
if gt_output:
return generated_text
print("Schema Induction module is going to run IE for " + str(len(generated_text)) + " pieces of text.")
text_ids = [i for i in range(len(generated_text))]
with Pool(processes=2) as pool:
IE_output = pool.starmap(event_extractor, zip(generated_text, text_ids))
if print_events:
for SRL_output in IE_output:
if SRL_output == {}:
continue
print_event(SRL_output['views'][-1]['viewData'][0]['constituents'], f_out)
if subevent:
IE_output_subevent = []
print("start working on subevent...")
for SRL_output in IE_output:
if SRL_output == {}:
continue
temp = relation_preparer(SRL_output)
subevent_res = subevent_getter(temp)
if subevent_res:
IE_output_subevent.append(subevent_res)
IE_output = []
IE_output = IE_output_subevent
if coref:
IE_output_coref = []
print("start working on coref...")
for SRL_output in IE_output:
if SRL_output == {}:
continue
temp = relation_preparer(SRL_output)
coref_res = coref_getter(temp)
if coref_res:
IE_output_coref.append(coref_res)
IE_output = []
IE_output = IE_output_coref
if temporal:
IE_output_temporal = []
count = -1
print("start working on temporal...")
for SRL_output in IE_output:
if SRL_output == {}:
continue
temp = relation_preparer(SRL_output)
"""
count += 1
dump_EE = True
if dump_EE:
with open("intermediate/temp" + str(count) + ".json", 'w') as f:
json.dump(temp, f)
"""
print("schema induction -- num of events:", len(temp['views'][-1]['viewData'][0]['constituents']))
temporal_res = temporal_getter(temp)
if temporal_res:
IE_output_temporal.append(temporal_res)
return IE_output_temporal
else:
return IE_output
def print_stats(IE_output, topic, f_out):
event_types_total = {}
#event_mentions_total = {}
event_types_detail = {}
event_args = {}
for SRL_output in IE_output:
if SRL_output == {}:
continue
event_types = {}
#event_mentions = {}
for event in SRL_output['views'][-1]['viewData'][0]['constituents']:
if event['label'] != "NA": # not reporting those events w/o types
#event_mentions[event['properties']['predicate'][0]] = 1
event_types[event['label']] = 1
if event['label'] not in event_types_detail.keys():
event_types_detail[event['label']] = set()
event_types_detail[event['label']].add(event['event_id_docLevel'])
if event['label'] not in event_args.keys():
event_args[event['label']] = {}
for arg in event['properties'].keys():
arg_no_index = arg.split('_')[0]
if "ARG" in arg:
if event['properties'][arg]['entityType'] != 'NA':
event_args[event['label']][arg_no_index] = {event['properties'][arg]['entityType']: 1}
else:
for arg in event['properties'].keys():
arg_no_index = arg.split('_')[0]
if "ARG" in arg:
if event['properties'][arg]['entityType'] != 'NA':
if arg_no_index in event_args[event['label']].keys():
if event['properties'][arg]['entityType'] in event_args[event['label']][arg_no_index].keys():
event_args[event['label']][arg_no_index][event['properties'][arg]['entityType']] += 1
else:
event_args[event['label']][arg_no_index][event['properties'][arg]['entityType']] = 1
else:
event_args[event['label']][arg_no_index] = {event['properties'][arg]['entityType']: 1}
for event_type in event_types.keys():
if event_type in event_types_total.keys():
event_types_total[event_type] += 1
else:
event_types_total[event_type] = 1
#for mention in event_mentions.keys():
# if mention in event_mentions_total.keys():
# event_mentions_total[mention] += 1
# else:
# event_mentions_total[mention] = 1
#print('top 20 event mentions:')
#pprint(sorted(event_mentions_total.items(), key=lambda x: x[1], reverse=True)[:20])
#print('\ntop 30 events:\n', file = f_out)
print('\ntop events:\n', file = f_out)
#pprint(sorted(event_types_total.items(), key=lambda x: x[1], reverse=True)[:20])
#for et, count in sorted(event_types_total.items(), key=lambda x: x[1], reverse=True)[:30]:
for et, count in sorted(event_types_total.items(), key=lambda x: x[1], reverse=True): # Oct 17 2022
print("'" + et + "'", "appears in", str(count), "docs, mentions:", event_types_detail[et], end = '', file = f_out)
print(", arguments:", event_args[et], file = f_out)
#print("\n'" + et + "'", "appears in", str(count), "docs, mentions:", end=' ')
#for mention in event_types_detail[et]:
# print("'" + mention + "':" + str(event_mentions_total[mention]), end=', ')
temporal_relation = {}
subevent_relation = {}
coref_relation = {}
text_id = -1
for SRL_output in IE_output:
if SRL_output == {}:
continue
text_id += 1
for relation in SRL_output['views'][-1]['viewData'][0]['relations']:
rel = relation['relationName']
src = int(relation['srcConstituent']) # coref result: '1'; temporal / subevent result: 1
tgt = int(relation['targetConstituent'])
source = SRL_output['views'][-1]['viewData'][0]['constituents'][src]['label']
target = SRL_output['views'][-1]['viewData'][0]['constituents'][tgt]['label']
#logits = relation['logits']
#print(rel, source, target, logits)
if source == target:
continue
if rel in ['before', 'after']:
if rel == 'before':
pair = (source, target)
else:
pair = (target, source)
if pair in temporal_relation.keys():
temporal_relation[pair].add(text_id)
else:
temporal_relation[pair] = {text_id}
if rel in ['SuperSub', 'SubSuper']:
if rel == 'SuperSub':
pair = (source, target)
else:
pair = (target, source)
if pair in subevent_relation.keys():
subevent_relation[pair].add(text_id)
else:
subevent_relation[pair] = {text_id}
if rel == "coref":
pair = (source, target)
if pair in coref_relation.keys():
coref_relation[pair].add(text_id)
else:
coref_relation[pair] = {text_id}
#print("\ntop 30 temporal relations:\n", file = f_out)
#for et, count in sorted(temporal_relation.items(), key=lambda x: len(x[1]), reverse=True)[:30]:
print("\ntop temporal relations:\n", file = f_out)
for et, count in sorted(temporal_relation.items(), key=lambda x: len(x[1]), reverse=True): # Oct 17 2022
print("'" + str(et) + "'", "appears in", str(len(count)), "docs:", count, file = f_out)
#print("\ntop 30 subevent relations:\n", file = f_out)
#for et, count in sorted(subevent_relation.items(), key=lambda x: len(x[1]), reverse=True)[:30]:
print("\ntop subevent relations:\n", file = f_out)
for et, count in sorted(subevent_relation.items(), key=lambda x: len(x[1]), reverse=True): # Oct 17 2022
print("'" + str(et) + "'", "appears in", str(len(count)), "docs:", count, file = f_out)
#print("\ntop 30 coref relations:\n", file = f_out)
#for et, count in sorted(coref_relation.items(), key=lambda x: len(x[1]), reverse=True)[:30]:
#print("\ntop coref relations:\n", file = f_out)
#for et, count in sorted(coref_relation.items(), key=lambda x: len(x[1]), reverse=True): # Oct 17 2022
# print("'" + str(et) + "'", "appears in", str(len(count)), "docs:", count, file = f_out)
"""
G=nx.Graph()
for pair in temporal_relation_total.keys():
count = temporal_relation_total[pair]
if count >= 3:
G.add_edge(pair[0], pair[1])
nx.set_edge_attributes(G, {pair: {"weight": count}})
pos = nx.spring_layout(G)
plt.figure(3,figsize=(12,12))
nx.draw(G, pos, with_labels = True)
nx.draw_networkx_edge_labels(G, pos)
plt.savefig('png/' + topic + '.png')
"""
def search_for_events(IE_output, event_type = "", event_mention = ""):
for SRL_output in IE_output:
if SRL_output == {}:
continue
for event in SRL_output['views'][-1]['viewData'][0]['constituents']:
if event['label'] == event_type or event['properties']['predicate'][0] == event_mention:
To_print = "Event: '{mention}' ({label}, {event_id})\t".format(event_id=event['event_id_docLevel'], mention=event['properties']['predicate'][0], label=event['label'])
for key in event['properties'].keys():
if key not in ["predicate", "sentence_id"]:
To_print += "{arg}: '{mention}' ({entityType}, {argument_id})\t".format(arg=key, mention=event['properties'][key]['mention'], entityType=event['properties'][key]['entityType'], argument_id=event['properties'][key]['argument_id'])
print(To_print)
def save_generated_text(generated_text, topic):
time_str = time.strftime("%Y-%m-%d", time.localtime(time.time()))
with open('generated_text/' + topic + '_' + time_str + '.pkl', 'wb') as f:
pickle.dump(generated_text, f)
def save_IE_output(IE_output, topic):
time_str = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime(time.time()))
with open('IE_output/' + topic + '_' + time_str + '.pkl', 'wb') as f:
pickle.dump(IE_output, f)
'''
def similarity(topic, text):
encoded_input = tokenizer(text, return_tensors="pt", max_length=256)
output = model(**encoded_input)
if topic == text:
return 1
else:
return 0
def filter_gt(generated_text, topic):
ranking = {}
text_id = 0
for text in generated_text:
ranking[text_id] = similarity(topic, text)
text_id += 1
ranked_list = sorted(ranking.items(), key=lambda x: x[1], reverse=True)
new_gt = []
count = -1
for rank in ranked_list:
count += 1
if count < len(ranked_list) / 2:
new_gt.append(generated_text[rank[0]])
return new_gt
'''
def filter_gt_sbert(generated_text, topic):
# https://www.sbert.net/docs/usage/semantic_textual_similarity.html
num = len(generated_text)
topic_ = [topic] * num
embeddings1 = model.encode(generated_text, convert_to_tensor=True)
embeddings2 = model.encode(topic_, convert_to_tensor=True)
cosine_scores = util.pytorch_cos_sim(embeddings1, embeddings2)
ranking = []
for i in range(num):
ranking.append({'index': i, 'score': cosine_scores[i][i]})
ranking = sorted(ranking, key=lambda x: x['score'], reverse=True)
new_gt = []
count = -1
for rank in ranking:
count += 1
if count < num / 2:
new_gt.append(generated_text[rank['index']])
return new_gt
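# Illustrative usage (hypothetical documents): keep the half of the generated texts whose SBERT
# embedding is most similar to the topic string.
#   kept = filter_gt_sbert(["story about a kidnapping ...", "unrelated recipe ..."], "Kidnapping")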
if __name__ == "__main__":
start = timer()
parser = argparse.ArgumentParser()
#parser.add_argument("--event", default='Boston Marathon bombing', type=str, required=True,
# help="choose your event of interest for schema induction")
parser.add_argument("--call_n", default=4, type=int, required=False,
help="number of pieces of generated text per headline")
parser.add_argument("--headline_n", default=10, type=int, required=False,
help="number of headlines to be generated")
parser.add_argument("--debugging", default=0, type=int, required=False,
help="debugging mode: True or False")
args = parser.parse_args()
#scenarios = ['Bombing Attacks', 'Pandemic Outbreak', 'Civil Unrest', 'International Conflict', 'Disaster and Rescue', 'Terrorism Attacks', 'Election', 'Sports Games', 'Kidnapping', 'Business Change', 'Mass Shooting']
scenarios = []
dir_name = "/shared/kairos/Data/LDC2020E25_KAIROS_Schema_Learning_Corpus_Phase_1_Complex_Event_Annotation_V4/docs/ce_profile"
onlyfiles = [f for f in listdir(dir_name) if isfile(join(dir_name, f)) and f[-4:] == ".txt"]
for f in onlyfiles:
scenarios.append(" ".join(f.split("_")[2:-1]))
#with open("generated_text/2021-12-18.pkl", 'rb') as f:
with open("generated_text/2022-01-06.pkl", 'rb') as f:
#with open("generated_text/2022-06-10.pkl", 'rb') as f:
text = pickle.load(f)
if args.debugging:
topic = "Aviation-accident"
f_out = open('output/' + topic + '.txt', 'w')
IE_output = schema_induction('', args.call_n, f_out, gt_input = False, gt_output = False, debugging = args.debugging)
print("printing stats...")
#print_stats(IE_output, topic = topic, f_out = f_out)
f_out.close()
else:
for topic in scenarios:
#if path.exists('output_Typing_OnePass/' + topic + '.txt'):
#if path.exists('GPT2_output/' + topic + '.txt'):
if path.exists('output_all/' + topic + '.txt'):
continue
#f_out = open('output_Typing_OnePass/' + topic + '.txt', 'w')
#f_out = open('GPT2_output/' + topic + '.txt', 'w')
f_out = open('output_all/' + topic + '.txt', 'w')
#gt_input = False
induce = False
gt_input = text[topic]
#gt_input = ["They had to account for all the money that had gone missing. The police were acting on a tip from an informer and caught the gang redhanded."]
if gt_input:
IE_output = schema_induction('', args.call_n, f_out, gt_input, False, args.debugging, True, True, True, False)
save_IE_output(IE_output, topic)
try:
print("printing stats...")
print_stats(IE_output, topic = topic, f_out = f_out)
except:
pass
else:
print("Generating headline for '{event}'".format(event=topic))
                ''' # Manually selecting appropriate headlines
while True:
headline = headline_generator(topic, news = True)
x = input("The generated headline for '" + topic + "' is: '" + headline + "'. Enter A (Accept) or R (Reject):")
if x == 'A':
break
elif x == 'R':
print("Alright, let's try again")
else:
print("Enter A (Accept) or R (Reject):")
'''
generated_text = []
# generate 10 headlines for news & how-to
for i in range(args.headline_n):
headline = headline_generator(topic, news = True)
print("News-like headline:", headline)
# generate call_n pieces of text for each headline
generated_text.extend(schema_induction(headline, args.call_n, f_out, gt_input = False, gt_output = True, debugging = args.debugging))
headline = headline_generator(topic, news = False)
print("HowTo-like headline:", headline)
generated_text.extend(schema_induction(headline, args.headline_n * args.call_n, f_out, gt_input = False, gt_output = True, debugging = args.debugging))
save_generated_text(generated_text, topic)
if induce:
IE_output = schema_induction('', args.call_n, f_out, gt_input = filter_gt_sbert(generated_text, topic), gt_output = False, debugging = args.debugging)
save_IE_output(IE_output, topic)
print("printing stats...")
#print_stats(IE_output, topic = topic, f_out = f_out)
f_out.close()
end = timer()
print(timedelta(seconds=end-start))
"""
#This version does not work
start = timer()
with open('parrot.pkl', 'rb') as f:
generated_text = pickle.load(f)
#print(f'starting computations on {cpu_count()} cores')
#debug_text = ['The first passengers rescued from a helicopter that ditched in the North Sea have arrived at hospital.', 'The Sea King helicopter, which had been on a search and rescue mission, came down off the coast of the Orkney Islands.']
text_ids = [i for i in range(len(generated_text))]
processes = [Process(target=event_extractor, args=(generated_text, text_ids)) for x in range(len(generated_text))]
for p in processes:
p.start()
for p in processes:
p.join()
#with Pool() as pool:
# IE_output = pool.starmap(event_extractor, zip(generated_text, text_ids))
for SRL_output in IE_output:
print_events(SRL_output['views'][-1]['viewData'][0]['constituents'])
end = timer()
print(f'elapsed time: {end - start}')
"""
"""
# Let's try this version... And it works!
start = timer()
print(f'starting computations on {cpu_count()} cores')
#debug_text = ['The first passengers rescued from a helicopter that ditched in the North Sea have arrived at hospital.', 'The Sea King helicopter, which had been on a search and rescue mission, came down off the coast of the Orkney Islands.']
with open('parrot.pkl', 'rb') as f:
generated_text = pickle.load(f)
#generated_text = generated_text[0:3]
text_ids = [i for i in range(len(generated_text))]
with Pool(processes=3) as pool:
IE_output = pool.starmap(event_extractor, zip(generated_text, text_ids))
for SRL_output in IE_output:
print_events(SRL_output['views'][-1]['viewData'][0]['constituents'])
end = timer()
print(f'elapsed time: {end - start}')
"""
| [
"The headline of the news about PLACEHOLDER was '"
] |
2024-01-10 | CogComp/Zero_Shot_Schema_Induction | GPT_3_direct.py | import requests
import os
from os import listdir
from os.path import isfile, join
import json
import openai
openai.api_key = "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"
from Information_Extractor import event_extractor
dir_name = "/shared/kairos/Data/LDC2020E25_KAIROS_Schema_Learning_Corpus_Phase_1_Complex_Event_Annotation_V4/docs/ce_profile"
onlyfiles = [f for f in listdir(dir_name) if isfile(join(dir_name, f)) and f[-4:] == ".txt"]
scenarios = ['Bombing Attacks', 'Pandemic Outbreak', 'Civil Unrest', 'International Conflict', 'Disaster and Rescue', 'Terrorism Attacks', 'Election', 'Sports Games', 'Kidnapping', 'Business Change', 'Mass Shooting']
#scenarios = ['Cyber Attack']
for f in onlyfiles:
#print(" ".join(f.split("_")[2:-1]))
scenarios.append(" ".join(f.split("_")[2:-1]))
def call_openai_api(event, n, temperature, stop, presence_penalty):
prompt = "Write essential steps for " + event + ":\n\n1."
print(prompt)
response = openai.Completion.create(
#engine="davinci",
engine="davinci-instruct-beta-v3",
prompt=prompt,
max_tokens=512,
temperature=temperature,
stop=stop,
n=n,
presence_penalty=presence_penalty
)
texts = []
for choice in response["choices"]:
texts.append(choice["text"])
print("This api call ended!")
return texts, response["id"]
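# Illustrative call (mirrors the loop below): ask GPT-3 for the essential steps of one scenario
# with a single completion at temperature 0.9.
#   steps, response_id = call_openai_api("mass shooting", 1, 0.9, None, 0.1)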
for scenario in scenarios:
f = open("output/" + scenario + "_direct.txt", 'w')
scn = scenario.lower()
res = call_openai_api(scn, 1, 0.9, None, 0.1)
result = "1." + res[0][0]
print("GPT-3 result:\n", file = f)
print(result, file = f)
headers = {'Content-type':'application/json'}
SRL_response = requests.post('http://dickens.seas.upenn.edu:4039/annotate', json={"sentence": result}, headers=headers)
if SRL_response.status_code != 200:
print("SRL_response:", SRL_response.status_code)
SRL_output = json.loads(SRL_response.text)
print("\nevents:\n", file = f)
response = event_extractor(result, 0, False)
events = []
for view in response['views']:
if view['viewName'] == 'Event_extraction':
for constituent in view['viewData'][0]['constituents']:
print("'" + constituent['label'].lower() + "'", "appears in GPT-3 direct, mentions: {}, arguments:", end='', file = f)
arguments = {}
for arg in constituent['properties'].keys():
if 'ARG' in arg:
arguments[arg] = constituent['properties'][arg]['mention']
print(arguments, file = f)
events.append(constituent['label'].lower())
print("\ntemporal relations:\n", file = f)
num_events = len(events)
for i_e in range(0, num_events-1):
#for j_e in range(i_e+1, num_events):
print("'('" + events[i_e] + ", '" + events[i_e + 1] + "')'", "appears in GPT-3 direct", file = f)
f.close()
| [
"Write essential steps for PLACEHOLDER:\n\n1."
] |
2024-01-10 | sibycr18/SummarizEd | Select_PDF.py | import streamlit as st
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from PyPDF2 import PdfReader
import re
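# ChromaDB needs a newer SQLite than some hosting environments ship; swapping in pysqlite3
# below is the usual workaround for that.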
__import__('pysqlite3')
import sys
sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')
import chromadb
## Initialization
# Initialize ChromaDB
@st.cache_resource
def init_db():
db_client = chromadb.PersistentClient(path="./db")
return db_client
# Initialize Embeddings
@st.cache_resource
def init_embedding():
embeddings = HuggingFaceEmbeddings(model_name="infgrad/stella-base-en-v2")
return embeddings
def sanitize_string(input_str):
    # Remove everything except alphanumerics, underscores, hyphens, and periods
sanitized = re.sub(r"[^A-Za-z0-9_.-]", "", input_str)
# Replace consecutive periods with a single period
sanitized = re.sub(r"\.{2,}", ".", sanitized)
# Ensure the string starts and ends with an alphanumeric character
sanitized = re.sub(r"^[^A-Za-z0-9]+", "", sanitized)
sanitized = re.sub(r"[^A-Za-z0-9]+$", "", sanitized)
# Truncate or pad string to meet the 3-63 character length requirement
sanitized = sanitized[:63] if len(
sanitized) > 63 else sanitized.ljust(3, "_")
return sanitized
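# Illustrative example (hypothetical filename): sanitize_string("My Report (v2).pdf") returns
# "MyReportv2.pdf", which is suitable as a ChromaDB collection name.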
st.set_page_config(
page_title="SummarizEd.ai",
page_icon="📚",
layout="centered",
)
# Session states
db_client = st.session_state.db_client = init_db()
embeddings = st.session_state.embeddings = init_embedding()
# Already uploaded files
collections = st.session_state.db_client.list_collections()
## App Title
# st.title("Summariz:orange[Ed] :gray[- PDF Summarizer]")
st.title("Summariz:orange[Ed]:grey[.ai]")
st.subheader("", divider="gray") # maybe not the proper way, but I like this
pdf_list = tuple(collection.name for collection in collections)
placeholder = "Select the PDF file to search..." if len(pdf_list) > 0 else "No PDFs uploaded"
file_name = st.selectbox(
"Select PDF file:",
pdf_list,
index=None,
placeholder = placeholder,
label_visibility="visible"
)
# st.session_state.selected_file = selected_file
st.subheader("OR")
# Display file uploader
uploaded_file = st.file_uploader("Upload a new PDF file", type=["pdf"])
if uploaded_file is not None:
file_name = sanitize_string(uploaded_file.name)
# Read and display the content of the PDF file
pdf_reader = PdfReader(uploaded_file)
pdf_text = ""
for page_num in range(len(pdf_reader.pages)):
page = pdf_reader.pages[page_num]
pdf_text += page.extract_text()
# print(pdf_text)
if st.button("Process PDF", type="primary"):
if file_name in {collection.name for collection in collections}:
st.warning(
f"PDF '{file_name}' has already been processed. Select it from the above list.")
else:
with st.spinner("Processing PDF..."):
## Db insertion
collection = db_client.create_collection(name=file_name)
# Split text into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
chunks = text_splitter.split_text(pdf_text)
# Convert chunks to vector representations and store in ChromaDB
documents_list = []
embeddings_list = []
ids_list = []
for idx, chunk in enumerate(chunks):
vector = embeddings.embed_query(chunk)
documents_list.append(chunk)
embeddings_list.append(vector)
ids_list.append(f"{file_name}_{idx}")
collection.add(
embeddings=embeddings_list,
documents=documents_list,
ids=ids_list
)
st.success("PDF has been processed successfully")
st.session_state.file_name = file_name
| [] |
2024-01-10 | NADOOITChristophBa/NADOO-Voice | chapters.py | import json
import re
import openai
def split_into_chunks(text, chunk_size=400):
"""
Splits the book text into manageable chunks, trying to break at sentence endings.
'chunk_size' is in characters, adjust based on testing.
"""
chunks = []
chunk_count = 0
while text:
# Take the first 'chunk_size' characters from the book text
chunk = text[:chunk_size]
# Ensure the chunk ends on a complete sentence where possible
last_end = max(chunk.rfind("."), chunk.rfind("!"), chunk.rfind("?"))
if last_end != -1 and len(chunk) - last_end < 200:
# Adjust chunk to end at the last complete sentence
chunk = chunk[: last_end + 1]
# Adjust the remaining book text starting after the last complete sentence
text = text[last_end + 1 :]
else:
# If no sentence ending is found, or it's too close to the end of the chunk, proceed as usual
text = text[chunk_size:]
chunks.append(chunk)
chunk_count += 1
# Print each chunk with spacing
# print(f"Chunk {chunk_count}:\n{chunk}\n\n---\n")
return chunks
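# Illustrative usage (hypothetical text): split a passage into ~100-character chunks that
# prefer to end on sentence boundaries.
#   parts = split_into_chunks("First sentence. Second one! A third?", chunk_size=100)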
def gpt_prompt_for_chapter_analysis(chunk, last_chapter_title):
"""
Analyzes a text chunk to identify chapters using GPT-4, with a fallback to GPT-3.5 if necessary.
Returns the last identified chapter if no new chapters are found, along with the text provided in the response.
:param chunk: Text chunk to be analyzed.
:param last_chapter_title: Title of the last identified chapter to continue from.
:return: A list of chapters found in the chunk, or the last chapter if no new chapters are found.
"""
from openai import (
BadRequestError,
AuthenticationError,
PermissionDeniedError,
NotFoundError,
RateLimitError,
InternalServerError,
APIConnectionError,
APITimeoutError,
)
# Example JSON structure showing potential multiple chapters
example_json = {
"chapters": [
{
"chapter_title": "Chapter 1",
"chapter_content": "Full found Content of Chapter 1...",
},
{
"chapter_title": "Chapter 2",
"chapter_content": "Full found Content of Chapter 2...",
},
]
}
# Detailed prompt construction for GPT models
prompt = (
f"You are an helpfull AI assistant. You are helping to find the structure of a book inside a text."
f"You are given a chunk of text. This text needs to be analysed."
f"A chunk can contain a a chapter title but does not need to start with it."
f"If the text does not start with a new chapter title use this title ->'{last_chapter_title}'<- for the text until you find a new chapter. "
f"Chapter Titles usually are written in CAPITAL LETTERS and formed as a question."
f"They also usually take a whole line."
f"Be carful not to include any other text in the chapter title and also that in the text the chapter titles are somethimes mentioned. DO NOT include those mentions in the chapter title."
f"Examine the text for any new chapter, and return their titles and full content. It is absolutly crucial that you return the full content of the chapters."
f"No not change any of the text simply copy and past it."
f"Be carfull not to add any styling to the text like /n or /t"
f"Here is the text chunk for analysis: {chunk}."
f"Again If no new chapters are found, simply use this ->'{last_chapter_title}'<- for the rest of the found chapter content. "
f"Your response should be in a JSON format similar to this example: {json.dumps(example_json)}"
f"You can do this. Give this your best shot. Take time to think. "
)
client = openai.OpenAI() # Ensure the OpenAI client is set up with an API key
attempts = 0
max_attempts = 2
models = ["gpt-4-1106-preview", "gpt-3.5-turbo-1106"]
    while attempts < max_attempts + 1:
        model = models[attempts % len(models)]
        # print(f"Sending the following detailed prompt to {model}:")
        # print(prompt)
        # Wrap the API call and the JSON parsing together so that API errors also trigger the retry/fallback.
        try:
            response = client.chat.completions.create(
                model=model,
                response_format={"type": "json_object"},
                messages=[
                    {
                        "role": "system",
                        "content": "Please respond with a detailed analysis in JSON format.",
                    },
                    {"role": "user", "content": prompt},
                ],
            )
            response_content = response.choices[0].message.content
            # print(f"Received response from {model}:")
            # print(response_content)
            response_data = json.loads(response_content)
            return response_data  # Correct response with new chapters
        except json.JSONDecodeError:
            print(f"Response from {model} was not valid JSON. Retrying...")
        except BadRequestError:
            print("Bad request to OpenAI. Please check the request format.")
        except AuthenticationError:
            print("Authentication failed. Please check your OpenAI API key.")
        except PermissionDeniedError:
            print("Permission denied. Please check your access rights.")
        except NotFoundError:
            print("Requested resource not found.")
        except RateLimitError:
            print("Rate limit exceeded. Please try again later.")
        except (InternalServerError, APIConnectionError, APITimeoutError) as e:
            print(f"A server or connection error occurred: {e}")
        except Exception as e:
            print(f"An unexpected error occurred: {e}")
        attempts += 1
print("Failed to get a valid response after multiple attempts.")
    return {"chapters": []}  # Return an empty chapter list only if all attempts fail
def word_list(text):
# Split text into words, considering punctuation as separate entities
return re.findall(r"\b\w+\b|\S", text.lower())
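# Illustrative sketch (not part of the original code): word_list splits on word
# boundaries and keeps punctuation as separate tokens, e.g.
#   word_list("Hello, World!")  ->  ['hello', ',', 'world', '!']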
# This function was used for the extraction of chapters for the BGB book only.
def get_chapters_for_bgb_text(text):
chapters = []
lines = text.split("\n")
chapter_counter = 1
current_title = ""
current_content = []
for line in lines:
# Check for Buch, Abschnitt, Titel, and start a new chapter
if re.match(r"(Buch \d+|Abschnitt \d+|Titel \d+)", line):
# Save previous chapter if it exists
if current_title:
chapters.append(
{
"unique_id": f"Chapter{chapter_counter}",
"chapter_title": current_title,
"chapter_content": " ".join(current_content).strip(),
}
)
chapter_counter += 1
current_title = line.strip()
current_content = []
# Check for '§' and start a new chapter
elif re.match(r"§ \d+", line):
# Save previous chapter if it exists
if current_title:
chapters.append(
{
"unique_id": f"Chapter{chapter_counter}",
"chapter_title": current_title,
"chapter_content": " ".join(current_content).strip(),
}
)
chapter_counter += 1
current_title = line.strip()
current_content = []
else:
current_content.append(line.strip())
# Add the last chapter
if current_title:
chapters.append(
{
"unique_id": f"Chapter{chapter_counter}",
"chapter_title": current_title,
"chapter_content": " ".join(current_content).strip(),
}
)
return chapters
def get_chapters_for_text(text, book_title="Untitled"):
print("Processing entire book...")
chunks = split_into_chunks(text)
all_chapters = []
last_chapter_title = "" # Initialize with an empty string
for chunk_index, chunk in enumerate(chunks):
print(f"Processing chunk {chunk_index + 1}: {chunk}")
response = gpt_prompt_for_chapter_analysis(chunk, last_chapter_title)
chapters = response.get("chapters", [])
combined_chapter_words = []
for chapter in chapters:
print(f"Found chapter: {chapter.get('chapter_title')}")
print(f"Chapter content: {chapter.get('chapter_content')}")
title = chapter.get("chapter_title", "Untitled")
content = chapter.get("chapter_content", "")
last_chapter_title = title
combined_chapter_words.extend(word_list(title + " " + content))
chapter_found = False
for chapter_dict in all_chapters:
if title == chapter_dict.get("chapter_title"):
chapter_found = True
chapter_dict["chapter_content"] += " " + content
break
if not chapter_found:
all_chapters.append(
{"chapter_title": title, "chapter_content": content}
)
chunk_words = word_list(chunk)
missing_words = [
word for word in chunk_words if word not in combined_chapter_words
]
if missing_words:
print(f"Missing words in chunk {chunk_index + 1}: {missing_words}")
return all_chapters
| [
"Here is the text chunk for analysis: PLACEHOLDER.",
"Please respond with a detailed analysis in JSON format.",
"Be carful not to include any other text in the chapter title and also that in the text the chapter titles are somethimes mentioned. DO NOT include those mentions in the chapter title.",
"You are an helpfull AI assistant. You are helping to find the structure of a book inside a text.",
"If the text does not start with a new chapter title use this title ->'PLACEHOLDER'<- for the text until you find a new chapter. ",
"You can do this. Give this your best shot. Take time to think. ",
"Full found Content of Chapter 2...",
"A chunk can contain a a chapter title but does not need to start with it.",
"Examine the text for any new chapter, and return their titles and full content. It is absolutly crucial that you return the full content of the chapters.",
"Be carfull not to add any styling to the text like /n or /t",
" ",
"Chapter Titles usually are written in CAPITAL LETTERS and formed as a question.",
"No not change any of the text simply copy and past it.",
"You are given a chunk of text. This text needs to be analysed.",
"Full found Content of Chapter 1...",
"Again If no new chapters are found, simply use this ->'PLACEHOLDER'<- for the rest of the found chapter content. ",
"They also usually take a whole line."
] |
2024-01-10 | NADOOITChristophBa/NADOO-Voice | nadoo_voice.py | import os
from pathlib import Path
from dotenv import load_dotenv
import openai
import tkinter as tk
from tkinter import simpledialog, ttk, scrolledtext
import json
import re
import threading
import time
import tkinter.filedialog as filedialog
import os
from pathlib import Path
# Function to convert text to speech and save as an MP3 file
import os
from pathlib import Path
from chapters import get_chapters_for_text
def parse_config_matrix(config_str, total_chapters):
if not config_str:
return {str(chapter).zfill(2): True for chapter in range(1, total_chapters + 1)}
chapters_config = {}
for part in config_str.split(","):
if "-" in part:
start, end = part.split("-")
start = int(start)
end = total_chapters if end == "*" else int(end)
for chapter in range(start, end + 1):
chapters_config[str(chapter).zfill(2)] = True
elif "+" in part:
chapters = part.split("+")
for chapter in chapters:
chapters_config[chapter.zfill(2)] = True
else:
chapters_config[part.zfill(2)] = True
return chapters_config
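# Illustrative sketch (sample values are assumptions, not from the original code):
#   parse_config_matrix("1-3,7", total_chapters=10)
#   -> {"01": True, "02": True, "03": True, "07": True}
# An empty config_str selects every chapter, i.e. {"01": True, ..., "10": True}.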
from pathlib import Path
import os
import re
def get_audio_file_path_for_chapter_info(book_title, chapter_title, voice, output_file):
"""
Generates the file path for an audio file based on book title, chapter title, voice, and output file name.
Parameters:
- book_title (str): The title of the book.
- chapter_title (str): The title of the chapter.
- voice (str): The voice used for TTS.
- output_file (str): The name of the output audio file.
Returns:
- str: The full path for the audio file.
"""
# Sanitize book and chapter titles to use in file paths
sanitized_book_title = re.sub(r'[<>:"/\\|?*]', "_", book_title)
sanitized_chapter_title = re.sub(r'[<>:"/\\|?*]', "_", chapter_title)
# Create the folder structure
base_folder = (
Path(__file__).parent / sanitized_book_title / voice / sanitized_chapter_title
)
os.makedirs(base_folder, exist_ok=True)
# Return the modified output file path
return f"{base_folder}/{output_file}"
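# Illustrative sketch (sample values are assumptions, not from the original code):
# for book "My Book", chapter "What is AI?", voice "nova" and output file "01_intro.mp3",
# get_audio_file_path_for_chapter_info returns roughly
#   "<script dir>/My Book/nova/What is AI_/01_intro.mp3"
# ("?" is replaced by "_") and creates the folder structure as a side effect.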
# Function to convert text to speech and save as an MP3 file
def text_to_speech(
input_text,
audio_file_path,
model="tts-1-hd",
voice="onyx",
):
retry_count = 0
retry_delay = 10 # Initial delay in seconds
while True: # Infinite loop, will break on success or non-rate-limit error
try:
client = openai.OpenAI()
# Create the spoken audio from the input text
response = client.audio.speech.create(
model=model, voice=voice, input=input_text
)
# Stream the response to the file
response.stream_to_file(Path(audio_file_path))
print(f"Audio file saved as {audio_file_path}")
break # Break the loop if successful
except Exception as e:
error_message = str(e)
if "rate_limit_exceeded" in error_message:
print(f"Rate limit reached, retrying in {retry_delay} seconds...")
time.sleep(retry_delay)
retry_delay = min(
retry_delay * 2, 1200
) # Double the delay each time, max 20 minutes
retry_count += 1
else:
print(f"An error occurred: {error_message}")
break
if retry_count > 0:
print(f"Retried {retry_count} times before success.")
def get_chapter_audio_for_chapter(chapter, chapter_number, voice, model, book_title):
chapter_audio_data = []
chapter_title = chapter.get("chapter_title", "Untitled")
text = chapter.get("chapter_content", "")
print(f"Processing chapter: {chapter_title}")
print(f"Chapter number: {chapter_number}")
# Decide whether to split into subchapters
should_split = len(text) > 4000
subchapters = split_into_subchapters(text) if should_split else [text]
for i, subchapter_content in enumerate(subchapters, start=1):
combined_text = (
f"{chapter_title} Teil {i}. {subchapter_content}"
if len(subchapters) > 1
else f"{chapter_title}. {subchapter_content}"
)
sanitized_chapter_title = get_sanitized_filename_for_chapter_title(
chapter_title, chapter_number, i
)
codec = "mp3"
audio_file_path = get_audio_file_path_for_chapter_info(
book_title,
sanitized_chapter_title,
voice,
sanitized_chapter_title + "." + codec,
)
        # text_to_speech writes the audio to audio_file_path; it takes no book_title argument
        # and returns nothing, so the path itself is recorded in the result.
        text_to_speech(
            input_text=combined_text,
            audio_file_path=audio_file_path,
            model=model,
            voice=voice,
        )
        chapter_audio = {"text": combined_text, "audio_path": audio_file_path}
        chapter_audio_data.append(chapter_audio)
return chapter_audio_data
def get_default_voice_model_matrix(default_chapters, predefined_matrix=None):
"""
Generates a voice-model matrix, using a predefined matrix if provided,
or creates a default matrix based on the default chapters.
Parameters:
- default_chapters (str): A string representing the default chapters to be processed.
Can be a range (e.g., "1-10"), a list of chapters (e.g., "1,3,5"), or "*" for all chapters.
- predefined_matrix (dict, optional): A predefined nested dictionary mapping voices to models
and their respective chapters. If provided, this matrix is used as is.
Returns:
- dict: A nested dictionary where each key is a voice, mapping to another dictionary
of models and their respective chapter specifications.
Example Usage:
- get_default_voice_model_matrix("*") -> processes all chapters for each voice-model combination.
- get_default_voice_model_matrix("1-10") -> processes chapters 1 to 10 for each voice-model combination.
- get_default_voice_model_matrix("*", predefined_matrix=my_predefined_matrix)
-> uses the predefined matrix directly.
"""
"""
predefined_matrix = {
"alloy": {
"tts-1": "1-5",
"tts-1-hd": "6-10"
},
"echo": {
"tts-1-f": "11-15"
}
# ... other configurations ...
}
"""
if predefined_matrix:
return predefined_matrix
# List of available voices
## available_voices = ["alloy", "echo", "fable", "onyx", "nova", "shimmer"]
available_voices = ["nova"]
# List of available models
# available_models = ["tts-1", "tts-1-f", "tts-1-m", "tts-1-hd", "tts-1-hd-f"]
available_models = ["tts-1-hd"]
# Creating the default matrix if no predefined matrix is provided
return {
voice: {model: default_chapters for model in available_models}
for voice in available_voices
}
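# Illustrative sketch (assumes the currently configured single voice and model):
#   get_default_voice_model_matrix("1-5")  ->  {"nova": {"tts-1-hd": "1-5"}}
#   get_default_voice_model_matrix("*")    ->  {"nova": {"tts-1-hd": "*"}}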
def check_audio_files_existence(chapters, book_title, voice_model_matrix):
"""
Checks if the audio files for each chapter were created successfully.
Parameters:
- chapters (list): List of chapters.
- book_title (str): The title of the book.
- voice_model_matrix (dict): A matrix mapping voices to models and their respective chapters.
"""
missing_files = []
for voice, models in voice_model_matrix.items():
for model, chapter_selection in models.items():
chapters_to_process = parse_chapter_selection(
chapter_selection, len(chapters)
)
for chapter_number in chapters_to_process:
# Generate the expected audio file path
chapter_title = f"Chapter_{chapter_number}"
audio_file_path = get_audio_file_path_for_chapter_info(
book_title, chapter_title, voice, f"{chapter_number}.mp3"
)
# Check if the file exists
if not os.path.exists(audio_file_path):
missing_files.append(audio_file_path)
if missing_files:
print("Warning: The following audio files were not created successfully:")
for missing_file in missing_files:
print(missing_file)
else:
print("All audio files created successfully.")
def create_chapter_audio_for_voice_model_matrix(
voice_model_matrix,
chapters,
book_title,
):
for voice, models in voice_model_matrix.items():
for model, chapter_selection in models.items():
chapters_to_process = parse_chapter_selection(
chapter_selection, len(chapters)
)
for chapter_number in chapters_to_process:
print(f"Processing {voice} {model} for Chapter {chapter_number}")
# Directly calling get_chapter_audio_for_chapter
chapter_audio_data = get_chapter_audio_for_chapter(
chapters[chapter_number - 1],
chapter_number,
voice,
model,
book_title,
)
def parse_chapter_selection(chapter_selection, total_chapters):
"""
Parse the chapter selection string to a list of chapter numbers.
"""
chapter_numbers = []
for part in chapter_selection.split(","):
if "-" in part:
start, end = part.split("-")
end = int(end) if end != "*" else total_chapters
chapter_numbers.extend(range(int(start), end + 1))
elif part != "*":
chapter_numbers.append(int(part))
else:
return range(1, total_chapters + 1)
return chapter_numbers
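# Illustrative sketch (sample values are assumptions, not from the original code):
#   parse_chapter_selection("1-3,7", total_chapters=10)  ->  [1, 2, 3, 7]
#   parse_chapter_selection("*", total_chapters=10)      ->  range(1, 11)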
# Assuming get_chapter_audio_for_chapter is defined elsewhere
# You will need to update it to accept voice and model as parameters
def combine_chapter_responses(response_list):
"""
Combines the chapter information from multiple responses into one list.
If the same chapter appears in multiple responses, their content is combined.
Assumes each response in response_list is already a list of dictionaries.
"""
chapter_dict = {}
for response in response_list:
if isinstance(response, list):
for chapter in response:
title = chapter.get("chapter_title", "Untitled")
content = chapter.get("chapter_content", "")
if title in chapter_dict:
# Append content to existing chapter
# print(f"Appending content to existing chapter: {title}")
chapter_dict[title] += content
else:
# Add new chapter
# print(f"Adding new chapter: {title}")
chapter_dict[title] = content
else:
print("Unexpected response format. Expected a list of dictionaries.")
# Convert the dictionary back to a list of chapter dictionaries
combined_chapters = [
{"chapter_title": title, "chapter_content": content}
for title, content in chapter_dict.items()
]
print("Finished combining chapters.")
return combined_chapters
import re
def split_into_subchapters(chapter_content, max_length=4000):
"""
Splits a long chapter into subchapters based on a maximum character length.
Tries to split at paragraph ends for natural breaks.
"""
subchapters = []
current_subchapter = ""
for paragraph in chapter_content.split("\n"):
if len(current_subchapter) + len(paragraph) + 1 > max_length:
subchapters.append(current_subchapter)
current_subchapter = paragraph
else:
current_subchapter += "\n" + paragraph
# Add the last subchapter if it contains text
if current_subchapter.strip():
subchapters.append(current_subchapter)
return subchapters
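# Illustrative sketch (sample values are assumptions, not from the original code):
# with max_length=10, split_into_subchapters("aaaa\nbbbb\ncccc") returns
#   ["\naaaa\nbbbb", "cccc"]
# i.e. splits happen at paragraph boundaries so each piece stays under the TTS character limit.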
# Function to sanitize filenames
def sanitize_filename(filename):
"""Remove or replace invalid characters for file names."""
invalid_chars = r'[<>:"/\\|?*]' # Regex pattern for invalid filename characters
return re.sub(
invalid_chars, "_", filename
) # Replace invalid characters with underscore
def get_sanitized_filename_for_chapter_title(
title, chapter_number, subchapter_number=1
):
sanitized_title = re.sub(r'[<>:"/\\|?*]', "_", title)
filename = f"{chapter_number:02d}_{sanitized_title}"
if subchapter_number > 1:
filename += f"_{subchapter_number:02d}"
return filename
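# Illustrative sketch (sample values are assumptions, not from the original code):
#   get_sanitized_filename_for_chapter_title("What is AI?", 3)     -> "03_What is AI_"
#   get_sanitized_filename_for_chapter_title("What is AI?", 3, 2)  -> "03_What is AI__02"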
import tkinter as tk
from tkinter import ttk, scrolledtext
import threading
def create_gui():
"""
Initializes and displays the main GUI window.
"""
root = tk.Tk()
root.title("Text to Speech Converter")
# Setup the main GUI components
setup_main_gui(root)
root.mainloop()
def clean_text(filedata, strings_to_remove):
"""
General cleaning of the text.
This function can be expanded with more specific cleaning requirements, such as removing
repeating words or specific non-book related text. Additional logic or regex patterns can be
implemented as needed.
Args:
filedata (str): The text to be cleaned.
strings_to_remove (list of str): A list of strings to remove from the text.
Returns:
str: The cleaned text.
"""
filedata = remove_page_numbers(filedata)
filedata = remove_specific_strings(filedata, strings_to_remove)
# Add more cleaning logic here if needed
return filedata
def remove_specific_strings(text, strings_to_remove):
"""
Remove specific strings from the text.
This function iterates over a list of strings and removes each one from the text. This is useful
for removing specific words or phrases that are known and defined in advance.
Args:
text (str): The original text from which strings will be removed.
strings_to_remove (list of str): A list of strings that should be removed from the text.
Returns:
str: The text with specified strings removed.
"""
for string in strings_to_remove:
text = text.replace(string, "")
return text
def remove_page_numbers(text):
"""
Remove page numbers from the text.
This function uses a regular expression to identify and remove patterns that match page numbers.
    The pattern '- Seite X von Y -' is targeted, where X and Y can be any numbers. This pattern is based
    on the format of the source export and can be modified to fit different page number formats.
Args:
text (str): The text from which page numbers will be removed.
Returns:
str: The text with page numbers removed.
"""
    pattern = r"- Seite \d+ von \d+ -"  # match any total page count, not just 471
return re.sub(pattern, "", text)
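# Illustrative sketch (sample values are assumptions, not from the original code):
#   remove_page_numbers("Text - Seite 12 von 471 - continues")  ->  "Text  continues"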
def flatten_bgb_structure(bgb_structure):
chapters = []
for book in bgb_structure:
book_title = book["title"]
for section in book["sections"]:
section_title = section["title"]
for title in section["titles"]:
title_title = title["title"]
for paragraph in title["paragraphs"]:
paragraph_title = paragraph["title"]
paragraph_content = paragraph["content"]
chapter_title = (
f"{book_title}_{section_title}_{title_title}_{paragraph_title}"
)
chapters.append(
{
"chapter_title": chapter_title,
"chapter_content": paragraph_content,
}
)
return chapters
def split_bgb_text(text):
    # Regular expressions for the different components
book_regex = r"\n(Buch \d+[\s\S]*?)(?=\nBuch \d+|$)"
section_regex = r"\n(Abschnitt \d+[\s\S]*?)(?=\nAbschnitt \d+|$)"
title_regex = r"\n(Titel|Untertitel) \d+[\s\S]*?(?=(Titel|Untertitel) \d+|$)"
paragraph_regex = r"\n§\s\d+\s[^§]*"
bgb_structure = []
    # Find all books
books = re.findall(book_regex, text, re.MULTILINE)
for book_content in books:
book_split = book_content.strip().split("\n", 1)
book_title = book_split[0] if len(book_split) > 1 else "Buch ohne Titel"
book_content = book_split[1] if len(book_split) > 1 else ""
book_dict = {"title": book_title, "sections": []}
sections = re.findall(section_regex, book_content, re.MULTILINE)
for section_content in sections:
section_split = section_content.strip().split("\n", 1)
section_title = (
section_split[0] if len(section_split) > 1 else "Abschnitt ohne Titel"
)
section_content = section_split[1] if len(section_split) > 1 else ""
section_dict = {"title": section_title, "titles": []}
titles = re.findall(title_regex, section_content, re.MULTILINE)
for title_content in titles:
title_split = title_content.strip().split("\n", 1)
title_title = (
title_split[0] if len(title_split) > 1 else "Titel ohne Titel"
)
title_content = title_split[1] if len(title_split) > 1 else ""
title_dict = {"title": title_title, "paragraphs": []}
paragraphs = re.findall(paragraph_regex, title_content, re.MULTILINE)
for paragraph_content in paragraphs:
paragraph_split = paragraph_content.strip().split("\n", 1)
paragraph_title = (
paragraph_split[0]
if len(paragraph_split) > 1
else "Paragraph ohne Titel"
)
paragraph_content = (
paragraph_split[1] if len(paragraph_split) > 1 else ""
)
paragraph_dict = {
"title": paragraph_title,
"content": paragraph_content,
}
title_dict["paragraphs"].append(paragraph_dict)
section_dict["titles"].append(title_dict)
book_dict["sections"].append(section_dict)
bgb_structure.append(book_dict)
return bgb_structure
def extract_chapters_from_text(text):
chapters = []
lines = text.split("\n")
chapter_counter = 1
current_title = ""
current_content = []
for line in lines:
# Check for Buch, Abschnitt, Titel, and start a new chapter
if re.match(r"(Buch \d+|Abschnitt \d+|Titel \d+)", line):
# Save previous chapter if it exists
if current_title:
chapters.append(
{
"unique_id": f"Chapter{chapter_counter}",
"chapter_title": current_title,
"chapter_content": " ".join(current_content).strip(),
}
)
chapter_counter += 1
current_title = line.strip()
current_content = []
# Check for '§' and start a new chapter
elif re.match(r"§ \d+", line):
# Save previous chapter if it exists
if current_title:
chapters.append(
{
"unique_id": f"Chapter{chapter_counter}",
"chapter_title": current_title,
"chapter_content": " ".join(current_content).strip(),
}
)
chapter_counter += 1
current_title = line.strip()
current_content = []
else:
current_content.append(line.strip())
# Add the last chapter
if current_title:
chapters.append(
{
"unique_id": f"Chapter{chapter_counter}",
"chapter_title": current_title,
"chapter_content": " ".join(current_content).strip(),
}
)
return chapters
def setup_main_gui(root):
"""
Sets up the main GUI components including mode selection and text input area.
:param root: The root window of the tkinter application.
"""
root.grid_columnconfigure(0, weight=1) # Make the main column expandable
# Mode selection
mode_label = tk.Label(root, text="Select Mode:")
mode_label.grid(row=0, column=0, sticky="w", padx=10, pady=5)
mode_combobox = ttk.Combobox(root, values=["Normal", "Book", "Clean"])
mode_combobox.grid(row=0, column=0, sticky="ew", padx=10, pady=5)
# Book title entry (initially hidden)
book_title_label = tk.Label(root, text="Book Title:")
book_title_entry = tk.Entry(root)
# Function to show/hide book title entry based on mode
def on_mode_change(event):
mode = mode_combobox.get()
if mode == "Book":
book_title_label.grid(row=1, column=0, sticky="w", padx=10, pady=5)
book_title_entry.grid(row=1, column=0, sticky="ew", padx=10, pady=5)
elif mode == "Clean":
with open("BGB.txt", "r", encoding="utf-8") as file:
filedata = file.read()
# List of specific strings to remove
strings_to_remove = [
"Ein Service des Bundesministeriums der Justiz sowie des Bundesamts für Justiz ‒ www.gesetze-im-internet.de",
# Add more unwanted phrases as needed
]
filedata = clean_text(filedata, strings_to_remove)
# bgb_structure = split_bgb_text(filedata)
# chapters = flatten_bgb_structure(bgb_structure)
chapters = extract_chapters_from_text(filedata)
print(f"Found {len(chapters)} chapters.")
# first 10 chapters
# chapters = chapters[:10]
"""
for chapter in chapters[:40]:
print(chapter)
"""
book_title = "BGB"
voice_model_matrix = get_default_voice_model_matrix("*")
            # Process each chapter
            create_chapter_audio_for_voice_model_matrix(
                voice_model_matrix, chapters, book_title
            )
# Call the check_audio_files_existence function
check_audio_files_existence(chapters, book_title, voice_model_matrix)
else:
book_title_label.grid_remove()
book_title_entry.grid_remove()
mode_combobox.bind("<<ComboboxSelected>>", on_mode_change)
# Text area for input
text_area = scrolledtext.ScrolledText(root, wrap=tk.WORD)
text_area.grid(row=2, column=0, sticky="nsew", padx=10, pady=10)
# Button row configuration
button_frame = tk.Frame(root)
button_frame.grid(row=3, column=0, sticky="ew", padx=10, pady=10)
button_frame.grid_columnconfigure(0, weight=1)
button_frame.grid_columnconfigure(1, weight=1)
# Start button for initiating conversion
start_button = tk.Button(
button_frame,
text="Start",
command=lambda: start_conversion_wrapper(
mode_combobox, text_area, book_title_entry, root
),
)
start_button.grid(row=0, column=0, padx=5, pady=5, sticky="ew")
# Load Book button
load_book_button = tk.Button(
button_frame, text="Load Book", command=lambda: load_book(root)
)
load_book_button.grid(row=0, column=1, padx=5, pady=5, sticky="ew")
def open_empty_review_gui():
empty_chapters = [] # Empty list of chapters
empty_book_title = "" # Empty book title
display_chapters_for_review(empty_chapters, empty_book_title, root)
# New Book button
new_book_button = tk.Button(
button_frame, text="New Book", command=open_empty_review_gui
)
new_book_button.grid(row=0, column=2, padx=5, pady=5, sticky="ew")
def load_book(root):
global global_book_title # Reference the global variable
books_folder = "books"
os.makedirs(books_folder, exist_ok=True) # Ensure the books folder exists
# Open a dialog to select a book file
book_file = filedialog.askopenfilename(
initialdir=books_folder,
title="Select Book",
filetypes=(("JSON Files", "*.json"), ("All Files", "*.*")),
)
if book_file:
# Load the selected book
with open(book_file, "r", encoding="utf-8") as file:
chapters = json.load(file)
# Update the global book title
global_book_title = os.path.splitext(os.path.basename(book_file))[0].replace(
"_", " "
)
display_chapters_for_review(chapters, global_book_title, root)
def start_conversion_wrapper(mode_combobox, text_area, book_title_entry, root):
mode = mode_combobox.get()
input_text = text_area.get("1.0", tk.END).strip()
book_title = book_title_entry.get().strip() if mode == "Book" else ""
def process_text():
chapters = get_chapters_for_text(input_text, book_title) # Pass book title
display_chapters_for_review(chapters, book_title, root) # Pass book title
threading.Thread(target=process_text).start()
# Function to save chapters to a JSON file
import os
import tkinter.filedialog as filedialog
def save_chapters_to_json(book_title, chapters):
try:
books_folder = "books"
os.makedirs(books_folder, exist_ok=True)
json_filename = (
f"{book_title.replace(' ', '_')}.json" if book_title else "chapters.json"
)
json_filepath = os.path.join(books_folder, json_filename)
with open(json_filepath, "w", encoding="utf-8") as file:
json.dump(chapters, file, indent=4)
print(f"Chapters saved to {json_filepath}")
except Exception as e:
print(f"Error saving chapters: {e}")
def display_chapters_for_review(chapters, book_title, root):
review_window = tk.Toplevel(root)
review_window.title("Review Chapters")
current_chapter_index = 0
# Layout configuration for resizing
review_window.grid_columnconfigure(1, weight=1)
review_window.grid_rowconfigure(1, weight=1)
# Chapter list for navigation (made larger)
chapter_list = tk.Listbox(review_window, width=40) # Adjust width as needed
chapter_list.grid(row=0, column=0, rowspan=4, sticky="nsew", padx=5, pady=5)
for chapter in chapters:
chapter_list.insert(tk.END, chapter.get("chapter_title", "Untitled"))
# Function to update the display of the current chapter
def update_chapter_display(index):
chapter = chapters[index]
chapter_title_var.set(chapter.get("chapter_title", "Untitled"))
chapter_text_area.delete("1.0", tk.END)
chapter_text_area.insert(tk.END, chapter.get("chapter_content", ""))
# Function to update chapter titles in the list
def refresh_chapter_list():
chapter_list.delete(0, tk.END)
for chapter in chapters:
chapter_list.insert(tk.END, chapter.get("chapter_title", "Untitled"))
# Update chapter data when the text or title is modified
def update_chapter_data():
current_chapter = chapters[current_chapter_index]
current_chapter["chapter_title"] = chapter_title_var.get()
current_chapter["chapter_content"] = chapter_text_area.get(
"1.0", tk.END
).strip()
refresh_chapter_list() # Refresh the list to show updated titles
# Function to handle chapter list selection
def on_chapter_select(event):
nonlocal current_chapter_index
selection = chapter_list.curselection()
if selection:
current_chapter_index = selection[0]
update_chapter_display(current_chapter_index)
# Function to add a new chapter
def add_new_chapter():
new_chapter = {"chapter_title": "New Chapter", "chapter_content": ""}
chapters.append(new_chapter)
refresh_chapter_list()
chapter_list.selection_set(len(chapters) - 1) # Select the new chapter
update_chapter_display(len(chapters) - 1) # Display the new chapter
# Function to delete the current chapter
def delete_current_chapter():
nonlocal current_chapter_index
if 0 <= current_chapter_index < len(chapters):
del chapters[current_chapter_index]
refresh_chapter_list()
new_index = min(current_chapter_index, len(chapters) - 1)
if new_index >= 0:
chapter_list.selection_set(new_index)
update_chapter_display(new_index)
else:
chapter_title_var.set("")
chapter_text_area.delete("1.0", tk.END)
chapter_list.bind("<<ListboxSelect>>", on_chapter_select)
# Editable chapter title
chapter_title_var = tk.StringVar()
chapter_title_entry = tk.Entry(review_window, textvariable=chapter_title_var)
chapter_title_entry.grid(row=0, column=1, sticky="ew", padx=5, pady=5)
# Chapter text area
chapter_text_area = scrolledtext.ScrolledText(
review_window, wrap=tk.WORD, height=5, width=50
)
chapter_text_area.grid(row=1, column=1, sticky="nsew", padx=5, pady=5)
# Navigation buttons
previous_button = tk.Button(
review_window, text="Previous Chapter", command=lambda: change_chapter(-1)
)
previous_button.grid(row=2, column=1, sticky="w", padx=5, pady=5)
next_button = tk.Button(
review_window, text="Next Chapter", command=lambda: change_chapter(1)
)
next_button.grid(row=2, column=1, sticky="e", padx=5, pady=5)
# Audio conversion buttons
convert_current_button = tk.Button(
review_window,
text="Convert Current Chapter",
command=lambda: convert_current_chapter(current_chapter_index),
)
convert_current_button.grid(row=3, column=1, sticky="w", padx=5, pady=5)
convert_all_button = tk.Button(
review_window,
text="Convert All Chapters",
command=lambda: convert_all_chapters(chapters),
)
convert_all_button.grid(row=3, column=1, sticky="e", padx=5, pady=5)
def change_chapter(delta):
nonlocal current_chapter_index
new_index = current_chapter_index + delta
if 0 <= new_index < len(chapters):
current_chapter_index = new_index
update_chapter_display(current_chapter_index)
chapter_list.selection_clear(0, tk.END)
chapter_list.selection_set(current_chapter_index)
def convert_current_chapter(index):
chapter = chapters[index]
# Prompt user for chapter number
chapter_number = simpledialog.askinteger(
"Chapter Number", "Enter the chapter number:", parent=review_window
)
# Check if the user provided a chapter number
if chapter_number is not None:
# add default matrix prodction
voice_model_matrix = get_default_voice_model_matrix("*")
# Create the chapter audio
create_chapter_audio_for_voice_model_matrix(
voice_model_matrix, [chapter], book_title
)
# Mark the chapter as converted (e.g., change background color in the list)
chapter_list.itemconfig(index, {"bg": "green"})
else:
# Handle case where user cancels the input or enters an invalid number
print("Chapter conversion canceled or invalid chapter number entered.")
def convert_all_chapters(chapters):
# Implement conversion logic for all chapters
start_audio_conversion(chapters)
chapter_text_area.bind("<KeyRelease>", lambda event: update_chapter_data())
chapter_title_entry.bind("<KeyRelease>", lambda event: update_chapter_data())
# Add and delete chapter buttons
add_chapter_button = tk.Button(
review_window, text="Add Chapter", command=add_new_chapter
)
add_chapter_button.grid(row=5, column=1, sticky="w", padx=5, pady=5)
delete_chapter_button = tk.Button(
review_window, text="Delete Chapter", command=delete_current_chapter
)
delete_chapter_button.grid(row=5, column=1, sticky="e", padx=5, pady=5)
# Button to save chapters to JSON
save_json_button = tk.Button(
review_window,
text="Save Chapters to JSON",
command=lambda: save_chapters_to_json(book_title, chapters),
)
save_json_button.grid(row=6, column=0, columnspan=2, padx=5, pady=5)
update_chapter_display(current_chapter_index)
def start_audio_conversion(chapters):
"""
Starts the audio conversion process for the reviewed chapters.
:param chapters: List of reviewed chapters.
"""
    create_chapter_audio_for_voice_model_matrix(
        get_default_voice_model_matrix("*"), chapters, global_book_title
    )
def ask_for_Book_title(root):
"""
Asks the user for the Book title and saves it to a .env file.
:param root: The root window of the tkinter application.
:return: The entered Book title.
"""
Book_title = simpledialog.askstring(
"Book title Required", "Enter your Book title:", parent=root
)
# Save the key to a .env file
with open(".env", "w") as file:
file.write(f"Book_title={Book_title}\n")
return Book_title
def ask_for_api_key(root):
"""
Asks the user for the OpenAI API key and saves it to a .env file.
:param root: The root window of the tkinter application.
:return: The entered API key.
"""
api_key = simpledialog.askstring(
"API Key Required", "Enter your OpenAI API key:", parent=root
)
# Save the key to a .env file
with open(".env", "w") as file:
file.write(f"OPENAI_API_KEY={api_key}\n")
return api_key
def load_api_key():
"""
Loads the OpenAI API key from the environment or prompts the user to enter it.
:return: The OpenAI API key.
"""
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
# Initialize a minimal Tkinter root window
root = tk.Tk()
root.withdraw() # Hide the root window
# Ask the user for the API key
api_key = ask_for_api_key(root)
# Destroy the Tkinter root window
root.destroy()
return api_key
if __name__ == "__main__":
api_key = load_api_key()
# Check if the user provided the key
if not api_key:
print("No API key provided. Exiting.")
exit(1)
# Initialize OpenAI client with your API key
openai.api_key = api_key
create_gui()
| [
" "
] |
2024-01-10 | 0xalphaprime/ai-ml-data | openai~api_test.py | import os
import openai
# Load your API key from an environment variable or secret management service
openai.api_key = os.getenv("OPENAI_API_KEY")
response = openai.Completion.create(
model="text-davinci-003", prompt="Say this is a test", temperature=0, max_tokens=7
)
# $ openai api completions.create -m text-davinci-003 -p "Say this is a test" -t 0 -M 7 --stream
| [
"Say this is a test"
] |
2024-01-10 | GovindN75/Note-Summarizer | server~backend.py | import time
import cohere
from flask import Flask, request, jsonify
from flask_cors import CORS
import re
app = Flask(__name__)
CORS(app)
api_key = 'oF6eA5FnAgLKeezfIAgjWn7PraIRJHH00riUjr5Q'
co = cohere.Client(api_key)
MAX_STRING_SIZE = 1000
# Split the prompt into chunks of at most MAX_STRING_SIZE characters, breaking at sentence ends
def pre_process_prompt(prompt):
prompt_array = []
if len(prompt) > MAX_STRING_SIZE:
while prompt:
idx = prompt[:MAX_STRING_SIZE].rfind('.')
if idx == -1:
idx = prompt.find('.')
if idx == -1:
idx = len(prompt)
chunk = prompt[:idx+1]
prompt_array.append(chunk)
prompt = prompt[idx+1:]
return prompt_array
return [prompt]
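# Illustrative sketch (not part of the original code): a prompt longer than MAX_STRING_SIZE
# is cut at the last '.' inside each 1000-character window, so the result is a list of
# roughly sentence-aligned chunks; a short prompt comes back unchanged as a single-element list.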
@app.route('/api/summarize', methods=['POST'])
def summarize_text():
request_json = request.json
prompt = request.json.get('text')
prompt_array = pre_process_prompt(prompt)
summary = []
format = request_json.get('format').lower()
summary_length = request_json.get('summary_length').lower()
    for i, input_prompt in enumerate(prompt_array):
        if i != 0:
            time.sleep(15)  # crude rate limiting between successive API calls
        response = co.summarize(
            length=summary_length,
            text=input_prompt,  # summarize the current chunk, not the full prompt
            format=format,
            model='summarize-medium',
            additional_command='',
            temperature=0.1,
        )
        if format == "bullets":
            summary += (response.summary.split('\n'))
        else:
            summary.append(response.summary)
return summary if format == "bullets" else [' '.join(summary)]
if __name__ == '__main__':
app.run(debug=True)
| [
"[]"
] |
2024-01-10 | daymade/WisdomWeaver | chatgpt_api.py | import logging
from openai import OpenAI
import os
from dotenv import load_dotenv
# Load environment variables from the .env file
load_dotenv()
client = OpenAI()
# Set your OpenAI API key
client.api_key = os.getenv('OPENAI_API_KEY')
# Define a function that calls the ChatGPT API
def analyze_text_with_chatgpt(user_text,
system_prompt,
model="gpt-3.5-turbo"):
try:
        # Log timing around the API request
logging.info("开始请求 API...")
response = client.chat.completions.create(
model=model,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_text}
]
)
logging.info("请求 API 完成")
result = response.choices[0].message.content
logging.info("API 请求成功,返回结果: %s", result)
return result
except Exception as e:
logging.error("API 请求出错: %s", e)
return None
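# Illustrative usage sketch (the sample texts below are assumptions, not from the original code):
#   summary = analyze_text_with_chatgpt(
#       user_text="The meeting moved to Friday and the budget was approved.",
#       system_prompt="Summarize the user's note in one sentence.",
#   )
#   print(summary)  # None is returned if the API call fails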
| [] |
2024-01-10 | TogetherCrew/qabot | packages~ml~src~memory~procedual_memory.py | import os
from pydantic import BaseModel, Field
from langchain.schema import Document
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from typing import List
from tools.base import AgentTool
from utils.constants import DEFAULT_EMBEDDINGS
class ProcedualMemoryException(Exception):
pass
class ToolNotFoundException(ProcedualMemoryException):
pass
class ProcedualMemory(BaseModel):
tools: List[AgentTool] = Field([], title="Agent Tools")
embeddings: HuggingFaceEmbeddings = Field(DEFAULT_EMBEDDINGS, title="Embeddings to use for tool retrieval")
docs: List[Document] = Field([], title="Documents to use for tool retrieval")
vector_store: FAISS = Field(
None, title="Vector store to use for tool retrieval")
class Config:
arbitrary_types_allowed = True
def memorize_tools(self, tools: List[AgentTool]) -> None:
"""Memorize tools and embed them."""
for tool in tools:
self.tools.append(tool)
self.docs = [Document(page_content=t.description, metadata={
"index": i}) for i, t in enumerate(self.tools)]
self._embed_docs()
def remember_tool_by_name(self, tool_name: str) -> AgentTool:
"""Remember a tool by name and return it."""
tool = [tool for tool in self.tools if tool.name.lower() == tool_name.lower()]
if tool:
return tool[0]
else:
raise ToolNotFoundException(f"Tool {tool_name} not found")
def remember_relevant_tools(self, query: str) -> List[AgentTool]:
"""Remember relevant tools for a query."""
retriever = self.vector_store.as_retriever()
relevant_documents = retriever.get_relevant_documents(query)
return [self.tools[d.metadata["index"]] for d in relevant_documents]
def remember_all_tools(self) -> List[AgentTool]:
"""Remember all tools and return them."""
return self.tools
def tools_to_prompt(self, tools: List[AgentTool]) -> str:
# Set up the prompt
tool_info = ""
for tool in tools:
tool_info += tool.get_tool_info() + "\n"
return tool_info
def _embed_docs(self) -> None:
"""Embed tools."""
# if self.vector_store is None:
# self.vector_store = DeepLake(dataset_path=PERIODIC_MEMORY_DIR,embedding=self.embeddings)
# self.vector_store.add_texts(texts=[doc.page_content for doc in self.docs], metadatas=[doc.metadata for doc in self.docs])
self.vector_store: FAISS = FAISS.from_documents(
self.docs, self.embeddings
)
| [] |
2024-01-10 | TogetherCrew/qabot | packages~vector_server~vectorstore~call_GPT.py | # Author Ene SS Rawa / Tjitse van der Molen
import math
import time
import openai
import backoff
import tiktoken
from logger.embedding_logger import logger
@backoff.on_exception(backoff.expo, openai.error.OpenAIError) # cover all errors (RateLimitError, APIError, Timeout etc.)
def text_completions_with_backoff(**kwargs):
"""
Calls GPT ChatCompletion with exponential backoff
Input: openai.Chatcompletion API call in same format as under normal conditions
Output: openai.Chatcompletion API output in same format as under normal conditions
"""
return openai.ChatCompletion.create(**kwargs)
# # #
@backoff.on_exception(backoff.expo, openai.error.RateLimitError)
def audio_transcription_with_backoff(audio_file):
"""
Calls GPT audio transcription with exponential backoff
Input: opened audiofile of any of the following formats: mp3, mp4, mpeg, mpga, m4a, wav, webm
Output: dictionary with "Text" as key and the transcribed text from the audio file as value
"""
return openai.Audio.transcribe("whisper-1", audio_file)
# # #
def call_GPT(gpt_prompt, gpt_role="You are a helpful assistant.", start_seq="\n\nSummary:\n",
include_start_seq_out=False, max_output_tokens=500, temp=0):
"""
Calls GPT API and returns results
Input:
gpt_prompt - str: prompt for GPT call
start_seq - str: start_inject text. forced start of GPT response
(default = "\n\nSummary:\n")
include_start_seq_out - bool: whether start_seq should be included
in the output (default = False)
max_output_tokens - int: maximum number of tokens in output (default
= 500). If a value larger than 3900 or a negative values is
provided, max_output_tokens will be set to default.
temp - float: temperature value for GPT call in range from 0 to 1
(default = 0). If a value outside of the allowed range is
provided, temp will be set to default.
Output:
result - str: GPT response to gpt_prompt
"""
# check if max_output_tokens value is within range
if max_output_tokens < 0 or max_output_tokens > 3900:
print(
"ERROR: use max_output_tokens value between 0 and 3900, default value of max_output_tokens=500 will be used")
max_output_tokens = 500
# check if temp value is within range
if temp < 0 or temp > 1:
print("ERROR: use temp value between 0 and 1, default value of temp=0 will be used")
temp = 0
# add start text to prompt
prompt_with_start = gpt_prompt + start_seq
logger.debug(f"Waiting some secs")
time.sleep(1)
logger.debug(f"Prompt with start: {prompt_with_start}")
# run gpt prompt through API
response = text_completions_with_backoff(
model="gpt-3.5-turbo",
messages=[{"role": "system", "content": gpt_role}, {"role": "user", "content": prompt_with_start}],
temperature=temp,
max_tokens=max_output_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# obtain result output
if include_start_seq_out:
result = start_seq + response["choices"][0]["message"]["content"]
else:
result = response["choices"][0]["message"]["content"]
# obtain number of tokens used
num_tokens_used = response["usage"]["total_tokens"]
logger.debug(f"=================")
logger.debug(f"Number of tokens used: {num_tokens_used}")
logger.debug(f"Result: {result}")
logger.debug(f"=================")
return result, num_tokens_used
# # #
def split_prompt_tokens(gpt_prompt, max_tokens, summarize_prompt_basis=None, sep_char=["\n", " "]):
"""
Counts tokens in prompt and splits it if tokens exceed max_tokens
Input:
gpt_prompt - str: prompt text for gpt
tokenizer - GPT2Tokenizer: tokenized gpt prompt
max_tokens - int: maximum number of tokens allowed in single prompt
summarize_prompt_basis - str: prompt basis for gpt (prompt without
text to be summarized) to be included in prompts after splitting
Output:
prompt_list - [str]: list containing split prompts of at most
max_tokens tokens each
Notes:
Prompt splitter is not implemented yet
"""
    # initialize encoding
encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
# encode input text
token_list = encoding.encode(gpt_prompt)
# # # SPLIT DATA # # #
# if the prompt has more tokens than allowed
if len(token_list) > max_tokens:
# determine minimal number of splits and add 1
# num_split = (len(token_list) // max_tokens + (len(token_list) % max_tokens > 0)) + 1
num_split = math.ceil(len(token_list) / max_tokens)
# split text with newline separator
prompt_list = split_gpt_prompt(gpt_prompt, sep_char[0], round(len(gpt_prompt) / num_split),
summarize_prompt_basis)
# if text could not be split
if not prompt_list:
# split text with space separator
prompt_list = split_gpt_prompt(gpt_prompt, sep_char[1], round(len(gpt_prompt) / num_split),
summarize_prompt_basis)
# if text could not be split
if not prompt_list:
# print error message
print(
"ERROR: Text separators in sep_char do not split text into sufficiently small chunks, adjust text input")
return False
else:
# print warning message
print(
"WARNING: The first separator in sep_char does not split text into sufficiently small chunks, the second separator in sep_char is used instead. This might lead to lower performance")
else:
# make single entry list of gpt_prompt
prompt_list = [gpt_prompt]
return prompt_list
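# Illustrative sketch (sample values are assumptions, not from the original code):
# a prompt whose token count exceeds max_tokens is split into ceil(tokens / max_tokens) + 1
# roughly equally sized character chunks, preferring newline boundaries and falling back to
# spaces; a prompt that already fits is returned unchanged as [gpt_prompt], and False is
# returned when neither separator yields sufficiently small chunks.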
# # #
def split_gpt_prompt(text, split_string, max_char, prompt_basis):
"""
Splits a long GPT prompt into smaller equally sized prompts
Input:
prompt - str: A GPT prompt to split
max_char - int: The maximum number of characters for each prompt
Output:
prompts - [str]: A list of smaller GPT prompts
"""
# split the text based on the provided split_string
split_text = text.split(split_string)
# make empty result arrays
curr_prompt = ""
all_prompts = []
# for each split text
for sp_t in split_text:
# if the split text is longer than max_char
if len(sp_t) + len(prompt_basis) > max_char:
return False
# if the current prompt plus the added text is smaller than the maximum characters
if len(curr_prompt) + len(sp_t) <= max_char:
# add the added text to the current prompt
curr_prompt = curr_prompt + sp_t + split_string
else:
# store current prompt in all prompts
all_prompts.append(curr_prompt)
# overwrite curr_prompt with prompt basis
curr_prompt = prompt_basis + sp_t + split_string
# add last prompt to list
all_prompts.append(curr_prompt)
return all_prompts
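# Illustrative sketch (sample values are assumptions, not from the original code):
#   split_gpt_prompt("one two three four", " ", max_char=10, prompt_basis="P: ")
#   -> ["one two ", "P: three ", "P: four "]
# Every chunk after the first is re-prefixed with the prompt basis.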
# # #
def transcribe_audio(audio_file, OA_key):
"""
transcribes audio file into text
Input:
audio_file: opened audiofile of any of the following formats: mp3, mp4, mpeg, mpga, m4a, wav, webm
OA_key - str: OpenAI key for API call
Output:
transcribe_out - {str:str}: dictionary with "Text" as key and the transcribed text from the audio file as value
Notes:
Output can be used as input for text based GPT implementations
"""
# set openai key
openai.api_key = OA_key
# Run the audio transcription function
transcribe_out = audio_transcription_with_backoff(audio_file)
return transcribe_out
| [
"[]",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER",
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | TogetherCrew/qabot | packages~ml~src~manager~task_manager.py | from pydantic import BaseModel, Field
from langchain.llms.base import BaseLLM
from typing import List
from langchain import LLMChain
from llm.generate_task_plan.prompt import get_subquestions_template, get_template
from llm.list_output_parser import LLMListOutputParser
class Task(BaseModel):
"""Task model."""
id: int = Field(..., description="Task ID")
description: str = Field(..., description="Task description")
is_done: bool = Field(False, description="Task done or not")
result: str = Field("", description="The result of the task")
last_tool_name: str = Field(None, description="Last tool name")
last_tool_args: str = Field(None, description="Last tool args")
class TaskManager(BaseModel):
"""Task manager model."""
# subquestions: List[str] = Field([], description="The list of subquestions")
tasks: List[Task] = Field([], description="The list of tasks")
current_task_id: int = Field(1, description="The last task id")
llm: BaseLLM = Field(..., description="llm class for the agent")
def discard_current_task(self):
"""Discard the current task."""
self.tasks = [task for task in self.tasks if task.id != self.current_task_id]
async def generate_task_plan(
self, name: str, role: str, question: str, tool_info: str
):
"""Generate a task plan for the agent."""
prompt = get_template()
# If you want to change temperature use something like below:
# be_creative_llm = self.llm.copy(deep=True, update={"temperature": "0.5"})
llm_chain = LLMChain(prompt=prompt, llm=self.llm)
try:
result = await llm_chain.apredict(
name=name,
role=role,
question=question,
# subquestions_list=self.subquestions,
# tool_info=tool_info,
)
except Exception as e:
raise Exception(f"Error: {e}")
# Parse and validate the result
try:
result_list = LLMListOutputParser.parse(result, separeted_string="\t")
except Exception as e:
raise Exception("Error: " + str(e))
# Add tasks with a serial number
for i, e in enumerate(result_list, start=1):
id = int(i)
description = e
self.tasks.append(Task(id=id, description=description))
def get_task_by_id(self, id: int) -> Task:
"""Get a task by Task id."""
for task in self.tasks:
if task.id == id:
return task
return None
def get_current_task(self) -> Task:
"""Get the current task agent is working on."""
return self.get_task_by_id(self.current_task_id)
def get_current_task_string(self) -> str:
"""Get the current task agent is working on as a string."""
task = self.get_current_task()
if task is None:
return None
else:
return self._task_to_string(task)
def complete_task(self, id: int, result: str) -> None:
"""Complete a task by Task id."""
# Complete the task specified by ID
self.tasks[id - 1].is_done = True
self.tasks[id - 1].result = result
self.current_task_id += 1
def complete_current_task(self, result: str) -> None:
"""Complete the current task agent is working on."""
self.complete_task(self.current_task_id, result=result)
def _task_to_string(self, task: Task) -> str:
"""Convert a task to a string."""
return f"{task.id}: {task.description}"
def get_completed_tasks(self) -> List[Task]:
"""Get the list of completed tasks."""
return [task for task in self.tasks if task.is_done]
def get_completed_tasks_as_string(self) -> str:
"""Get the list of completed tasks as string."""
return "\n".join(
[self._task_to_string(task) for task in self.tasks if task.is_done]
)
def get_results_completed_tasks_as_string(self) -> str:
"""Get the list results of completed tasks as string."""
return "\n".join(
[f"{task.id}: {task.result}" for task in self.tasks if task.is_done]
)
def get_incomplete_tasks(self) -> List[Task]:
"""Get the list of incomplete tasks."""
return [task for task in self.tasks if not task.is_done]
def get_incomplete_tasks_string(self) -> str:
"""Get the list of incomplete tasks as a string."""
result = "\n"
for task in self.get_incomplete_tasks():
result += self._task_to_string(task) + "\n"
return result
def is_action_already_used_in_current_task(self, tool_name, args):
current_task = self.get_current_task()
if (
current_task
and current_task.last_tool_name == tool_name
and current_task.last_tool_args == args
):
return True
current_task.last_tool_name = tool_name
current_task.last_tool_args = args
return False
| [] |
2024-01-10 | TogetherCrew/qabot | packages~vector_server~tests~celery_test.py | from logger.embedding_logger import logger
from tasks import celery
from utils.constants import OPENAI_API_KEY, DB_CONNECTION_STR, DB_GUILD
# t = vector_task.dummy.delay()
logger.debug('Starting vector store update in celery')
# logger.debug(f"OPENAI_API_KEY: {OPENAI_API_KEY}")
# logger.debug(f"DB_CONNECTION_STR: {DB_CONNECTION_STR}")
logger.debug(f"DB_GUILD: {DB_GUILD}")
t = celery.vector_store_update.delay('random-session', OPENAI_API_KEY,DB_CONNECTION_STR,DB_GUILD)
logger.debug(f"Result: {t}") | [] |
2024-01-10 | TogetherCrew/qabot | packages~ml~src~memory~semantic_memory.py | import asyncio
import os
import traceback
import json
from typing import Any, Optional
from pydantic import BaseModel, Field
from langchain.llms.base import BaseLLM
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from llm.extract_entity.prompt import get_chat_template
from llm.extract_entity.schema import JsonSchema as ENTITY_EXTRACTION_SCHEMA
from llm.json_output_parser import LLMJsonOutputParser, LLMJsonOutputParserException
from logger.hivemind_logger import logger
from ui.cui import CommandlineUserInterface
from utils.constants import DEFAULT_EMBEDDINGS
from utils.util import atimeit, timeit
import base58
CREATE_JSON_SCHEMA_STR = json.dumps(ENTITY_EXTRACTION_SCHEMA.schema)
class SemanticMemory(BaseModel):
num_episodes: int = Field(0, description="The number of episodes")
llm: BaseLLM = Field(..., description="llm class for the agent")
openaichat: Optional[ChatOpenAI] = Field(
None, description="ChatOpenAI class for the agent"
)
embeddings: OpenAIEmbeddings = Field(DEFAULT_EMBEDDINGS,
title="Embeddings to use for tool retrieval",
)
vector_store: FAISS = Field(
None, title="Vector store to use for tool retrieval"
)
ui: CommandlineUserInterface | None = Field(None)
class Config:
arbitrary_types_allowed = True
# def __init__(self, question: str, **kwargs):
# super().__init__(**kwargs)
# filename = base58.b58encode(question.encode()).decode()
# if self.vector_store is None:
# self.vector_store = DeepLake(read_only=True, dataset_path=os.path.join(SEMANTIC_MEMORY_DIR, f"{filename}"),
# embedding=self.embeddings)
def __del__(self):
del self.embeddings
del self.vector_store
@atimeit
async def extract_entity(self, text: str, question: str, task: str) -> dict:
"""Extract an entity from a text using the LLM"""
if self.openaichat:
# print(f"semantic->extract_entity->Text1: {text}")
# If OpenAI Chat is available, it is used for higher accuracy results.
prompt = (
get_chat_template()
.format_prompt(text=text, question=question, task=task)
.to_messages()
)
full_prompt = " ".join([msg.content for msg in prompt])
logger.debug(f"semantic->extract_entity->Prompt: {full_prompt}")
llm_result = await self.openaichat._agenerate(messages=prompt)
await self.ui.call_callback_info_llm_result(llm_result)
result = llm_result.generations[0].message.content
# result = self.openaichat(prompt).content
else:
raise Exception("Should never happen!")
# Parse and validate the result
try:
# print(f"semantic->extract_entity->Result: {result}")
result_json_obj = LLMJsonOutputParser.parse_and_validate(
json_str=result, json_schema=CREATE_JSON_SCHEMA_STR, llm=self.llm
)
except LLMJsonOutputParserException as e:
raise LLMJsonOutputParserException(str(e))
try:
if len(result_json_obj) > 0:
await asyncio.create_task(self._embed_knowledge(result_json_obj))
except BaseException as e:
print(f"semantic->extract_entity->Text: {text}\n")
print(f"semantic->extract_entity->Result: {result}\n")
print(
f"semantic->extract_entity->Extracted entity: {result_json_obj}\n"
)
print(traceback.print_exc())
# raise Exception(f"Error: {e}")
return result_json_obj
@timeit
def remember_related_knowledge(self, query: str, k: int = 5) -> dict:
"""Remember relevant knowledge for a query."""
if self.vector_store is None:
return {}
relevant_documents = self.vector_store.similarity_search(query, k=k)
return {
d.metadata["entity"]: d.metadata["description"] for d in relevant_documents
}
@atimeit
async def _embed_knowledge(self, entity: dict[str:Any]):
"""Embed the knowledge into the vector store."""
description_list = []
metadata_list = []
for entity, description in entity.items():
description_list.append(description)
metadata_list.append({"entity": entity, "description": description})
if self.vector_store is None:
self.vector_store = FAISS.from_texts(texts=description_list,metadatas=metadata_list,
embedding=self.embeddings)
# self.vector_store = DeepLake(read_only=False, dataset_path=SEMANTIC_MEMORY_DIR,
# embedding=self.embeddings)
self.vector_store.add_texts(texts=description_list, metadatas=metadata_list)
# async def save_local(self, path: str) -> None:
# """Save the vector store to a local folder."""
# async def _save():
# self.vector_store.save_local(folder_path=path)
# await asyncio.create_task(_save())
# def load_local(self, path: str) -> None:
# """Load the vector store from a local folder."""
#
# # async def _load():
# # self.vector_store = FAISS.load_local(
# # folder_path=path, embeddings=self.embeddings
# # )
#
# # await asyncio.create_task(_load())
# self.vector_store = DeepLake(read_only=True, dataset_path=path, embedding=self.embeddings)
| [
" "
] |
2024-01-10 | TogetherCrew/qabot | packages~vector_server~utils~constants.py | # Define the default values
import os
from dotenv import load_dotenv
from langchain.embeddings import OpenAIEmbeddings
from logger.embedding_logger import logger
# from sentence_transformers import SentenceTransformer
ENV_FILE = os.getenv('ENV_FILE')
logger.debug(f"ENV_FILE: {ENV_FILE}")
if ENV_FILE != 'docker':
dotenv_path = os.path.join(os.path.dirname(__file__), '../.local.env')
logger.debug(f"Loading .env from {dotenv_path}")
load_dotenv(dotenv_path=dotenv_path)
OPENAI_API_MODEL = os.getenv("OPENAI_API_MODEL", "gpt-3.5-turbo")
assert OPENAI_API_MODEL, "OPENAI_API_MODEL environment variable is missing from .env"
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
assert OPENAI_API_KEY, "OPENAI_API_KEY environment variable is missing from .env"
#MongoDB
DB_CONNECTION_STR = os.getenv("DB_CONNECTION_STR", "")
assert DB_CONNECTION_STR, "DB_CONNECTION_STR environment variable is missing from .env"
DB_GUILD = os.getenv("DB_GUILD", "")
assert DB_GUILD, "DB_GUILD environment variable is missing from .env"
USE_LOCAL_STORAGE = True
DEEPLAKE_FOLDER = "vector_store"
DEEPLAKE_PLATFORM_FOLDER = "discord"
DEFAULT_EMBEDDINGS = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
DEEPLAKE_RAW_FOLDER = "DeepLake_VectorStore_414_419_raw_messages"
DEEPLAKE_SUMMARY_FOLDER = "DeepLake_VectorStore_414_419_summaries"
# VECTOR SERVER CONFIG
HIVEMIND_VS_PORT = os.getenv("HIVEMIND_VS_PORT", 1234)
# RABBITMQ CONFIG
RABBITMQ_HOST = os.getenv("RABBITMQ_HOST", "localhost")
RABBITMQ_PORT = os.getenv("RABBITMQ_PORT", 5672)
RABBITMQ_USER = os.getenv("RABBITMQ_USER", "guest")
RABBITMQ_PASS = os.getenv("RABBITMQ_PASS", "guest")
# REDIS CONFIG
REDIS_HOST = os.getenv('REDIS_HOST', 'localhost')
REDIS_PORT = os.getenv('REDIS_PORT', 6379)
REDIS_USER = os.getenv('REDIS_USER', None)
REDIS_PASS = os.getenv('REDIS_PASS', None)
USER_AND_PASS = f"{REDIS_USER if REDIS_USER else '' }:{REDIS_PASS}@" if REDIS_PASS else ''
REDIS_URI = os.getenv('REDIS_URI', f"redis://{USER_AND_PASS}{REDIS_HOST}:{REDIS_PORT}")
| [] |
2024-01-10 | TogetherCrew/qabot | packages~ml~src~ui~cui.py | import asyncio
import itertools
import sys
from enum import Enum
from typing import AsyncContextManager, Optional
from langchain.schema import LLMResult, ChatResult
from server.callback import InfoChunk, TextChunk
from ui.base import BaseHumanUserInterface
from utils.util import get_total_tokens
class Color(Enum):
"""Color codes for the commandline"""
BLACK = "\033[30m" # (Text) Black
RED = "\033[31m" # (Text) Red
GREEN = "\033[32m" # (Text) Green
YELLOW = "\033[33m" # (Text) Yellow
BLUE = "\033[34m" # (Text) Blue
MAGENTA = "\033[35m" # (Text) Magenta
CYAN = "\033[36m" # (Text) Cyan
WHITE = "\033[37m" # (Text) White
COLOR_DEFAULT = "\033[39m" # Reset text color to default
class CommandlineUserInterface(BaseHumanUserInterface):
"""Commandline user interface."""
def get_user_input(self) -> str:
"""Get user input and return the result as a string"""
user_input = input("Input:")
return str(user_input)
def get_binary_user_input(self, prompt: str) -> bool:
"""Get a binary input from the user and return the result as a bool"""
yes_patterns = ["y", "yes", "yeah", "yup", "yep"]
no_patterns = ["n", "no", "nah", "nope"]
while True:
response = input(prompt + " (y/n) ").strip().lower()
if response in yes_patterns:
return True
elif response in no_patterns:
return False
else:
# self.notify(
# "Invalid input", "Please enter y or n.", title_color=Color.RED
# )
print("Invalid input", "Please enter y or n.")
continue
async def notify(
self,
message: str,
title: Optional[str] = None,
title_color: str | Color = Color.YELLOW,
stream: bool = False,
) -> None:
"""Print a notification to the user"""
if stream:
await self.stream(title=title, message=message)
if isinstance(title_color, str):
try:
title_color = Color[title_color.upper()]
except KeyError:
raise ValueError(f"{title_color} is not a valid Color")
self._print_message(title=title, message=message, title_color=title_color)
async def stream(self, message: str, title: Optional[str] = None):
"""Print a notification to the user"""
await self._call_callback_text(f"{f'{title}: ' if title else ''}{message}")
async def _call_callback_text(self, message: str):
if self.callback is not None:
await self.callback.on_llm_new_token(TextChunk(token=f"{message}\n"))
await asyncio.sleep(0.05)
async def call_callback_info(self, count_tokens: int, model_name: str | None = None):
if self.callback is not None:
await self.callback.on_llm_new_token(InfoChunk(count_tokens=count_tokens, model_name=model_name))
await asyncio.sleep(0.05)
async def call_callback_info_llm_result(self, llm_result: LLMResult | ChatResult):
await self.call_callback_info(count_tokens=get_total_tokens(llm_result),
model_name=llm_result.llm_output["model_name"])
async def call_callback_end(self):
if self.callback is not None:
await self.callback.on_llm_end(response=None)
await asyncio.sleep(0.05)
async def call_callback_error(self, error: BaseException | KeyboardInterrupt):
if self.callback is not None:
await self.callback.on_llm_error(error=error)
await asyncio.sleep(0.05)
async def loading(
self,
message: str = "Thinking...",
delay: float = 0.1,
) -> AsyncContextManager:
"""Return a context manager that will display a loading spinner"""
await self._call_callback_text(message)
return self.Spinner(message=message, delay=delay)
def _print_message(self, message: str, title_color: Color, title: Optional[str] = None) -> None:
print(
f"{f'{title_color.value}{title}{Color.COLOR_DEFAULT.value}:' if title else ''} {message}"
)
class Spinner(AsyncContextManager):
"""A simple spinner class"""
def __init__(self, message="Loading...", delay=0.1):
"""Initialize the spinner class"""
self.spinner = itertools.cycle(["-", "/", "|", "\\"])
self.delay = delay
self.message = message
self.running = False
self.task = None
async def spin(self):
"""Spin the spinner"""
while self.running:
sys.stdout.write(next(self.spinner) + " " + self.message + "\r")
sys.stdout.flush()
await asyncio.sleep(self.delay)
sys.stdout.write("\b" * (len(self.message) + 2))
        async def __aenter__(self):
            """Start the spinner"""
            print("aenter")
            self.running = True
            # Schedule the spin loop without awaiting it; awaiting the task here
            # would block __aenter__ until the spinner is cancelled.
            self.task = asyncio.create_task(self.spin())
            return self
async def __aexit__(self, exc_type, exc_value, exc_traceback):
"""Stop the spinner"""
print("aexit")
self.running = False
self.task.cancel()
sys.stdout.write("\r" + " " * (len(self.message) + 2) + "\r")
sys.stdout.flush()
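# Minimal usage sketch for the nested Spinner context manager above; the demo
# coroutine is illustrative only and is not invoked anywhere in this module.
async def _spinner_demo() -> None:
    async with CommandlineUserInterface.Spinner(message="Working...", delay=0.1):
        await asyncio.sleep(0.5)  # stand-in for real work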
| [] |
2024-01-10 | TogetherCrew/qabot | packages~ml~src~utils~constants.py | # Define the default values
import os
from dotenv import load_dotenv
from langchain.embeddings import OpenAIEmbeddings
from logger.hivemind_logger import logger
ENV_FILE = os.getenv('ENV_FILE')
logger.debug(f"ENV_FILE: {ENV_FILE}")
if ENV_FILE != 'docker':
dotenv_path = os.path.join(os.path.dirname(__file__), '../../.local.env')
logger.debug(f"Loading .env from {dotenv_path}")
load_dotenv(dotenv_path=dotenv_path)
OPENAI_API_MODEL = os.getenv("OPENAI_API_MODEL", "gpt-3.5-turbo")
assert OPENAI_API_MODEL, "OPENAI_API_MODEL environment variable is missing from .env"
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
assert OPENAI_API_KEY, "OPENAI_API_KEY environment variable is missing from .env"
# Set Agent Settings
AGENT_NAME = os.getenv("AGENT_NAME", "")
assert AGENT_NAME, "AGENT_NAME variable is missing from .env"
AGENT_ROLE = os.getenv("AGENT_ROLE", "")
assert AGENT_ROLE, "AGENT_ROLE variable is missing from .env"
AGENT_OBJECTIVE = os.getenv("AGENT_OBJECTIVE", None)
# API CONFIG
HIVEMIND_API_PORT = os.getenv("HIVEMIND_API_PORT", 3333)
# VECTOR SERVER CONFIG
HIVEMIND_VS_HOST = os.getenv("HIVEMIND_VS_HOST", "http://localhost")
HIVEMIND_VS_PORT = os.getenv("HIVEMIND_VS_PORT", 1234)
VECTOR_SERVER_URL = f"{HIVEMIND_VS_HOST}:{HIVEMIND_VS_PORT}"
# RABBITMQ CONFIG
RABBITMQ_HOST = os.getenv("RABBITMQ_HOST", "localhost")
RABBITMQ_PORT = os.getenv("RABBITMQ_PORT", 5672)
RABBITMQ_USER = os.getenv("RABBITMQ_USER", "guest")
RABBITMQ_PASS = os.getenv("RABBITMQ_PASS", "guest")
DEFAULT_AGENT_DIR = os.path.join(os.path.dirname(__file__), "../agent_data")
DEFAULT_EMBEDDINGS = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
# Define the base path for the serialization
BASE_PATH_SERIALIZATION = os.path.join(DEFAULT_AGENT_DIR, "serialization") | [] |
2024-01-10 | TogetherCrew/qabot | packages~vector_server~vectorstore~vector_store_data.py | # import the necessary libraries
import os.path
from datetime import datetime, timedelta
import sys
import json
from langchain.vectorstores import DeepLake
from langchain.schema import Document
from langchain.embeddings import OpenAIEmbeddings
from logger.embedding_logger import logger
from tasks.helper import set_status
from utils import constants
from . import DB_interactions
from .summarize_discord import summarize_discord_main
def main(args):
# # SET PARAMETERS
if args is None:
raise ValueError("No arguments passed to main function.")
# set openai key
OA_KEY = args[0]
# set db information
DB_CONNECTION_STR = args[1]
DB_GUILD = args[2]
task = args[3]
dates = args[4]
channels = args[5]
index_deeplake = args[6]
logger.debug(f"OA_KEY: {OA_KEY}")
logger.debug(f"DB_CONNECTION_STR: {DB_CONNECTION_STR}")
logger.debug(f"DB_GUILD: {DB_GUILD}")
CHANNELS_ID = ["968110585264898048", "1047205126709969007", "1047205182871707669", "1047390883215052880",
"1095278496147849257"] if channels is None else channels
# DATES = ['2023-07-01', '2023-07-02', '2023-07-03', '2023-07-04', '2023-07-05']
# CHANNELS_ID = ["968110585264898048"]
DATES = ['2023-10-25', '2023-10-26', '2023-10-27', '2023-10-28', '2023-10-29', '2023-10-30'] if dates is None else dates
# CHANNELS_ID = [""]
# DATES = ['2023-04-13', '2023-04-14', '2023-04-15', '2023-04-16', '2023-04-17', '2023-04-18', '2023-04-19']
# set paths to store results
# # initiate embeddings model
# # OpenAI embedding model
embeddings = OpenAIEmbeddings(openai_api_key=OA_KEY)
# set_status(task, state='A', meta={'current': 'HF start'})
# HuggingFace embeddings model
# model_name = "sentence-transformers/all-mpnet-base-v2"
# embeddings = HuggingFaceEmbeddings(model_name=model_name,client=SentenceTransformer(device='cpu'))
# set_status(task, state='B', meta={'current': 'HF end'})
# embed and store data
vector_store_discord(OA_KEY, DB_CONNECTION_STR, DB_GUILD, CHANNELS_ID, DATES, embeddings, task, index_deeplake)
return
# # #
def vector_store_discord(OA_KEY, DB_CONNECTION_STR, DB_GUILD, CHANNELS_ID, DATES, embeddings, task, index_deeplake):
# set up database access
db_access = DB_interactions.DB_access(DB_GUILD, DB_CONNECTION_STR)
query = DB_interactions.Query()
# CHANNELS_ID = list(filter(lambda x: x != "", CHANNELS_ID))
query_channels = {"channelId": {"$in": list(CHANNELS_ID)}} if len(CHANNELS_ID) > 0 else {}
set_status(task, state='1', meta={'current': 'MongoDB query'})
# obtain relations between channel id and name
cursor = db_access.query_db_find(
table="channels",
feature_projection={"__v": 0, "_id": 0, "last_update": 0},
query=query_channels
)
# store relations between channel id and name as dictionary
channel_id_name = DB_interactions.filter_channel_name_id(list(cursor), channel_name_key="name")
# CHANNELS_ID = list(channel_id_name.keys())
# initiate empty doc arrays
summary_docs = []
raw_docs = []
# initiate empty metadata arrays
all_channels = []
all_threads = []
all_authors = []
set_status(task, state='2', meta={'current': 'Data transforming'})
total_tokens_per_server = 0
# for each date
for date in DATES:
logger.debug(f"starting date: {date}")
# compute date before day
datetime_next_day = datetime.strptime(date, '%Y-%m-%d') + timedelta(days=1)
date_next_day = datetime_next_day.strftime('%Y-%m-%d')
set_status(task, state='3', meta={'current': 'Data query'})
########## And now querying the table with messages in it ##########
query_dict = query.create_query_threads(
channels_id=CHANNELS_ID,
date_range=[date, date_next_day],
channelsId_key='channelId',
date_key='createdDate'
)
projection = {
'user_mentions': 0,
'role_mentions': 0,
'reactions': 0,
'replied_user': 0,
'type': 0,
'messageId': 0,
'__v': 0
}
logger.debug(f"query_dict: {query_dict}")
cursor = db_access.query_db_find(table='rawinfos',
query=query_dict,
feature_projection=projection,
sorting=('datetime', -1)
)
logger.debug(f"cursor of results")
# getting a result as thread_results : {str:{str:{str:str}}}
thread_results = DB_interactions.filter_channel_thread(cursor_list=list(cursor),
channels_id=CHANNELS_ID,
thread_id_key='threadId',
author_key='author',
message_content_key='content')
# logger.info("\n\n")
logger.info(f"thread_results: {thread_results}")
# logger.info("\n\n")
set_status(task, state='4', meta={'current': f"Start Summarizing"})
# run the summarizing function
logger.debug("Starting summarizing")
summary_out, num_tokens = summarize_discord_main(thread_results, OA_KEY, True, True)
logger.debug(f"Finished summarizing: Date: {date} Tokens: {num_tokens}")
total_tokens_per_server += num_tokens
logger.debug(f"Until date: {date} Total_Tokens: {total_tokens_per_server}")
logger.debug(f"Summary_out: {summary_out}")
set_status(task, state='1B', meta={'current': 'Building Summarize'})
# add server summary to docs
summary_docs.append(Document(page_content=summary_out['server_summary']["whole server"],
metadata={
'date': date,
'channel': None,
'thread': None
}))
# for each channel
for channel in summary_out['channel_summaries'].keys():
# store channel summary data
summary_docs.append(Document(page_content=summary_out['channel_summaries'][channel],
metadata={
'date': date,
'channel': channel_id_name[channel],
'thread': None
}))
# add channel name to metadata array if it's not in there yet
if not channel_id_name[channel] in all_channels:
all_channels.append(channel_id_name[channel])
# for each thread
for thread_label in summary_out['thread_summaries'][channel].keys():
# split thread name
thread_name_split = thread_label.split(": ")
thread = thread_name_split[1]
# store thread summary data
summary_docs.append(Document(page_content=summary_out['thread_summaries'][channel][thread_label],
metadata={
'date': date,
'channel': channel_id_name[channel],
'thread': thread
}))
# add thread name to metadata array if it's not in there yet
if not thread in all_threads:
all_threads.append(thread)
# for each message
for mess in thread_results[channel][thread].keys():
# split message id
mess_id_split = mess.split(":")
# split author name from handle
handle_split = mess_id_split[1].split("#")
# if message contains text
if len(thread_results[channel][thread][mess]) > 1:
# store message
raw_docs.append(Document(page_content=thread_results[channel][thread][mess],
metadata={
'date': date,
'channel': channel_id_name[channel],
'thread': thread,
'author': handle_split[0],
'index': mess_id_split[0]
}))
# add author name to metadata array if it's not in there yet
if not handle_split[0] in all_authors:
all_authors.append(handle_split[0])
set_status(task, state='H', meta={'current': 'Building DeepLake'})
PLATFORM_PATH = os.path.join(constants.DEEPLAKE_FOLDER, constants.DEEPLAKE_PLATFORM_FOLDER)
# check if path exists
index = 0
CURRENT_PLATFORM_PATH = f"{PLATFORM_PATH}_{index}"
if index_deeplake < 0:
while True:
logger.debug(f"init CURRENT_PLATFORM_PATH: {CURRENT_PLATFORM_PATH}")
if os.path.exists(CURRENT_PLATFORM_PATH):
index += 1
CURRENT_PLATFORM_PATH = f"{PLATFORM_PATH}_{index}"
continue
else:
logger.debug(f"break CURRENT_PLATFORM_PATH: {CURRENT_PLATFORM_PATH}")
os.makedirs(CURRENT_PLATFORM_PATH, exist_ok=True)
break
else:
CURRENT_PLATFORM_PATH = f"{PLATFORM_PATH}_{index_deeplake}"
RAW_DB_SAVE_PATH = os.path.join(CURRENT_PLATFORM_PATH,
constants.DEEPLAKE_RAW_FOLDER)
SUM_DB_SAVE_PATH = os.path.join(CURRENT_PLATFORM_PATH,
constants.DEEPLAKE_SUMMARY_FOLDER)
METADATA_OPTIONS_SAVE_PATH = os.path.join(CURRENT_PLATFORM_PATH,
"metadata_options.json")
# store results in vector stores
db_raw = DeepLake.from_documents(raw_docs, embeddings, dataset_path=RAW_DB_SAVE_PATH)
db_summary = DeepLake.from_documents(summary_docs, embeddings, dataset_path=SUM_DB_SAVE_PATH)
set_status(task, state='I', meta={'current': 'Start write to file'})
try:
# store metadata options for vector stores
JSON_dict = {"all_channels": all_channels, "all_threads": all_threads, "all_authors": all_authors,
"all_dates": DATES}
with open(METADATA_OPTIONS_SAVE_PATH, "w") as outfile:
json.dump(JSON_dict, outfile)
set_status(task, state='J', meta={'current': 'END'})
except BaseException as e:
logger.error(f"Error on write to file: {e}")
set_status(task, state='Error', meta={'current': 'END'})
return
return
if __name__ == '__main__':
sys.exit(main(sys.argv))
| [] |
2024-01-10 | TogetherCrew/qabot | packages~ml~src~tools~discord.py | import os.path
from enum import Enum
# import inspect
import json
from langchain.schema import Document
from pydantic.fields import Field
from logger.hivemind_logger import logger
from tools.base import AgentTool
from utils.util import async_get_request
from utils.constants import VECTOR_SERVER_URL
class ConversationType(Enum):
RAW = 0 # type=0
SUMMARY = 1 # type=1
class DiscordTool(AgentTool):
convo_type: ConversationType = Field(default=ConversationType.RAW, description="Conversation type")
# override constructor
def __init__(
self,
name: str,
convo_type: ConversationType,
description: str,
user_permission_required: bool = False,
**kwargs,
):
super().__init__(
name=name,
func=self.a_conversation_search_server,
description=description,
user_permission_required=user_permission_required,
**kwargs,
)
self.convo_type = convo_type
async def a_conversation_search_server(self, query: str,
**kwargs) -> str:
"""
**kwargs it's used to ignore hallucination params
"""
url = os.path.join(VECTOR_SERVER_URL, "search", str(self.convo_type.value), query)
logger.debug(f"a_conversation_search_server->calling: {url}")
json_response = await async_get_request(url)
logger.debug(f"a_conversation_search_server->json_response: {json_response}")
if json_response is None:
return None
list_doc = [Document(**doc) for doc in json_response]
return self.convert_list_doc_to_str(list_doc=list_doc)
def conversation_search(self, query: str, **kwargs) -> str:
list_doc = self._db.similarity_search(query=query, k=5)
return self.convert_list_doc_to_str(list_doc)
def convert_list_doc_to_str(self, list_doc):
new_list_doc = [
Document(
page_content=doc.page_content.replace("\n", " "), metadata=doc.metadata
)
for doc in list_doc
]
# AttributeError: 'dict' object has no attribute 'page_content'
# how build dict with page_content and metadata attributes
# print(new_list_doc)
l = ("\n").join(
[
f'message:"{doc.page_content}"\n metadata:{json.dumps(doc.metadata)}'
for i, doc in enumerate(new_list_doc)
]
)
# do for each doc getting page content
return l
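# Minimal construction sketch for the tool above; the name and description
# strings are illustrative placeholders, not values taken from the project.
def _make_summary_search_tool() -> DiscordTool:
    return DiscordTool(
        name="discord_summary_search",
        convo_type=ConversationType.SUMMARY,
        description="Search summarized Discord conversations relevant to a query.",
    )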
| [] |
2024-01-10 | TogetherCrew/qabot | packages~ml~src~llm~json_output_parser.py | import json
import re
from typing import Any, Dict, List
from pydantic import BaseModel
from jsonschema import validate, ValidationError
from langchain.llms.base import BaseLLM
import contextlib
# from marvin import ai_fn
class LLMJsonOutputParserException(Exception):
"""Exception for JSON parsing errors"""
pass
class ParseJsonException(LLMJsonOutputParserException):
"""Exception for JSON parsing errors"""
pass
class ValidateJsonException(LLMJsonOutputParserException):
"""Exception for JSON validating errors"""
pass
class FixJsonException(LLMJsonOutputParserException):
"""Exception for JSON fixing errors"""
pass
# @ai_fn()
def auto_fix_json(json_str: str, schema: str) -> str:
"""
Fixes the provided JSON string to make it parseable and fully complient with the provided schema.
If an object or field specified in the schema isn't contained within the correct JSON,
it is ommited.\n This function is brilliant at guessing when the format is incorrect.
Parameters:
description: str
The description of the function
function: str
The function to run
Returns:
str
The fixed JSON string it is valid.
"""
class LLMJsonOutputParser(BaseModel):
"""Parse the output of the LLM."""
@classmethod
def parse_and_validate(cls, json_str: str, json_schema: str, llm: BaseLLM) -> str | Dict[Any, Any]:
"""
Parses and validates the JSON string.
"""
# Parse JSON
try:
json_str = cls._parse_json(json_str, json_schema, llm)
except ParseJsonException as e:
raise ParseJsonException(str(e))
# Validate JSON
try:
return cls._validate_json(json_str, json_schema, llm)
except ValidationError as e:
raise ValidateJsonException(str(e))
@classmethod
def _remove_square_brackets(cls, json_str: str) -> str:
"""
Removes square brackets from the JSON string.
"""
return re.sub(r"\[|\]", "", json_str)
@classmethod
def _parse_json(cls, json_str: str, json_schema: str, llm: BaseLLM) -> str | Dict[Any, Any]:
"""
Parses the JSON string.
"""
with contextlib.suppress(json.JSONDecodeError):
json_str = json_str.replace("\t", "")
return json.loads(json_str)
with contextlib.suppress(json.JSONDecodeError):
json_str = cls.correct_json(json_str)
return json.loads(json_str)
try:
json_str = cls._remove_square_brackets(json_str)
brace_index = json_str.index("{")
maybe_fixed_json = json_str[brace_index:]
last_brace_index = maybe_fixed_json.rindex("}")
maybe_fixed_json = maybe_fixed_json[: last_brace_index + 1]
return json.loads(maybe_fixed_json)
except (json.JSONDecodeError, ValueError):
pass
# Now try to fix this up using the ai_functions
try:
ai_fixed_json = cls._fix_json(json_str, json_schema, llm)
return json.loads(ai_fixed_json)
except FixJsonException as e:
raise ParseJsonException("Could not parse JSON:" + str(e))
@classmethod
def _validate_json(cls, json_obj: str | Dict[Any, Any], json_schema: str, llm: BaseLLM) -> str | Dict[Any, Any]:
"""
        Check if the given JSON object is fully compliant with the provided schema.
"""
schema_obj = json.loads(json_schema)
try:
validate(json_obj, schema_obj)
return json_obj
except ValidationError:
# Now try to fix this up using the ai_functions
try:
ai_fixed_json = cls._fix_json(json.dumps(json_obj), json_schema, llm)
return json.loads(ai_fixed_json)
except FixJsonException as e:
raise ValidateJsonException("Could not validate JSON:" + str(e))
@staticmethod
def _fix_json(json_str: str, schema: str, llm: BaseLLM) -> str:
"""
        Fix the given JSON string to make it parseable and fully compliant with the provided schema.
"""
try:
print(f"trying fix json_str: {json_str}")
fixed_json_str = auto_fix_json(json_str, schema)
except Exception:
import traceback
call_stack = traceback.format_exc()
raise FixJsonException(f"Failed to fix JSON: '{json_str}' " + call_stack)
try:
# print(f"fixed_json_str: {fixed_json_str}")
json.loads(fixed_json_str)
return fixed_json_str
except Exception:
import traceback
call_stack = traceback.format_exc()
raise FixJsonException(f"Failed to load JSON: '{fixed_json_str}' " + call_stack)
@staticmethod
def _extract_char_position(error_message: str) -> int:
"""
Extract the character position from the error message.
"""
char_pattern = re.compile(r'\(char (\d+)\)')
if match := char_pattern.search(error_message):
return int(match[1])
else:
raise ValueError("Character position not found in the error message.")
@staticmethod
def _add_quotes_to_property_names(json_string: str) -> str:
"""
Add quotes to the property names in the JSON string.
"""
def replace_func(match):
return f'"{match.group(1)}":'
property_name_pattern = re.compile(r'(\w+):')
corrected_json_string = property_name_pattern.sub(
replace_func,
json_string)
try:
json.loads(corrected_json_string)
return corrected_json_string
except json.JSONDecodeError as e:
raise e
@staticmethod
def _balance_braces(json_string: str) -> str:
"""
Add missing braces to the end of the JSON string.
"""
open_braces_count = json_string.count("{")
close_braces_count = json_string.count("}")
while open_braces_count > close_braces_count:
json_string += "}"
close_braces_count += 1
while close_braces_count > open_braces_count:
json_string = json_string.rstrip("}")
close_braces_count -= 1
with contextlib.suppress(json.JSONDecodeError):
json.loads(json_string)
return json_string
@classmethod
def _fix_invalid_escape(cls, json_str: str, error_message: str) -> str:
"""
Remove the invalid escape character from the JSON string.
"""
while error_message.startswith('Invalid \\escape'):
bad_escape_location = cls._extract_char_position(error_message)
json_str = json_str[:bad_escape_location] + \
json_str[bad_escape_location + 1:]
try:
json.loads(json_str)
return json_str
except json.JSONDecodeError as e:
error_message = str(e)
return json_str
@classmethod
def correct_json(cls, json_str: str) -> str:
"""
Correct the given JSON string to make it parseable.
"""
try:
json.loads(json_str)
return json_str
except json.JSONDecodeError as e:
error_message = str(e)
if error_message.startswith('Invalid \\escape'):
json_str = cls._fix_invalid_escape(json_str, error_message)
if error_message.startswith('Expecting property name enclosed in double quotes'):
json_str = cls._add_quotes_to_property_names(json_str)
try:
json.loads(json_str)
return json_str
except json.JSONDecodeError as e:
error_message = str(e)
if balanced_str := cls._balance_braces(json_str):
return balanced_str
return json_str
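# Minimal illustration of the correct_json() path above, using made-up input:
# bare property names are quoted first, after which the string parses cleanly.
def _correct_json_example() -> str:
    broken = '{thought: "check the docs", done: false}'
    # Returns '{"thought": "check the docs", "done": false}'
    return LLMJsonOutputParser.correct_json(broken)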
| [] |
2024-01-10 | TogetherCrew/qabot | packages~ml~src~memory~episodic_memory.py | import logging
import os
from typing import List, Dict, Any
from pydantic import BaseModel, Field
from langchain.llms.base import BaseLLM
from langchain import LLMChain
from langchain.vectorstores import DeepLake, FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from llm.summarize.prompt import get_template
from ui.cui import CommandlineUserInterface
from utils.constants import DEFAULT_EMBEDDINGS
from utils.util import atimeit, timeit
import base58
class Episode(BaseModel):
thoughts: Dict[str, Any] = Field(..., description="thoughts of the agent")
action: Dict[str, Any] = Field(..., description="action of the agent")
result: str = Field(..., description="The plan of the event")
summary: str = Field("", description="summary of the event")
question: str = Field("", description="question to be answered")
task: str = Field("", description="task to be completed")
# create like equals method to compare two episodes
def __eq__(self, other):
return (
self.thoughts == other.thoughts
and self.action == other.action
and self.result == other.result
)
@staticmethod
def get_summary_of_episodes(episodes: List["Episode"]) -> str:
return "\n".join([episode.summary for episode in episodes])
class EpisodicMemory(BaseModel):
num_episodes: int = Field(0, description="The number of episodes")
store: Dict[str, Episode] = Field({}, description="The list of episodes")
llm: BaseLLM = Field(..., description="llm class for the agent")
embeddings: HuggingFaceEmbeddings = Field(DEFAULT_EMBEDDINGS,
title="Embeddings to use for tool retrieval",
)
vector_store: FAISS = Field(
None, title="Vector store to use for tool retrieval"
)
ui: CommandlineUserInterface | None = Field(None)
class Config:
arbitrary_types_allowed = True
# def __init__(self, question: str, **kwargs):
# super().__init__(**kwargs)
# filename = base58.b58encode(question.encode()).decode()
# if self.vector_store is None:
# self.vector_store = DeepLake(read_only=True, dataset_path=os.path.join(EPISODIC_MEMORY_DIR, f"{filename}"),
# embedding=self.embeddings)
def __del__(self):
del self.embeddings
del self.vector_store
async def memorize_episode(self, episode: Episode) -> None:
"""Memorize an episode."""
self.num_episodes += 1
self.store[str(self.num_episodes)] = episode
await self._embed_episode(episode)
async def summarize_and_memorize_episode(self, episode: Episode) -> str:
"""Summarize and memorize an episode."""
summary = await self._summarize(
episode.question, episode.task, episode.thoughts, episode.action, episode.result
)
episode.summary = summary
await self.memorize_episode(episode)
return summary
async def _summarize(
self, question: str, task: str, thoughts: Dict[str, Any], action: Dict[str, Any], result: str
) -> str:
"""Summarize an episode."""
prompt = get_template()
llm_chain = LLMChain(prompt=prompt, llm=self.llm)
try:
result = await llm_chain.apredict(
question=question, task=task, thoughts=thoughts, action=action, result=result
)
except Exception as e:
raise Exception(f"Error: {e}")
return result
def remember_all_episode(self) -> List[Episode]:
"""Remember all episodes."""
# return list(self.store.values())
return self.store
@timeit
def remember_recent_episodes(self, n: int = 5) -> List[Episode]:
"""Remember recent episodes."""
        if not self.store:  # if empty
            return []
        n = min(n, len(self.store))
        return list(self.store.values())[-n:]
def remember_last_episode(self) -> Episode:
"""Remember last episode."""
if not self.store:
return None
        return list(self.store.values())[-1]
@timeit
def remember_related_episodes(self, query: str, k: int = 5) -> List[Episode]:
"""Remember related episodes to a query."""
logging.debug('remember_related_episodes')
if self.vector_store is None:
return []
relevant_documents = self.vector_store.similarity_search(query, k=k)
result = []
for d in relevant_documents:
episode = Episode(
thoughts=d.metadata["thoughts"],
action=d.metadata["action"],
result=d.metadata["result"],
summary=d.metadata["summary"],
question=d.metadata["question"],
task=d.metadata["task"]
)
result.append(episode)
return result
@atimeit
async def _embed_episode(self, episode: Episode) -> None:
"""Embed an episode and add it to the vector store."""
print('_embed_episode')
texts = [episode.summary]
metadatas = [
{
"index": self.num_episodes,
"thoughts": episode.thoughts,
"action": episode.action,
"result": episode.result,
"summary": episode.summary,
"question": episode.question,
"task": episode.task
}
]
if self.vector_store is None:
print('build deeplake')
# self.vector_store = DeepLake(read_only=False, dataset_path=EPISODIC_MEMORY_DIR,embedding=self.embeddings)
self.vector_store = FAISS.from_texts(
texts=texts, embedding=self.embeddings, metadatas=metadatas
)
else:
print('_embed_episode::add_texts')
self.vector_store.add_texts(texts=texts, metadatas=metadatas)
# async def save_local(self, path: str) -> None:
# """Save the vector store locally."""
# # async def _save():
# print('save_local_inner')
# # self.vector_store.save_local(folder_path=path)
# # await asyncio.to_thread(vs.save_local, folder_path=path)
# print('post save_local inner')
# # await asyncio.create_task(_save())
# def load_local(self, path: str) -> None:
# """Load the vector store locally."""
# print('local_load inner')
# async def _load():
# self.vector_store = FAISS.load_local(
# folder_path=path, embeddings=self.embeddings
# )
# self.vector_store = DeepLake(read_only=False, dataset_path=path,embedding=self.embeddings)
# await asyncio.create_task(_load())
# await asyncio.to_thread(FAISS.load_local, folder_path=path, embeddings=self.embeddings)
| [] |
2024-01-10 | ttpss930141011/LangChain-LineBot | src~interactor~use_cases~message~cor~handler_base.py | from abc import ABC, abstractmethod
from typing import List, Type
from langchain.agents import AgentExecutor
from linebot.v3.messaging.models.message import Message
from src.interactor.dtos.event_dto import EventInputDto
from src.interactor.interfaces.repositories.agent_executor_repository import (
AgentExecutorRepositoryInterface,
)
class Handler(ABC):
def __init__(self, successor: Type["Handler"] = None):
self._successor = successor
def _get_agent_executor(
self,
input_dto: EventInputDto,
repository: AgentExecutorRepositoryInterface,
) -> AgentExecutor:
"""
Retrieves the agent executor associated with the current window.
:param None: This function does not take any parameters.
:return: None
"""
window_id = input_dto.window.get("window_id")
agent_executor = repository.get(
window_id=window_id,
)
if agent_executor is None:
agent_executor = repository.create(
window_id=window_id,
)
return agent_executor
@abstractmethod
def handle(
self,
input_dto: EventInputDto,
repository: AgentExecutorRepositoryInterface,
response: List[Message],
) -> List[Message]:
pass
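# Minimal sketch of a concrete handler chaining to its successor; it assumes the
# successor is passed in as a Handler instance, and the pass-through logic below
# is illustrative only, not part of the project.
class _PassThroughHandler(Handler):
    def handle(
        self,
        input_dto: EventInputDto,
        repository: AgentExecutorRepositoryInterface,
        response: List[Message],
    ) -> List[Message]:
        # This handler adds nothing itself; it forwards the request to the next
        # handler in the chain when one was configured, otherwise it returns the
        # response untouched.
        if self._successor is not None:
            return self._successor.handle(input_dto, repository, response)
        return response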
| [] |
2024-01-10 | ttpss930141011/LangChain-LineBot | src~infrastructure~tools~stock_performance.py | from datetime import datetime, timedelta
from typing import Type
import yfinance as yf
from langchain.tools import BaseTool
from pydantic import BaseModel, Field
def get_stock_performance(ticker, days):
"""Method to get stock price change in percentage"""
past_date = datetime.today() - timedelta(days=days)
ticker_data = yf.Ticker(ticker)
history = ticker_data.history(start=past_date)
old_price = history.iloc[0]["Close"]
current_price = history.iloc[-1]["Close"]
return {"percent_change": ((current_price - old_price) / old_price) * 100}
class StockPercentChangeInput(BaseModel):
"""Inputs for get_stock_performance"""
ticker: str = Field(description="Ticker symbol of the stock")
days: int = Field(description="Timedelta days to get past date from current date")
class StockPerformanceTool(BaseTool):
name = "get_stock_performance"
description = """
Useful when you want to check performance of the stock.
You should enter the stock ticker symbol recognized by the yahoo finance.
You should enter days as number of days from today from which performance needs to be check.
output will be the change in the stock price represented as a percentage.
"""
args_schema: Type[BaseModel] = StockPercentChangeInput
def _run(self, ticker: str, days: int):
response = get_stock_performance(ticker, days)
return response
def _arun(self, ticker: str):
raise NotImplementedError("get_stock_performance does not support async")
| [
"\n Useful when you want to check performance of the stock.\n You should enter the stock ticker symbol recognized by the yahoo finance.\n You should enter days as number of days from today from which performance needs to be check.\n output will be the change in the stock price represented as a percentage.\n "
] |
2024-01-10 | ttpss930141011/LangChain-LineBot | src~infrastructure~tools~current_stock_price.py | from typing import Type
import yfinance as yf
from langchain.tools import BaseTool
from pydantic import BaseModel, Field
def get_current_stock_price(ticker):
"""Method to get current stock price"""
ticker_data = yf.Ticker(ticker)
recent = ticker_data.history(period="1d")
return {"price": recent.iloc[0]["Close"], "currency": ticker_data.info["currency"]}
class CurrentStockPriceInput(BaseModel):
"""Inputs for get_current_stock_price"""
ticker: str = Field(description="Ticker symbol of the stock")
class CurrentStockPriceTool(BaseTool):
name = "get_current_stock_price"
description = """
Useful when you want to get current stock price.
You should enter the stock ticker symbol recognized by the yahoo finance
"""
args_schema: Type[BaseModel] = CurrentStockPriceInput
def _run(self, ticker: str):
price_response = get_current_stock_price(ticker)
return price_response
def _arun(self, ticker: str):
raise NotImplementedError("get_current_stock_price does not support async")
| [
"\n Useful when you want to get current stock price.\n You should enter the stock ticker symbol recognized by the yahoo finance\n "
] |
2024-01-10 | ttpss930141011/LangChain-LineBot | src~interactor~use_cases~message~cor~default_handler.py | from typing import List
from langchain.agents import AgentExecutor
from linebot.v3.messaging.models import TextMessage
from linebot.v3.messaging.models.message import Message
from src.interactor.dtos.event_dto import EventInputDto
from src.interactor.interfaces.repositories.agent_executor_repository import (
AgentExecutorRepositoryInterface,
)
from src.interactor.use_cases.message.cor.handler_base import Handler
class DefaultHandler(Handler):
def handle(
self,
input_dto: EventInputDto,
repository: AgentExecutorRepositoryInterface,
response: List[Message],
):
try:
agent_executor = self._get_agent_executor(input_dto, repository)
result = agent_executor.run(input=input_dto.user_input)
response.append(TextMessage(text=result))
except Exception as e:
print(e)
response.append(TextMessage(text="出現錯誤啦!請稍後再試。"))
finally:
return response
| [] |
2024-01-10 | ttpss930141011/LangChain-LineBot | src~infrastructure~repositories~message_history~inmemory_message_history.py | from typing import Dict, List, TypeVar
from langchain.schema import BaseChatMessageHistory
from langchain.schema.messages import BaseMessage
KT = TypeVar("KT", bound=str)
VT = TypeVar("VT", bound=List[BaseMessage])
class InMemoryChatDictMessageHistory(BaseChatMessageHistory):
"""In memory implementation of chat message history.
Stores messages in an in memory list.
"""
def __init__(self, window_id: str, data: Dict[KT, VT]) -> None:
self.window_id = window_id
self.data = data
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from Dict"""
if self.window_id not in self.data:
self.data[self.window_id] = []
return self.data[self.window_id]
def add_message(self, message: BaseMessage) -> None:
"""Add a self-created message to the store"""
if self.window_id not in self.data:
self.data[self.window_id] = [message]
else:
self.data[self.window_id].append(message)
def clear(self) -> None:
"""
Clears the data associated with the current window ID.
Parameters:
None
Returns:
None
"""
self.data[self.window_id] = []
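# Minimal usage sketch, assuming a shared dict keyed by window id; the window id
# and message content below are illustrative placeholders.
def _history_example() -> List[BaseMessage]:
    from langchain.schema.messages import HumanMessage  # assumed available alongside BaseMessage
    shared_store: Dict[str, List[BaseMessage]] = {}
    history = InMemoryChatDictMessageHistory(window_id="window-123", data=shared_store)
    history.add_message(HumanMessage(content="hello"))
    return history.messages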
| [] |
2024-01-10 | AayushGithub/LeetAI | tools~parse_solution.py | # given an input in the following format, open the corresponding file
# python parse_solution.py <input_folder>
# input_folder should contain the following file:
# solution.py
import sys
import openai
import os
from dotenv import load_dotenv
import json
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
def parse_solution(input_folder):
# open the solution file
solution_file = open(input_folder + "/solution.py", "r")
# The file has the following format:
# Link - <leetcode link>
# Question ID - <question id>
# Question Name - <question name>
# solution
link = solution_file.readline().strip().split(" - ")[1]
question_id = solution_file.readline().strip().split(" - ")[1]
question_name = solution_file.readline().strip().split(" - ")[1]
solution = solution_file.read()
solution_file.close()
# return the parsed data
return link, question_id, question_name, solution
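# Illustrative example of the header lines parse_solution expects at the top of
# solution.py (the link, id, and name below are made-up placeholders):
#   Link - https://leetcode.com/problems/two-sum/
#   Question ID - 1
#   Question Name - Two Sum
#   <solution code follows>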
def generate_explaination(link, question_id, question_name, solution):
prompt = f"This is my solution to the Leetcode question:\nLink - {link}\nQuestion ID - {question_id}\nQuestion Name - {question_name}\n\nSolution:\n```{solution}```\nGive me an explanation (as a markdown file) as to why it works and if it is correct:\n"
messages = [{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
explanation = response["choices"][0]["message"]["content"]
return explanation
def save_to_markdown(explanation, input_folder):
# save the explanation to a markdown file
explanation_file = open(input_folder + "/explanation.md", "w")
explanation_file.write(explanation)
explanation_file.close()
def main():
input_folder = sys.argv[1]
link, question_id, question_name, solution = parse_solution(input_folder)
explaination = generate_explaination(link, question_id, question_name, solution)
save_to_markdown(explaination, input_folder)
if __name__ == "__main__":
    main()
| [
"This is my solution to the Leetcode question:\nLink - PLACEHOLDER\nQuestion ID - PLACEHOLDER\nQuestion Name - PLACEHOLDER\n\nSolution:\n```PLACEHOLDER```\nGive me an explanation (as a markdown file) as to why it works and if it is correct:\n"
] |
2024-01-10 | CR1502/Recommender | Email_Gen~email_app~main1.py | # This is the main python file that generates the emails
import openai
from bs4 import BeautifulSoup
import requests
# Make sure you set up your API key
openai.api_key = 'YOUR_OPENAI_API_KEY'
class EmailMarketingAssistant:
SAMPLE_EMAILS = {
'e-commerce': {
'convince_to_buy': [
"Introducing our new {product_name}: {product_description}. Grab yours now!",
"Experience the best with our new {product_name}. {product_description}. Limited stock!",
"Why wait? The {product_name} you've always wanted is here. {product_description}.",
"{product_name}: Where quality meets desire. {product_description}. Don't miss out!",
"Discover the new dimension of quality with {product_name}. {product_description}. Available now!"
]
},
'people': {
'welcome_new_user': [
"Welcome {user_name}! We're thrilled to have you on board.",
"Hi {user_name}, thanks for choosing us! Let's embark on this journey together.",
"A warm welcome to our community, {user_name}!",
"{user_name}, you've made a fantastic decision. Welcome to the family!",
"It's a pleasure to see you, {user_name}. Welcome and let's get started!"
],
'congratulate_on_purchase':[
"Congratulations on your new {product_name} purchase, {user_name}! We're sure you'll love it.",
"Hey {user_name}, great choice! Your new {product_name} is on its way. Enjoy!",
"Thank you for choosing {product_name}, {user_name}! We're excited for you to try it out.",
"{user_name}, your impeccable taste shines with your {product_name} purchase! Cheers!",
"Rock on, {user_name}! Your {product_name} will surely turn heads!"
],
},
'blog': {
'new_blog': [
"Just out: our new blog post, {post_title}, covering everything about {topic}. Dive in!",
"Unveiling our latest piece: {post_title}. Discover more about {topic}.",
"{post_title} - a fresh take on {topic}. Read now!",
"Explore the depths of {topic} in our new article: {post_title}. Check it out!",
"Hot off the press: {post_title}. Delve into the world of {topic} now!"
]
}
}
def get_sample_email(self, business_type, campaign_goal, **details):
sample_emails = self.SAMPLE_EMAILS.get(business_type, {}).get(campaign_goal, [])
if not sample_emails:
return ["Sorry, no sample email found for your criteria."] * 5
refined_emails = []
for sample in sample_emails:
refined_emails.append(self.refine_prompt(sample.format(**details)))
return refined_emails
def refine_prompt(self, prompt):
gpt3_message = {
"messages": [{
"role": "user",
"content": f"Given this sample email: '{prompt}', create a similar yet unique marketing email."
}]
}
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=gpt3_message['messages']
)
return response.choices[0].message['content'].strip()
def get_company_description(self, website_url):
try:
response = requests.get(website_url)
soup = BeautifulSoup(response.content, 'html.parser')
description = soup.find('meta', attrs={'name': 'description'}) or soup.find('meta', attrs={'property': 'og:description'})
if description:
return description.get('content')
else:
return "Description not found. Please provide manually."
except Exception as e:
return f"Error fetching description: {e}"
if __name__ == "__main__":
assistant = EmailMarketingAssistant()
mail_type = input("Enter the kind of mail to send (e-commerce, people, blog, etc.): ")
campaign_goal = input("Enter your campaign goal (convince_to_buy, congratulate_on_purchase, welcome_new_user, new_blog): ")
details = {}
# For e-commerce related prompts
if mail_type == "e-commerce":
details['product_name'] = input("Enter the product name: ")
if campaign_goal in ['convince_to_buy']:
details['product_description'] = input("Provide a brief description of the product: ")
# For new customer related prompts
if mail_type == "people" and campaign_goal in ['welcome_new_user']:
details['user_name'] = input("Provide new users name: ")
elif mail_type == "people" and campaign_goal in ['congratulate_on_purchase']:
details['user_name'] = input("Provide new users name: ")
# For blog related prompts
elif mail_type == "blog" and campaign_goal == "new_blog":
details['post_title'] = input("Enter the blog post title: ")
details['topic'] = input("Enter the post topic: ")
# Fetch company website details
website_url = input("Enter your company website URL (or press Enter to skip): ")
if website_url:
company_description = assistant.get_company_description(website_url)
print(f"Fetched company description: {company_description}")
email_contents = assistant.get_sample_email(mail_type, campaign_goal, **details)
print("\nRecommended Email Contents:\n")
for i, content in enumerate(email_contents, 1):
print(f"Email {i}:\n{content}\n")
| [
"Given this sample email: 'PLACEHOLDER', create a similar yet unique marketing email."
] |
2024-01-10 | andrewgcodes/ConsensusTranscription | v1.py | import os
import sys
import requests
import openai
from pydub import AudioSegment
from rapidfuzz import fuzz
import string
# OpenAI Whisper API settings
OPENAI_API_KEY = "YOUR_OPENAI_API_KEY"
OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
MODEL_ID = "whisper-1"
# Deepgram API settings
DEEPGRAM_API_KEY = "YOUR_DEEPGRAM_API_KEY"
DEEPGRAM_API_URL = "https://api.deepgram.com/v1/listen"
# Configure OpenAI
openai.api_key = OPENAI_API_KEY
def remove_punctuation(input_string):
translator = str.maketrans('', '', string.punctuation)
no_punct = input_string.translate(translator)
return no_punct
def compare_strings(str1, str2):
str1 = remove_punctuation(str1)
str2 = remove_punctuation(str2)
return fuzz.ratio(str1, str2)
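# Small worked example of the fuzzy comparison above (made-up strings): after
# punctuation is stripped, "the quick brown fox" and "the quick brown fox." become
# identical, so fuzz.ratio returns 100.0.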
def transcribe_openai(AUDIO_FILE_PATH):
with open(AUDIO_FILE_PATH, "rb") as audio_file:
response = openai.Audio.transcribe(MODEL_ID, audio_file)
return response.text
def transcribe_deepgram(AUDIO_FILE_PATH):
headers = {
"Authorization": "Token " + DEEPGRAM_API_KEY,
"Content-Type": "audio/mpeg"
}
with open(AUDIO_FILE_PATH, "rb") as audio_file:
audio_data = audio_file.read()
response = requests.post(DEEPGRAM_API_URL, headers=headers, data=audio_data)
response.raise_for_status()
return response.json()["results"]["channels"][0]["alternatives"][0]["transcript"]
def summarize_transcript(openai_transcript, GPT_MODEL):
messages = [
{
"role": "system",
"content": "You are a editor, writer, and stenographer. Summarize the provided transcription text. Be aware that some words may be incorrect or missing."
},
{
"role": "user",
"content": openai_transcript
}
]
response = openai.ChatCompletion.create(
model=GPT_MODEL,
messages=messages,
max_tokens=100
)
return response.choices[0].message["content"]
def analyze_transcriptions(audio_content, openai_transcript, deepgram_transcript, GPT_MODEL):
messages = [
{
"role": "system",
"content": "You are a skilled editor, transcriber of speech, and stenographer. Your task is to review two transcripts of the same speech. Given context that explains the speech, provide a new corrected transcript that fixes the errors in each of the two original transcripts. Make sure to consider how words that sound similar can be mistranscribed. Use your knowledge of phonetics, pronunciation, speech patterns, modern slang, and more. Generate a highly accurate consensus transcript that preserves the original meaning and content as exactly as possible while fixing the errors. Be aware of different English dialects such as AAVE and do not correct grammar based on Standard American English. Censor inappropriate words with asterisks. Think step by step and be careful to maintain accuracy to the transcripts when possible. Do not hallucinate."
},
{
"role": "user",
"content": f"Here is a summary of the transcriptions:'{audio_content}'. Here are the two transcriptions which may have errors: OpenAI transcript: '{openai_transcript}', Deepgram transcript: '{deepgram_transcript}'. Provide a new corrected transcription that is faithful to the words used in the transcripts. Do not replace words with synonyms."
}
]
response = openai.ChatCompletion.create(
model=GPT_MODEL,
messages=messages
)
return response.choices[0].message["content"]
if __name__ == "__main__":
AUDIO_FILE_PATH = sys.argv[1]
GPT_MODEL = sys.argv[2]
openai_transcript = transcribe_openai(AUDIO_FILE_PATH)
deepgram_transcript = transcribe_deepgram(AUDIO_FILE_PATH)
similarity = compare_strings(openai_transcript.lower(), deepgram_transcript.lower())
print(f"The Levenshtein similarity between the two transcriptions is {similarity}%")
audio_content = summarize_transcript(openai_transcript, GPT_MODEL)
consensus_transcript = analyze_transcriptions(audio_content, openai_transcript, deepgram_transcript, GPT_MODEL)
print("Consensus Transcript: ", consensus_transcript)
| [
"You are a skilled editor, transcriber of speech, and stenographer. Your task is to review two transcripts of the same speech. Given context that explains the speech, provide a new corrected transcript that fixes the errors in each of the two original transcripts. Make sure to consider how words that sound similar can be mistranscribed. Use your knowledge of phonetics, pronunciation, speech patterns, modern slang, and more. Generate a highly accurate consensus transcript that preserves the original meaning and content as exactly as possible while fixing the errors. Be aware of different English dialects such as AAVE and do not correct grammar based on Standard American English. Censor inappropriate words with asterisks. Think step by step and be careful to maintain accuracy to the transcripts when possible. Do not hallucinate.",
"You are a editor, writer, and stenographer. Summarize the provided transcription text. Be aware that some words may be incorrect or missing.",
"Here is a summary of the transcriptions:'PLACEHOLDER'. Here are the two transcriptions which may have errors: OpenAI transcript: 'PLACEHOLDER', Deepgram transcript: 'PLACEHOLDER'. Provide a new corrected transcription that is faithful to the words used in the transcripts. Do not replace words with synonyms."
] |
2024-01-10 | kanishkg/talking-heads | src~human_app.py | import csv
import random
import openai
import streamlit as st
DATA_FILE = "Pew_American_Trends_Panel_disagreement_500.csv"
MAX_MESSAGES = 6
# Preliminary setup
st.title("Change My View")
print("restarting")
print(st.session_state)
# st.write(st.session_state)
if "stage" not in st.session_state:
st.session_state["stage"] = 1
print(f"session stage: {st.session_state['stage']}")
def get_data(i=None):
with open(DATA_FILE, "r") as f:
reader = csv.reader(f, delimiter="\t")
if i:
data = list(reader)[i]
else:
data = random.choice(list(reader))
question = data[3]
answer_list = list(eval(data[4]))
answer_list = answer_list[:-1] + ["Neutral"]
random.shuffle(answer_list)
return question, answer_list
# stage 4: thanks
if st.session_state["stage"] == 4:
st.markdown("### Thanks for participating! You can close this tab now.")
# stage 3: final answer
if st.session_state["stage"] == 3:
st.radio(
st.session_state["question"],
key="final_answer",
options=st.session_state["answer_list"],
)
st.button("Submit", key="final", on_click=lambda: st.session_state.update({"stage": 4}))
# stage 2: conversation
if st.session_state["stage"] == 2:
if "messages" not in st.session_state:
st.session_state.messages = []
message = f"When asked the question '{st.session_state['question']}' I feel the answer is '{st.session_state['initial_answer']}' but convince me that it is '{st.session_state['target_answer']}'."
st.session_state.messages.append(
{"role": "system", "content": "Convince the user of the opinion that they want to change to. Use rhetoric and critical arguments, dont merely list points. Be concise and respond to the arguments that the user makes. Make it more like a conversation than a list of points. Ask questions when required."}
)
st.session_state.messages.append(
{"role": "user", "content": message}
)
for message in st.session_state.messages[2:]:
with st.chat_message(message["role"]):
print(message["role"])
st.markdown(message["content"])
if len(st.session_state.messages) == 2:
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in openai.ChatCompletion.create(
model=st.session_state["openai_model"],
messages=[
{"role": m["role"], "content": m["content"]}
for m in st.session_state.messages
],
stream=True,
):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response})
if prompt:=st.chat_input("Type here to chat"):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
if len(st.session_state.messages) > MAX_MESSAGES:
suffix = "\nThis is the last message. Also provide a concluding remark with the response based on the discussion."
st.session_state.messages[-1]["content"] += suffix # add suffix to last message
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in openai.ChatCompletion.create(
model=st.session_state["openai_model"],
messages=[
{"role": m["role"], "content": m["content"]}
for m in st.session_state.messages
],
stream=True,
):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response})
if len(st.session_state.messages) > MAX_MESSAGES+1:
st.button("Next", key="next2", on_click=lambda: st.session_state.update({"stage": 3}))
# stage 1: get the question and answer
if st.session_state["stage"] == 1:
st.text_input(label="OpenAI API Key", key="openai_api_key")
if st.session_state["openai_api_key"]:
openai.api_key = st.session_state["openai_api_key"]
selected_model = st.selectbox(
label="OpenAI Model",
key="openaim",
options=["gpt-4", "gpt-3.5-turbo"],
)
st.session_state["openai_model"] = selected_model
print(st.session_state["openai_model"])
if "question" not in st.session_state:
st.session_state["question"], st.session_state["answer_list"] = get_data()
# show the question and answer
left_column, right_column = st.columns(2)
with left_column:
st.radio(
st.session_state["question"],
key="initial_answer",
options=st.session_state["answer_list"],
)
with right_column:
st.radio(
"Target Answer",
key="target_answer",
options=st.session_state["answer_list"],
)
st.button("Next", key="next", on_click=lambda: st.session_state.update({"stage": 2})) | [
"content",
"Convince the user of the opinion that they want to change to. Use rhetoric and critical arguments, dont merely list points. Be concise and respond to the arguments that the user makes. Make it more like a conversation than a list of points. Ask questions when required."
] |
2024-01-10 | denverbaumgartner/autoSQL | autoSQL~autosql~predict~predict.py | import json
import logging
import requests
from _decimal import Decimal
from typing import Optional, Dict, List, Union
import openai
from openai.openai_object import OpenAIObject
import replicate
from replicate import Client as rc
import sqlglot
from datasets import DatasetDict, Dataset
# from .helper import Prompts
from helper import Prompts
logger = logging.getLogger(__name__)
class SQLPredict:
"""This class handles the dispatching of inference requests to various models.
"""
def __init__(
self,
openai_api_key: str,
replicate_api_key: str,
hugging_face_api_key: Optional[str] = None,
) -> None:
"""Initialize the class"""
openai.api_key = openai_api_key
self.openai = openai
self.prompts = Prompts()
self.rc = rc(replicate_api_key)
self.hf_key = hugging_face_api_key
self.replicate_models = {}
self.openai_api_models = {}
self.model_endpoints = {}
@classmethod
def from_replicate_model(
cls,
openai_api_key: str,
replicate_api_key: str,
model_name: str,
model_id: str,
) -> "SQLPredict":
"""Initialize the class with a Replicate model
:param openai_api_key: The OpenAI API key.
:type openai_api_key: str
:param replicate_api_key: The Replicate API key.
:type replicate_api_key: str
:param model_name: The name of the Replicate model.
:type model_name: str
:param model_id: The ID of the Replicate model.
:type model_id: str
:return: The initialized class.
:rtype: SQLPredict
"""
instance = cls(openai_api_key, replicate_api_key)
instance.replicate_models[model_name] = model_id
return instance
def __repr__(self):
items = ("{}={!r}".format(k, self.__dict__[k]) for k in self.__dict__)
return "{}({})".format(type(self).__name__, ", ".join(items))
#########################################
# Class Methods #
#########################################
def add_replicate_model(
self,
model_name: str,
model_id: str,
) -> None:
"""Adds a Replicate model to the class.
:param model_name: The name of the Replicate model.
:type model_name: str
:param model_id: The ID of the Replicate model.
:type model_id: str
"""
self.replicate_models[model_name] = model_id
def add_model_endpoint(
self,
model_name: str,
model_endpoint: str,
) -> None:
"""Adds a model endpoint to the class.
:param model_name: The name of the model.
:type model_name: str
:param model_endpoint: The endpoint of the model.
:type model_endpoint: str
"""
self.model_endpoints[model_name] = model_endpoint
#########################################
# Request Construction Methods #
#########################################
def _openai_sql_data_structure(
self,
user_context: str,
user_question: str,
user_answer: str,
system_context: Optional[str] = None,
) -> List[Dict[str, str]]:
"""Constructs a SQL data structure request for OpenAI's API.
:param user_context: The context of the SQL query.
:type user_context: str
:param user_question: The question of the SQL query.
:type user_question: str
:param user_answer: The answer of the SQL query.
:type user_answer: str
:param system_context: The context of the SQL query, None results in class default
:type system_context: Optional[str], optional
:return: The constructed SQL data structure request.
:rtype: List[Dict[str, str]]
"""
if system_context is None:
system_context = self.prompts._openai_sql_data_structure_prompt
message = [
{"role": "system", "content": system_context},
{"role": "user", "content": f'Context: {user_context}\n\nQuestion": {user_question}\n\nAnswer: {user_answer}'},
]
return message
def _openai_sql_request_structure(
self,
user_context: str,
user_question: str,
system_context: Optional[str] = None,
) -> List[Dict[str, str]]:
"""Constructs a SQL request structure for OpenAI's API.
:param user_context: The context of the SQL query.
:type user_context: str
:param user_question: The question of the SQL query.
:type user_question: str
:param system_context: The context of the SQL query, None results in class default
:type system_context: Optional[str], optional
:return: The constructed SQL request structure.
:rtype: List[Dict[str, str]]
"""
if system_context is None:
system_context = self.prompts._openai_sql_request_structure_prompt
message = [
{"role": "system", "content": system_context},
{"role": "user", "content": f'Context: {user_context}\n\nQuestion": {user_question}'},
]
return message
def openai_sql_response(
self,
response_object: Union[OpenAIObject, Dict[str, str]],
atl: Optional[bool] = False,
) -> Optional[str]:
"""Parses the response from OpenAI's API.
:param response_object: The response from OpenAI's API.
:type response_object: OpenAIObject
:return: The parsed response.
:rtype: Optional[str]
"""
if isinstance(response_object, OpenAIObject):
response_object = response_object.to_dict()
try:
response = response_object['openai_inference']['choices'][0]['message']
except Exception as e:
logger.warning(f"OpenAI response failed to parse with error: {e}")
return None
if len(response.keys()) > 2:
logger.warning(f"OpenAI response has more than 2 keys: {response.keys()}")
if atl:
try:
sqlglot.parse(response["content"])
return response["content"]
except Exception as e:
logger.warning(f"SQL query failed to parse with error: {e}")
return None
return response["content"]
def openai_sql_request(
self,
user_context: str,
user_question: str,
model: Optional[str] = "gpt-3.5-turbo", # TODO: consider using an enum for this
system_context: Optional[str] = None,
validate_response: Optional[bool] = False,
) -> Optional[OpenAIObject]:
"""Constructs a prompt to request a SQL query from OpenAI's API.
:param user_context: The context of the SQL query.
:type user_context: str
:param user_question: The question of the SQL query.
:type user_question: str
:param model: The model to use for the request, defaults to "gpt-3.5-turbo"
:type model: Optional[str], optional
:param system_context: The context of the SQL query, None results in class default
:type system_context: Optional[str], optional
        :param validate_response: Whether to validate the response, defaults to False. Returns None if validation fails.
:type validate_response: Optional[bool], optional
:return: The constructed SQL request.
:rtype: OpenAIObject
"""
message = self._openai_sql_request_structure(user_context, user_question, system_context)
try:
request = self.openai.ChatCompletion.create(
model=model,
messages=message,
)
except Exception as e:
logger.warning(f"OpenAI request failed with error: {e}")
raise e
if validate_response:
return self.openai_sql_response(request)
return request
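    # Illustrative usage sketch (not part of the original class; the schema and question
    # below are made up):
    #
    #     raw = self.openai_sql_request(
    #         user_context="CREATE TABLE head (age INTEGER)",
    #         user_question="How many heads of department are older than 56?",
    #     )
    #     sql_text = raw["choices"][0]["message"]["content"]
    #
    # Passing validate_response=True instead runs the result through openai_sql_response
    # and returns the message content directly (or None on a malformed response).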
def openai_dataset_request(
self,
dataset: Dataset,
): # -> Dict[str, OpenAIObject]:
"""Constructs a prompt to request a SQL query from OpenAI's API.
:param dataset: The dataset item to request.
:type dataset: Dataset
:return: The constructed SQL request.
:rtype: OpenAIObject
"""
try:
context = dataset['context']
question = dataset['question']
inference = self.openai_sql_request(user_context=context, user_question=question)
        except Exception as e:
            logger.warning(f"OpenAI request failed with error: {e}")
            return None
        return {"openai_inference": inference}
def replicate_sql_request(
self,
prompt: str,
model_name: str,
) -> str:
"""Constructs a prompt to request a SQL query from Replicate's API.
:param prompt: The prompt to use for the request.
:type prompt: str
:return: The constructed SQL request.
:rtype: str
"""
try:
request = self.rc.run(
self.replicate_models[model_name],
input={"prompt": prompt},
)
return ''.join(item for item in request)
except Exception as e:
logger.warning(f"Replicate request failed with error: {e}")
raise e
def replicate_dataset_request(
self,
dataset: Dataset,
model_name: Optional[str] = "llama_2_13b_sql",
column_name: Optional[str] = "replicate_inference",
prompt_type: Optional[str] = "tuning_format",
):
"""Constructs a prompt and requests a SQL query from Replicate's API.
:param dataset: The dataset item to request.
:type dataset: Dataset
:return: The constructed SQL request.
:rtype: str
"""
        if prompt_type == "tuning_format":
            prompt = json.loads(dataset['tuning_format'])['prompt']
        elif prompt_type == "basic_text_generation":
            prompt = self.basic_text_generation_prompt(dataset['context'], dataset['question'])
        else:
            raise ValueError(f"Unknown prompt_type: {prompt_type}")
# assumes the prompt is in the dataset, contained within 'tuning_format'
try:
# prompt = json.loads(dataset['tuning_format'])['prompt']
inference = self.replicate_sql_request(prompt, model_name=model_name)
return {column_name: inference}
except Exception as e:
logger.warning(f"Replicate request failed with error: {e}")
def basic_text_generation_prompt(
self,
context: str,
question: str,
) -> str:
"""Constructs a basic text generation prompt.
:param context: The context of the SQL query.
:type context: str
"""
        prompt = f"Context details the database: {context} # Question to answer: {question} # Answer as a SQL query: "
return prompt
def basic_text_generation_request(
self,
context: str,
question: str,
model_name: str,
api_key: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> str:
"""Constructs a basic text generation request.
:param context: The context of the SQL query.
:type context: str
:param question: The question of the SQL query.
:type question: str
:param model_name: The name of the model.
:type model_name: str
:param api_key: The API key to use for the request, defaults to None. Defaults to class default.
:type api_key: Optional[str], optional
:param headers: The headers to use for the request, defaults to None. Defaults to class default.
:type headers: Optional[Dict[str, str]], optional
:return: The constructed SQL request.
:rtype: str
"""
if api_key is None:
api_key = self.hf_key
if headers is None:
headers = {"Authorization": api_key}
prompt = self.basic_text_generation_prompt(context, question)
try:
response = requests.post(
self.model_endpoints[model_name],
headers=headers,
json={"inputs": prompt},
)
return response.json()
except Exception as e:
logger.warning(f"Basic text generation request failed with error: {e}")
raise e
def basic_text_generation_dataset_request(
self,
dataset: Dataset,
model_name: str,
response_column_name: str,
context_column_name: Optional[str] = "context",
question_column_name: Optional[str] = "question",
api_key: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
):
"""Constructs a prompt and requests a SQL query from a generic API."""
try:
context = dataset[context_column_name]
question = dataset[question_column_name]
inference = self.basic_text_generation_request(context, question, model_name, api_key)
return {response_column_name: inference}
except Exception as e:
logger.warning(f"Basic text generation request failed with error: {e}") | [
"question",
"Context details the databse: PLACEHOLDER # Question to answer: PLACEHOLDER # Answer as a SQL query: ",
"tuning_format",
"Context: PLACEHOLDER\n\nQuestion\": PLACEHOLDER\n\nAnswer: PLACEHOLDER",
"Context: PLACEHOLDER\n\nQuestion\": PLACEHOLDER",
"context"
] |
2024-01-10 | IHA114/GirlfriendGPT | src~agent~tools~selfie.py | """Tool for generating images."""
import logging
from langchain.agents import Tool
from steamship import Steamship
from steamship.base.error import SteamshipError
NAME = "GenerateSelfie"
DESCRIPTION = """
Useful for when you need to generate a selfie showing what you're doing or where you are.
Input: A detailed stable-diffusion prompt describing where you are and what's visible in your environment.
Output: the UUID of the generated selfie showing what you're doing or where you are.
"""
PLUGIN_HANDLE = "stable-diffusion"
NEGATIVE_PROMPT = (
"(bonnet), (hat), (beanie), cap, (((wide shot))), (cropped head), bad framing, "
"out of frame, deformed, cripple, old, fat, ugly, poor, missing arm, additional arms, "
"additional legs, additional head, additional face, dyed hair, black and white, grayscale"
)
class SelfieTool(Tool):
"""Tool used to generate images from a text-prompt."""
client: Steamship
def __init__(self, client: Steamship):
super().__init__(
name=NAME, func=self.run, description=DESCRIPTION, client=client
)
@property
def is_single_input(self) -> bool:
"""Whether the tool only accepts a single input."""
return True
def run(self, prompt: str, **kwargs) -> str:
"""Generate an image using the input prompt."""
image_generator = self.client.use_plugin(
plugin_handle=PLUGIN_HANDLE, config={"n": 1, "size": "768x768"}
)
prompt = prompt + (
"professional portrait photograph of a gorgeous Norwegian girl with long wavy blonde hair, "
f"{prompt}"
"((sultry flirty look)), freckles, beautiful symmetrical face, cute natural makeup, "
"((standing outside in snowy city street)), "
"stunning modern urban upscale environment, ultra realistic, concept art, elegant, highly detailed, "
"intricate, sharp focus, depth of field, f/1. 8, 85mm, medium shot, mid shot, (centered image composition), "
"(professionally color graded), ((bright soft diffused light)), volumetric fog, "
"trending on instagram, trending on tumblr, hdr 4k, 8k"
)
task = image_generator.generate(
text=prompt,
append_output_to_file=True,
options={"negative_prompt": NEGATIVE_PROMPT},
)
task.wait()
blocks = task.output.blocks
logging.info(f"[{self.name}] got back {len(blocks)} blocks")
if len(blocks) > 0:
logging.info(f"[{self.name}] image size: {len(blocks[0].raw())}")
return blocks[0].id
raise SteamshipError(f"[{self.name}] Tool unable to generate image!")
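    # Illustrative usage sketch (added for clarity; the workspace setup and prompt are
    # hypothetical -- in the bot this tool is normally invoked by the agent, not called directly):
    #
    #     tool = SelfieTool(client=Steamship())
    #     block_uuid = tool.run("having coffee at a small cafe, morning light")
    #     # `block_uuid` identifies the generated image block in Steamship's block storage.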
| [
"promptbae3aee2-d606-408b-b83b-0e39e0fe6327professional portrait photograph of a gorgeous Norwegian girl with long wavy blonde hair, promptbae3aee2-d606-408b-b83b-0e39e0fe6327((sultry flirty look)), freckles, beautiful symmetrical face, cute natural makeup, ((standing outside in snowy city street)), stunning modern urban upscale environment, ultra realistic, concept art, elegant, highly detailed, intricate, sharp focus, depth of field, f/1. 8, 85mm, medium shot, mid shot, (centered image composition), (professionally color graded), ((bright soft diffused light)), volumetric fog, trending on instagram, trending on tumblr, hdr 4k, 8kprofessional portrait photograph of a gorgeous Norwegian girl with long wavy blonde hair, promptbae3aee2-d606-408b-b83b-0e39e0fe6327professional portrait photograph of a gorgeous Norwegian girl with long wavy blonde hair, promptbae3aee2-d606-408b-b83b-0e39e0fe6327((sultry flirty look)), freckles, beautiful symmetrical face, cute natural makeup, ((standing outside in snowy city street)), stunning modern urban upscale environment, ultra realistic, concept art, elegant, highly detailed, intricate, sharp focus, depth of field, f/1. 8, 85mm, medium shot, mid shot, (centered image composition), (professionally color graded), ((bright soft diffused light)), volumetric fog, trending on instagram, trending on tumblr, hdr 4k, 8k((sultry flirty look)), freckles, beautiful symmetrical face, cute natural makeup, ((standing outside in snowy city street)), stunning modern urban upscale environment, ultra realistic, concept art, elegant, highly detailed, intricate, sharp focus, depth of field, f/1. 8, 85mm, medium shot, mid shot, (centered image composition), (professionally color graded), ((bright soft diffused light)), volumetric fog, trending on instagram, trending on tumblr, hdr 4k, 8k",
"(bonnet), (hat), (beanie), cap, (((wide shot))), (cropped head), bad framing, out of frame, deformed, cripple, old, fat, ugly, poor, missing arm, additional arms, additional legs, additional head, additional face, dyed hair, black and white, grayscale"
] |
2024-01-10 | IHA114/GirlfriendGPT | src~agent~parser.py | from __future__ import annotations
from typing import Union, Any
from langchain.agents import AgentOutputParser
from langchain.schema import AgentAction, AgentFinish
from prompts import FORMAT_INSTRUCTIONS
class MultiModalOutputParser(AgentOutputParser):
parser: AgentOutputParser
def __init__(self, parser, **data: Any):
super().__init__(**data, parser=parser)
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
cleaned_output = text.strip()
if cleaned_output.startswith("AI: "):
cleaned_output = cleaned_output[len("AI: ") :]
return self.parser.parse(cleaned_output)
@property
def _type(self) -> str:
return "conversational_chat"
| [] |
2024-01-10 | IHA114/GirlfriendGPT | src~agent~tools~album_art.py | """Tool for generating album art.
The purpose of this tool is to illustrate how to wrap the GenerateImageTool
with a custom tool description & some prompt engineering to steer the image
one way or another.
The GenerateImageTool leaves the user + LLM in complete control of the image
generation prompt... but what if you wanted to make sure the prompt was:
- A particular style?
- A particular mood?
- Something else entirely, involving web scraping and other operations?
You can do that by wrapping the GenerateImageTool, as you see here, and then
sending in your own custom prompt.
"""
import json
import logging
from langchain.agents import Tool
from steamship import Steamship
from steamship.base.error import SteamshipError
from steamship.data.plugin.plugin_instance import PluginInstance
from .image import GenerateImageTool
NAME = "GenerateAlbumArt"
DESCRIPTION = """
Useful for when you need to generate album art.
Input: A description of the album that needs art
Output: the UUID of a generated image
"""
class GenerateAlbumArtTool(Tool):
"""Tool used to generate album art from a album description."""
client: Steamship
tool: GenerateImageTool
def __init__(self, client: Steamship):
super().__init__(
name=NAME,
func=self.run,
description=DESCRIPTION,
client=client,
tool=GenerateImageTool(client),
)
@property
def is_single_input(self) -> bool:
"""Whether the tool only accepts a single input."""
return True
def run(self, prompt: str, **kwargs) -> str:
"""Respond to LLM prompt."""
# Here we create a NEW prompt, which is based on the prompt provided
# to this tool, but including extra terms.
image_gen_prompt = f"album art, 4k, high def, pop art, professional, high quality, award winning, grammy, platinum, {prompt}"
# Then we just return the results of the wrapped GenerateImageTool,
# passing it the new prompt that we created.
return self.tool.run(image_gen_prompt)
| [
"album art, 4k, high def, pop art, professional, high quality, award winning, grammy, platinum, PLACEHOLDER"
] |
2024-01-10 | ahoho/topics | soup_nuts~metrics.py | import random
from multiprocessing import Pool
from collections import defaultdict
from itertools import combinations
import rbo
import numpy as np
import pandas as pd
from tqdm import tqdm
from scipy.spatial.distance import cdist, jensenshannon
from scipy.optimize import linear_sum_assignment
from gensim.models import CoherenceModel
from gensim.corpora import Dictionary
from sklearn.metrics import rand_score, normalized_mutual_info_score, adjusted_rand_score
def _gen_measure_name(coherence_measure, window_size, top_n):
"""
Make a unique measure name from the arguments
"""
measure_name = f"{coherence_measure}_win{window_size}_top{top_n}"
return measure_name
def _summarize(data):
return {**pd.Series(data).describe().to_dict(), "sum": np.sum(data)}
def coherence(
topics,
vocab,
reference_text,
coherence_measure,
window_size,
top_n,
):
"""
Calculates coherence for a single model
"""
data_dict = Dictionary([vocab])
topics = [t[:top_n] for t in topics]
cm = CoherenceModel(
topics=topics,
texts=tqdm(reference_text),
dictionary=data_dict,
coherence=coherence_measure,
window_size=window_size,
)
confirmed_measures = cm.get_coherence_per_topic()
mean = cm.aggregate_measures(confirmed_measures)
measure_name = _gen_measure_name(coherence_measure, cm.window_size, top_n)
return measure_name, float(mean), [float(i) for i in confirmed_measures]
def purity(model_labels, gold_labels):
"""
Calculates the Purity metric as described in https://aclanthology.org/P16-1110/
"ALTO: Active Learning with Topic Overviews for Speeding Label Induction and Document Labeling"
For sanity check - The purity of any two user labels should be 1
"""
assert len(model_labels) == len(gold_labels)
# somewhat faster than a pure-python implementation
purity_sum = (
pd.DataFrame({"pred": model_labels, "true": gold_labels})
.groupby(["pred", "true"], as_index=False)
.size()
.groupby("pred")["size"]
.max()
.sum()
)
return purity_sum / len(model_labels)
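# Worked example (hypothetical labels, added for clarity): with predicted clusters
# [0, 0, 1, 1] and gold labels ["a", "a", "a", "b"], cluster 0 is purely "a" (2 docs)
# and cluster 1 splits 1/1 between "a" and "b" (max 1), so purity = (2 + 1) / 4 = 0.75.
# As the docstring notes, purity(labels, labels) == 1.0 for any labeling.
#
#     purity([0, 0, 1, 1], ["a", "a", "a", "b"])  # -> 0.75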
def rbo_dist(x, y, pval):
"""Rank-biased overlap distance. Assumes x, y truncated to top n items"""
return 1 - rbo.RankingSimilarity(x, y).rbo(p=pval)
def avg_jcrd_agreement(x, y):
"""Average Jaccard distance. Assumes x, y truncated to top n items"""
score = 0.0
max_set_size = len(x)
for depth in range(1, max_set_size + 1):
s1 = set(x[:depth])
s2 = set(y[:depth])
jcrd = len(s1 & s2)/len(s1 | s2)
score += jcrd
score = score/max_set_size
return 1.0 - score
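# Worked example (hypothetical rankings, added for clarity): for x = ["a", "b", "c"] and
# y = ["a", "c", "d"], the Jaccard similarities at depths 1, 2, 3 are 1, 1/3, and 1/2,
# so the averaged agreement is (1 + 1/3 + 1/2) / 3 (about 0.611) and the returned
# distance is about 0.389:
#
#     avg_jcrd_agreement(["a", "b", "c"], ["a", "c", "d"])  # -> ~0.389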
def unique_doc_words_over_runs(doc_topic_runs, topic_word_runs, top_n=15, hard_assignment=True, summarize=False):
"""
Given a collection of estimates of document-topic distributions over a set of runs,
determine how stable the topic assignments are per document by taking the union of
the set of top words predicted for each document
To determine what words are predicted for a given document, we use the reconstructed
bag-of-words: the admixture of (global) topic-word probabilities, weighted by the
document's topic probabilities
Setting `hard_assignment` uses the top words from the most-probable topic for the
document
"""
runs = len(doc_topic_runs)
n = doc_topic_runs[0].shape[0]
top_words_over_runs = np.zeros((n, runs * top_n))
# for each run, determine the set of words that are predicted for each topic
for i, (doc_topic, topic_word) in enumerate(zip(doc_topic_runs, topic_word_runs)):
if hard_assignment:
# find the top words per topic, then index by documents' top topic
top_words = (-topic_word).argsort(1)[doc_topic.argmax(1), :top_n]
else:
# calculate the mixture over topic-word probabilities per doc
top_words = (-(doc_topic @ topic_word)).argsort(1)[:, :top_n]
# store them in an array
top_words_over_runs[:, i*top_n:(i+1)*top_n] = top_words
# then, determine the unique number of words predicted for each document
# stackoverflow.com/questions/48473056/number-of-unique-elements-per-row-in-a-numpy-array
nunique = np.count_nonzero(np.diff(np.sort(top_words_over_runs, axis=1)), axis=1) + 1
# finally, normalize between the lowest possible and highest possible number of unique terms
punique = (nunique - top_n) / (top_n * (runs - 1))
if summarize: # summary is _over n_ (not runs)
return _summarize(punique)
return nunique, punique
def unique_topic_words_over_runs(topic_word_runs, top_n=15, summarize=False):
"""
Given a collection of estimates of topic-word distributions, calculate
how stable the topics are by comparing the set of top words
"""
runs = len(topic_word_runs)
k = topic_word_runs[0].shape[0]
max_count_digits = int(10 ** np.ceil(np.log10(k)))
unique_words = set()
for topic_word in topic_word_runs:
top_words = (-topic_word).argsort(1)[:, :top_n].reshape(-1)
word_counter = defaultdict(lambda: -1)
# problem: if repeated words appear across the topics in a single run,
# this will underestimate the number of unique words produced over runs.
# to solve: a word can be repeated at most `k` times in a run (once per topic).
# count each occurrence per run as a unique term, i.e., "mouse_0", "mouse_1", etc.
# this count gets stored in the first `max_count_digits`, so if "mouse" has index
# 155, the 3rd appearance is coded as 15502.
for w in top_words:
word_counter[w] += 1
w_c = w*max_count_digits + word_counter[w]
unique_words.add(w_c)
nunique = len(unique_words)
words_per_run = k * top_n
# normalize the score between lowest and highest possible number of unique terms
punique = (nunique - words_per_run) / (words_per_run * (runs - 1))
if summarize: # `punique` is a single value, but this unifies the API
return _summarize(punique)
return nunique, punique
def topic_dists_over_runs(
*, # enforce named arguments to avoid ambiguity
doc_topic_runs=None,
topic_word_runs=None,
metric="jensenshannon",
sample_n=1.0,
summarize=False,
seed=None,
workers=1,
top_n_items=None, # for rank based metrics ("rbo", "jaccard"), select top n items
tqdm_kwargs={},
):
"""
Estimate the stability of topics by calculating the distance between topics across runs.
Works on either topic-word or document-topic estimates, where "topics" are considered
the vector for each topic dimension in the estimate. That is, for a topic-word estimate
the vector is the size of the vocabulary, |V|, and for a doc-topic estimate it's the number
of documents N.
For each of the (runs*(runs-1))/2 pairs of runs, there is a run_a and a run_b with
associated estimates est_a and est_b.
We take the pairwise distances between the k topic vectors contained in est_a and est_b,
finding the minimum weight match between the topic pairs.
To speed up computation, can set `sample_n` to use only a subset of possible combinations.
TODO:
- does pairwise js-distance depend on whether betas came from a softmax vs. some
other method (e.g., gibbs?). does this matter?
- does it make sense to have a pairwise spearman?
"""
if topic_word_runs is not None and doc_topic_runs is not None:
raise ValueError("Supply either `topic_word_runs` or `doc_topic_runs`, not both")
# prepare the estimates
estimates = doc_topic_runs if doc_topic_runs is not None else topic_word_runs
for i in range(len(estimates)):
x = estimates[i]
        # apparently float64 is faster for cdist: stackoverflow.com/a/50671733/5712749
x = x.astype(np.float64)
if doc_topic_runs is not None:
x = x.T
if metric == "jensenshannon" and not np.allclose(x.sum(1), 1):
x = x/x.sum(1, keepdims=True)
if top_n_items == "auto":
top_n_items = x.shape[1] // x.shape[0]
if top_n_items is not None: # for rank-based metrics
x = (-x).argsort(1)[:, :top_n_items]
estimates[i] = x
# sample the combinations of runs
runs = len(estimates)
combins = list(combinations(range(runs), 2))
sample_n = sample_n if sample_n > 1 else int(sample_n * len(combins))
random.seed(seed)
random.shuffle(combins)
combins = combins[:sample_n]
# compute distances
# for each run pair, find the minimum global distances
if workers <= 1:
# first, initialize the matrix in which to store the distances
num_topics = estimates[0].shape[0]
min_dists = np.zeros((len(combins), num_topics))
for i, (idx_a, idx_b) in enumerate(tqdm(combins, **tqdm_kwargs)):
args = (estimates[idx_a], estimates[idx_b], metric)
min_dists[i] = _min_total_topic_dist(args)
else:
with Pool(processes=workers) as pool:
args = [(estimates[idx_a], estimates[idx_b], metric) for idx_a, idx_b in combins]
result = pool.imap_unordered(_min_total_topic_dist, args)
min_dists = np.array([r for r in tqdm(result, total=sample_n, **tqdm_kwargs)])
min_dists = np.sort(min_dists, axis=1)
if summarize:
# not totally obvious how to report a summary
# for now: we take the total cost and report summary over runs
        # could be an issue if different models' distributions have different entropies
return _summarize(min_dists.sum(1))
return min_dists
def _min_total_topic_dist(args):
"""Helper function to find the minimum total cost TODO: quicker approximation?"""
x, y, metric = args
dists = cdist(x, y, metric=metric) # get distances: produces a [k x k] "cost" matrix
row_idx, col_idx = linear_sum_assignment(dists) # minimize the global match cost
return dists[row_idx, col_idx]
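# Worked example (hypothetical 2x2 cost matrix, added for clarity): if the pairwise
# distances between the topics of two runs were [[0.1, 0.9], [0.8, 0.2]],
# linear_sum_assignment matches topic 0 <-> 0 and topic 1 <-> 1, so
# _min_total_topic_dist((run_a_topics, run_b_topics, metric)) returns the matched
# distances [0.1, 0.2] (total cost 0.3) rather than a greedy or arbitrary pairing.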
def doc_words_dists_over_runs(
doc_topic_runs,
topic_word_runs,
batchsize=1000,
sample_n=1,
workers=1,
seed=None,
tqdm_kwargs={}
):
"""
For each document, calculate the jensen-shannon distance between its predicted word
probabilities (i.e., reconstructed BoW) in each run
TODO: since this is not a true forward pass of the model (e.g., in some models,
`topic_word` is not normalized; a softmax is applied after `doc_topic @ topic_word`).
Hence, this may introduce some problems---worth revisiting.
"""
runs = len(doc_topic_runs)
n = doc_topic_runs[0].shape[0]
v = topic_word_runs[0].shape[1]
    assert np.allclose(topic_word_runs[0].sum(1).max(), 1)  # topic-word matrices should be normalized
# create the document-word estimates
doc_word_probs = np.zeros((runs, n, v))
for run_i, (doc_topic, topic_word) in enumerate(zip(doc_topic_runs, topic_word_runs)):
for j in range(n // batchsize + 1):
bs = np.s_[j*batchsize:(j+1)*batchsize]
# NB: assumes topic-word is normalized, which will violate the
# true model for d-vae, scholar, and others
p_hat = doc_topic[bs] @ topic_word
doc_word_probs[run_i][bs] = p_hat
# sample the combinations of runs
combins = list(combinations(range(runs), 2))
sample_n = sample_n if sample_n > 1 else int(sample_n * len(combins))
random.seed(seed)
random.shuffle(combins)
combins = combins[:sample_n]
doc_word_dists = np.zeros((len(combins), n))
for i, (idx_a, idx_b) in enumerate(tqdm(combins, **tqdm_kwargs)):
# may need to dispatch to workers; axis arg requires scipy 1.7.
dists = jensenshannon(doc_word_probs[idx_a], doc_word_probs[idx_b], axis=1)
doc_word_dists[i] = dists
return doc_word_dists
| [] |
2024-01-10 | csinva/clinical-rule-analysis | notebooks_data_prep~pubmed.py | import pathlib
import re
import numpy as np
import mdcalc
from mdcalc import try_or_none
from collections import defaultdict
import fitz
import dvu
import matplotlib.pyplot as plt
import pandas as pd
from os.path import join
import os.path
from bs4 import BeautifulSoup
from tqdm import tqdm
import imodelsx.llm
import json
import requests
import joblib
import os
import numpy as np
import pubmed
import openai
plt.style.use("default")
dvu.set_style()
@try_or_none
def parse_name(name: str):
name_arr = name.split()
# drop if too long
if len(name) > 40:
return None
# drop special names
for k in [
"investigator",
"group",
"committee",
"network",
]:
if k in name.lower():
return None
# drop when first name is only one letter
if len(name_arr[0]) == 1:
return None
# drop middle initial
if len(name_arr) > 2 and len(name_arr[1]) == 1:
name_arr = [name_arr[0], name_arr[-1]]
# return name
return " ".join(name_arr)
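# Illustrative behaviour (hypothetical author strings, added for clarity):
#
#     parse_name("Jane A Doe")                     # -> "Jane Doe" (middle initial dropped)
#     parse_name("J Doe")                          # -> None (single-letter first name)
#     parse_name("PECARN Research Network Group")  # -> None (group/consortium author)
#
# Note that the @try_or_none decorator converts any exception into a None return.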
def get_metadata(paper_id: str):
cache_file = f"../data/metadata/{paper_id}.json"
if os.path.exists(cache_file):
metadata = json.load(open(cache_file))
else:
resp = requests.get(
f"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=pubmed&id={paper_id}&retmode=json"
)
metadata = json.loads(resp.text)
with open(cache_file, "w") as f:
json.dump(metadata, f, indent=2)
return metadata
def get_authors_with_firstname(paper_link: str, paper_id: str):
cache_file = f"../data/metadata/{paper_id}_full.joblib"
if os.path.exists(cache_file):
return joblib.load(cache_file)["author_names"]
else:
resp = requests.get(paper_link).text
soup = BeautifulSoup(resp)
author_names = set()
# print(soup.find_all("span", {"class": "authors-list-item"}))
for s in soup.find_all("span", {"class": "authors-list-item"}):
try:
author_name = s.a["data-ga-label"]
author_names.add(author_name)
# print('author_name', author_name)
except:
pass
# print('a', author_names)
joblib.dump({"author_names": author_names, "resp": resp}, cache_file)
return author_names
def get_author_affiliations(paper_id):
cache_file = cache_file = f"../data/metadata/{paper_id}_full.joblib"
cache_dict = joblib.load(cache_file)
if "author_affils" in cache_dict:
return cache_dict["author_affils"]
else:
resp = cache_dict["resp"]
soup = BeautifulSoup(resp)
affils = soup.find_all("div", {"class": "affiliations"})
if len(affils) == 0:
return None
affils = affils[0]
affils_list_return = []
for li in affils.ul.find_all("li"):
x = li.text
# remove leading numbers
while x[0].isdigit():
x = x[1:]
affils_list_return.append(x.strip())
cache_dict["author_affils"] = affils_list_return
joblib.dump(cache_dict, cache_file)
return affils_list_return
# @try_or_none
# def get_free_text_link(paper_id: str):
# cache_file = f"../data/metadata/{paper_id}_free_text_link.json"
# if os.path.exists(cache_file):
# free_text_link = json.load(open(cache_file))
# else:
# resp = requests.get(
# f"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi?dbfrom=pubmed&id={paper_id}&cmd=prlinks&retmode=json"
# )
# free_text_link = resp.json()
# with open(cache_file, "w") as f:
# json.dump(free_text_link, f, indent=2)
# return free_text_link["linksets"][0]["idurllist"][0]["objurls"][0]["url"]["value"]
def get_paper_id(paper_link: str):
if paper_link.endswith("/"):
paper_link = paper_link[:-1]
paper_id = paper_link.split("/")[-1]
# remove leading zeros
while paper_id.startswith("0"):
paper_id = paper_id[1:]
return paper_id
def get_updated_refs(df):
refs = df["ref_href"].values
idxs_corrected = df["ref_href_corrected"].notna() & ~(
df["ref_href_corrected"] == "Unk"
)
refs[idxs_corrected] = df["ref_href_corrected"][idxs_corrected]
return refs
@try_or_none
def clean_llm_country_output(s):
if " is " in s:
s = s.split(" is ")[-1]
# remove punctuation
s = s.replace(".", "")
# remove all parenthetical phrases
ind0 = s.find("(")
ind1 = s.find(")")
while ind0 != -1 and ind1 != -1:
s = s[:ind0] + s[ind1 + 1 :]
ind0 = s.find("(")
ind1 = s.find(")")
    s = re.sub(r"\bthe\b", "", s)  # remove the standalone word "the" (a plain .replace would also clip words like "Netherlands")
s = s.split(",")[-1]
return s.strip()
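# Illustrative behaviour (hypothetical LLM output, added for clarity):
#
#     clean_llm_country_output("The study population is the United States (US).")
#     # -> "United States"   (text after " is ", periods, parentheticals and "the" removed,
#     #                       then the last comma-separated field, stripped)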
| [] |
2024-01-10 | csinva/clinical-rule-analysis | notebooks_llm~eval_extraction.py | import pathlib
import re
from typing import Dict, List
import numpy as np
from collections import defaultdict
import matplotlib.pyplot as plt
import pandas as pd
from os.path import join
import os.path
from tqdm import tqdm
import json
import os
import numpy as np
import openai
from os.path import dirname
path_to_file = dirname(__file__)
path_to_repo = dirname(path_to_file)
papers_dir = join(path_to_repo, "papers")
def compute_metrics_within_1(
df,
preds_col_to_gt_col_dict={
"num_male": "num_male_corrected",
"num_female": "num_female_corrected",
"num_total": "num_total_corrected",
},
) -> pd.DataFrame:
d = defaultdict(list)
one_perc = (df["participants___total"].astype(float) / 100).apply(np.ceil)
for k in df.columns:
# if k.startswith('num_') and k + '_corrected' in df.columns:
# print(one_perc)
if k in preds_col_to_gt_col_dict:
gt_col = preds_col_to_gt_col_dict[k]
# print(df.columns, gt_col)
idxs_with_labels = df[gt_col].notnull() & ~(df[gt_col].isin({-1}))
gt = df[gt_col][idxs_with_labels].astype(int)
pred = df[k].apply(cast_int)[idxs_with_labels].astype(int)
pred = pred.apply(lambda x: x if x >= 0 else np.nan)
# print('preds', (pred >= 0).sum())
# print('gt', gt)
d["target"].append(gt_col)
d["n_gt"].append(len(gt))
# print(df[k])
# d['n_pred'].append(df[k].notna().sum())
d["n_pred"].append((pred.notna() & (pred >= 0)).sum())
# print((gt - pred).values.tolist())
# d["n_correct_within_1"].append((np.abs(gt - pred) <= 1).sum())
d["n_correct_1_perc"].append(
(np.abs(gt - pred) <= one_perc[idxs_with_labels]).sum()
)
# d['n_predicted'].append(df[k].notnull().sum())
# count number of values which contain a number
metrics = pd.DataFrame.from_dict(d)
metrics["recall"] = metrics["n_correct_1_perc"] / metrics["n_gt"]
metrics["precision"] = metrics["n_correct_1_perc"] / metrics["n_pred"]
return metrics.round(2)
def convert_percentages_when_total_is_known(num, tot):
if tot is not None and isinstance(tot, str):
tot = tot.replace(",", "").replace(" ", "")
if (
str_contains_number(num)
and str_is_percentage(num)
and str_contains_number(tot)
and not str_is_percentage(tot)
):
num = percentage_to_num(num)
tot = int(tot)
num = round(num * tot / 100)
return num
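# Worked example (hypothetical values, added for clarity): a percentage is only converted
# when the total is a plain count, e.g.
#
#     convert_percentages_when_total_is_known("25%", "1,200")   # -> 300
#     convert_percentages_when_total_is_known("300", "1,200")   # -> "300" (left unchanged)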
def cast_int(x):
try:
return int(x)
except:
return -1
def int_or_empty(x):
try:
return int(x)
except:
return ""
def int_or_neg1(x):
try:
return int(x)
except:
return -1
def str_is_parsable(x):
"""Check that string only contains numbers, percent, or periods"""
return x is not None and all(
char.isdigit() or char in [".", "%", " ", ","] for char in str(x)
)
def str_contains_number(x):
return (
x is not None
and any(char.isdigit() for char in str(x))
and not any(char.isalpha() for char in str(x))
)
def str_is_percentage(s):
return "%" in s or "." in s
def percentage_to_num(s):
if "%" in s:
s = s.replace("%", "")
return float(s)
| [] |
2024-01-10 | csinva/clinical-rule-analysis | notebooks_llm~02_classification.py | from imodelsx import LinearFinetuneClassifier, LinearNgramClassifier, AugGAMClassifier
from tqdm import tqdm
from sklearn.linear_model import LogisticRegressionCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn import preprocessing
import openai
import pandas as pd
import argparse
from copy import deepcopy
import logging
import random
from collections import defaultdict
from os.path import join
import numpy as np
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split
import joblib
import imodels
import inspect
import os.path
from imodelsx import cache_save_utils
from skllm.config import SKLLMConfig
from skllm import MultiLabelZeroShotGPTClassifier
path_to_repo = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
openai.api_key = open("/home/chansingh/.OPENAI_KEY").read().strip()
SKLLMConfig.set_openai_key(openai.api_key)
def add_eval(r, y_test, y_pred, y_pred_proba):
cls_report = classification_report(
y_test, y_pred, output_dict=True, zero_division=0
)
for k1 in ["macro"]:
for k in ["precision", "recall", "f1-score"]:
r[f"{k1}_{k}"].append(cls_report[k1 + " avg"][k])
r["accuracy"].append(accuracy_score(y_test, y_pred))
r["roc_auc"].append(roc_auc_score(y_test, y_pred_proba))
return r
def get_classification_data(lab="categorization___chief_complaint", input_text='description'):
# read data
df = pd.read_pickle(join(path_to_repo, 'data/data_clean.pkl'))
# prepare output
classes = df[lab].explode()
vc = classes.value_counts()
# restrict to top classes
top_classes = vc.index[vc.values >= 20]
df[lab] = df[lab].apply(lambda l: [x for x in l if x in top_classes])
# label binarizer
# top classes put most frequent first and last (helps with zero-shot)
    top_classes = top_classes[::2].tolist() + top_classes[1::2].tolist()[::-1]
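    # Worked example of the reordering above (hypothetical labels, added for clarity):
    # if the classes sorted by frequency are [A, B, C, D, E], the result is
    # [A, C, E] + [D, B] = [A, C, E, D, B], so the most frequent class sits first and
    # the second most frequent sits last.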
le = MultiLabelBinarizer(classes=top_classes)
y = le.fit_transform(df[lab])
# input text
# set up text for prediction
if input_text == 'raw_text':
X = df["paper___raw_text"]
elif input_text == 'description':
def get_text_representation(row):
# return f"""- Title: {row["title"]}
# - Description: {row["description"]}
# - Predictor variables: {str(row["feature_names"])[1:-1]}"""
return f"""{row["title"]}. {row["description"]}.""" # Keywords: {str(row["info___keywords"])[1:-1]}"""
X = df.apply(get_text_representation, axis=1)
idxs = X.notna()
X = X[idxs].tolist()
y = y[idxs]
# train test split
return X, y, le.classes_, le
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=random_state)
# return X_train, X_test, y_train, y_test, le.classes_
def get_model(model_name="decision_tree", random_state=42, class_name=None, input_text='raw_text'):
if model_name == "decision_tree":
return Pipeline(
[
("tfidf", TfidfVectorizer()),
("clf", DecisionTreeClassifier(random_state=random_state)),
]
)
elif model_name == "random_forest":
return Pipeline(
[
("tfidf", TfidfVectorizer()),
("clf", RandomForestClassifier(random_state=random_state)),
]
)
# elif model_name == 'figs':
# return Pipeline(
# [
# ]
# )
elif model_name == "logistic":
return Pipeline(
[
("tfidf", TfidfVectorizer()),
(
"clf",
# MultiOutputClassifier(
LogisticRegressionCV(random_state=random_state)
# ),
),
]
)
elif model_name == "aug-linear":
return AugGAMClassifier(
checkpoint="bert-base-uncased",
normalize_embs=False,
random_state=random_state,
cache_embs_dir=os.path.expanduser(
join(os.path.expanduser("~/.cache_mdcalc_embeddings"),
class_name, input_text)
),
ngrams=2,
)
elif model_name == "bert-base-uncased":
# pipe = MultiOutputClassifier(
return LinearFinetuneClassifier(
checkpoint="bert-base-uncased",
normalize_embs=True,
random_state=random_state,
cache_embs_dir=join(os.path.expanduser(
"~/.cache_mdcalc_embeddings"), class_name, input_text),
)
elif model_name == 'zero-shot':
return MultiLabelZeroShotGPTClassifier(
max_labels=5, openai_model="gpt-4-0314")
# initialize args
def add_main_args(parser):
"""Caching uses the non-default values from argparse to name the saving directory.
    Changing the default value of an argument will break cache compatibility with previous runs.
"""
# dataset args
parser.add_argument(
"--label_name",
type=str,
default="categorization___chief_complaint",
choices=["categorization___chief_complaint",
"categorization___specialty",
"categorization___purpose",
"categorization___system",
"categorization___disease",],
help="name of label",
)
parser.add_argument(
'--input_text',
type=str,
default='raw_text',
help='input text to use'
)
# training misc args
parser.add_argument("--seed", type=int, default=1, help="random seed")
parser.add_argument(
"--save_dir",
type=str,
default=join(path_to_repo, "results"),
help="directory for saving",
)
# model args
parser.add_argument(
"--model_name",
type=str,
default="decision_tree",
help="name of model",
)
return parser
def add_computational_args(parser):
    """Arguments that only affect computation and not the results (shouldn't be used when checking the cache)."""
parser.add_argument(
"--use_cache",
type=int,
default=1,
choices=[0, 1],
help="whether to check for cache",
)
return parser
if __name__ == "__main__":
# get args
parser = argparse.ArgumentParser()
parser_without_computational_args = add_main_args(parser)
parser = add_computational_args(
deepcopy(parser_without_computational_args))
args = parser.parse_args()
# set up logging
logger = logging.getLogger()
logging.basicConfig(level=logging.INFO)
# set up saving directory + check for cache
already_cached, save_dir_unique = cache_save_utils.get_save_dir_unique(
parser, parser_without_computational_args, args, args.save_dir
)
if args.use_cache and already_cached:
logging.info(f"cached version exists! Successfully skipping :)\n\n\n")
exit(0)
for k in sorted(vars(args)):
logger.info("\t" + k + " " + str(vars(args)[k]))
logging.info(f"\n\n\tsaving to " + save_dir_unique + "\n")
# set seed
np.random.seed(args.seed)
random.seed(args.seed)
# torch.manual_seed(args.seed)
# get data
X, y, classes, le = get_classification_data(
lab=args.label_name, input_text=args.input_text)
# set up saving dictionary + save params file
r = defaultdict(list)
r.update(vars(args))
r["save_dir_unique"] = save_dir_unique
os.makedirs(save_dir_unique, exist_ok=True)
# cache_save_utils.save_json(
# args=args, save_dir=save_dir_unique, fname="params.json", r=r
# )
# fit + eval
if not args.model_name == 'zero-shot':
for i, c in enumerate(tqdm(classes)):
m = get_model(
args.model_name,
random_state=42,
class_name=c,
input_text=args.input_text,
)
y_i = y[:, i]
X_train, X_test, y_train, y_test = train_test_split(
X, y_i, test_size=0.25, random_state=42, stratify=y_i
)
m.fit(X_train, y_train)
# df['y_pred_train'].append(m.predict(X_train))
y_pred = m.predict(X_test)
y_pred_proba = m.predict_proba(X_test)[:, 1]
# df['y_pred_test'].append(y_test)
r = add_eval(r, y_test, y_pred, y_pred_proba)
elif args.model_name == 'zero-shot':
m = get_model(
args.model_name,
random_state=42,
input_text=args.input_text,
)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42
)
m.fit(None, [classes.tolist()])
# df['y_pred_train'].append(m.predict(X_train))
y_pred_strs = m.predict(X_test)
y_pred = le.transform(y_pred_strs)
y_pred_proba = y_pred
for i in range(len(classes)):
r = add_eval(r, y_test[:, i], y_pred[:, i], y_pred_proba[:, i])
for k in ['macro_precision', 'macro_recall', 'macro_f1-score', 'accuracy', 'roc_auc']:
r[f"mean_{k}"] = np.mean(r[k])
# save results
joblib.dump(
r, join(save_dir_unique, "results.pkl")
) # caching requires that this is called results.pkl
# joblib.dump(model, join(save_dir_unique, "model.pkl"))
logging.info("Succesfully completed :)\n\n")
| [] |
2024-01-10 | csinva/clinical-rule-analysis | notebooks_llm~extraction.py | import pathlib
import re
from typing import Dict, List
import numpy as np
from collections import defaultdict
import pandas as pd
from os.path import join
import os.path
from tqdm import tqdm
import json
import os
import numpy as np
import openai
from os.path import dirname
import imodelsx
import prompts_extraction
path_to_repo = dirname(dirname(os.path.abspath(__file__)))
openai.api_key = open("/home/chansingh/.OPENAI_KEY").read().strip()
# imodelsx.llm.LLM_CONFIG["LLM_REPEAT_DELAY"] = 30
def extract_nums_df(
texts: List[str],
repeat_delay=30,
verbose=True,
checkpoint="gpt-4-0613",
subset_len_tokens=4750,
) -> pd.DataFrame:
"""Return dataframe with different extracted fields as columns"""
# get prompt
llm = imodelsx.llm.get_llm(
checkpoint, repeat_delay=repeat_delay
) # gpt-3.5-turbo-0613
# properties, functions, content_str = prompts_extraction.get_prompts_gender_and_race()
# print('attempting to add', properties.keys())
# add_columns_based_on_properties(df, ids_with_paper, properties, functions, content_str, llm)
properties, functions, content_str = prompts_extraction.get_prompts_gender()
print("attempting to add", properties.keys())
extractions1 = extract_columns_based_on_properties(
texts,
properties,
functions,
content_str,
llm,
verbose=verbose,
subset_len_tokens=subset_len_tokens,
)
properties, functions, content_str = prompts_extraction.get_prompts_race()
print("attempting to add", properties.keys())
extractions2 = extract_columns_based_on_properties(
texts,
properties,
functions,
content_str,
llm,
verbose=verbose,
subset_len_tokens=subset_len_tokens,
)
return pd.DataFrame.from_dict(extractions1 | extractions2)
def rename_to_none(x: str):
if x in {"", "unknown", "N/A"}:
return None
else:
return x
def extract_columns_based_on_properties(
texts,
properties,
functions,
content_str,
llm,
verbose=True,
subset_len_tokens=4750,
) -> Dict[str, List]:
# initialize empty columns
out = {}
for k in properties.keys():
out[k] = len(texts) * [None]
# run loop
for i, text in tqdm(enumerate(texts)):
try:
args = call_on_subsets(
text,
content_str=content_str,
functions=functions,
llm=llm,
verbose=verbose,
subset_len_tokens=subset_len_tokens,
)
if args is not None:
for k in properties.keys():
if k in args:
out[k][i] = rename_to_none(args[k])
# remove spans if they are not actually contained in the text
if "_span" in k:
if not _check_evidence(args[k], text):
out[k][i] = None
except Exception as e:
print(e)
return out
def call_on_subsets(
x: str,
content_str: str,
functions: List[Dict],
llm,
subset_len_tokens=4750,
max_calls=3,
verbose=True,
):
messages = [
{
"role": "user",
"content": content_str,
}
]
subset_len_chars = subset_len_tokens * 4
args = None
subset_num = 0
while args is None and subset_num < max_calls:
subset = x[subset_num * subset_len_chars : (subset_num + 1) * subset_len_chars]
# if approx_tokens < 6000:
messages[0]["content"] = content_str.format(input=subset)
msg = llm(
messages,
functions=functions,
return_str=False,
temperature=0.0,
verbose=verbose,
)
if msg is not None and "function_call" in msg["choices"][0]["message"]:
args = json.loads(
msg["choices"][0]["message"]["function_call"]["arguments"]
)
# and msg.get("function_call") is not None:
# args = json.loads(msg.get("function_call")["arguments"])
return args
subset_num += 1
        # only continue if the next segment would have at least 0.5 * subset_len_chars left
if len(x) < (subset_num + 0.5) * subset_len_chars:
break
return None
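# Illustrative sketch of the chunking above (hypothetical sizes, added for clarity):
# with the default subset_len_tokens=4750 the window is 4750 * 4 = 19,000 characters,
# so up to max_calls=3 windows cover roughly the first 57,000 characters of a paper;
# the loop stops early once a window yields a function-call result or the text does not
# extend at least half a window into the next segment.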
def _check_evidence(ev: str, real_input: str):
if ev is not None:
# remove all whitespace
ev = "".join(ev.split())
real_input = "".join(real_input.split())
return ev.lower() in real_input.lower()
return False
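# Illustrative behaviour (hypothetical span, added for clarity): the check is
# whitespace-insensitive and case-insensitive, e.g.
#
#     _check_evidence("New  York", "Recruited in new york city")  # -> True
#     _check_evidence("Boston", "Recruited in new york city")     # -> False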
if __name__ == "__main__":
df = pd.read_pickle(join(path_to_repo, "data/data_clean.pkl"))
gt_cols = [
"participants___male",
"participants___female",
"participants___total",
"participants___white",
"participants___black",
"participants___latino",
"participants___asian",
]
idxs = df["paper___raw_text"].notna() & ((df[gt_cols] > 0).any(axis=1))
texts = df.loc[idxs, "paper___raw_text"].values.tolist()
extractions = extract_nums_df(
texts, verbose=True, checkpoint="gpt-3.5-turbo-0613", subset_len_tokens=3000
)
| [] |
2024-01-10 | HalfBloody/prompt-lib | prompt_lib~inference.py | # Given the path to a text file, queries alpa for each line in the file.
import json
import os.path
from datetime import datetime
from itertools import chain
import pathlib
import sys
from typing import List
from tqdm import tqdm
import pandas as pd
import wandb
import glob
import re
import os
import logging
from prompt_lib.backends.openai_api import OpenaiAPIWrapper
from prompt_lib.prompts.utils import (
TaskConfig,
make_task_file_from_config,
get_question_from_prompt,
)
from prompt_lib.eval.eval_utils import read_jsonl
logging.basicConfig(level=logging.INFO)
def inference_loop(task_config: TaskConfig) -> None:
"""Query a language model API for each line in the file."""
task_file = make_task_file_from_config(task_config).to_dict(orient="records")
n_task_original = len(task_file)
task_file = task_file[: task_config.num_inference_examples]
# make output directory
outdir = get_outdir(task_config)
# remove cached examples from task_file
cached_examples, thread_offset = load_cached_examples(outdir, task_config)
task_file = [
example
for example in task_file
if not (
(task_config.num_prompt_examples > 0 and
get_question_from_prompt(example["question"], task_config) in cached_examples)
or example["question"] in cached_examples
)
]
print(
f"Found {len(cached_examples)} cached examples, {len(task_file)} examples to query, found {n_task_original - len(task_file)} in cache"
)
pathlib.Path(f"{outdir}").mkdir(parents=True, exist_ok=True)
# split tasks into subtasks. This is redundant for now, but will be useful when we want to parallelize. Also helps with caching/restarting, as intermediate results are saved.
batched_tasks = create_task_batches(task_config, task_file)
outputs = []
accuracy_so_far = 0
    # load any cached partial outputs so they are included in the final results
    for r_file in glob.glob(f"{outdir}/outputs_part*.jsonl"):
        cached = read_jsonl(r_file)
        outputs.append(cached.to_dict(orient="records"))
# run inference
for (batch, batch_idx) in tqdm(batched_tasks):
thread_outputs = run_inference_on_batch(batch, batch_idx, task_config=task_config)
outputs.append(thread_outputs)
progress_perc = round(len(outputs) * 100 / len(batched_tasks), 2)
wandb.log({"progress_so_far": progress_perc})
pd.DataFrame(thread_outputs).to_json(
f"{outdir}/outputs_part{batch_idx + thread_offset}.jsonl",
orient="records",
lines=True,
)
accuracy_so_far += task_config.eval_function(pd.DataFrame(thread_outputs))
wandb.log({"accuracy_so_far": accuracy_so_far / len(outputs)})
outputs = pd.DataFrame(chain(*outputs))
# remove duplicates
# BUG: we should not be doing this: there may be good reasons to have duplicates in the input: someone benchmarking
# outputs = outputs.drop_duplicates(subset=["question"])
if "logprobs" in outputs.columns:
outputs = outputs.drop("logprobs", axis=1)
wandb.log({"accuracy": task_config.eval_function(outputs)})
wandb.log({"num_inference_examples": len(outputs)})
wandb.log({"num_inference_examples_with_answer": len(outputs[outputs["answer"].notnull()])})
# convert all columns to type string
# drop all rows with any nan values
outputs = outputs.dropna()
for col in outputs.columns:
outputs[col] = outputs[col].astype(str)
wandb.log({"outputs": wandb.Table(dataframe=outputs)})
logging.info(f"Number of successful queries: {len(outputs)}")
outputs.to_json(f"{outdir}/outputs.jsonl", orient="records", lines=True)
with open(f"{outdir}/task_config.json", "w") as f:
f.write(json.dumps(task_config.to_dict(), indent=4))
return outputs
def create_task_batches(task_config: TaskConfig, task_file: List) -> List:
"""Generates batches of tasks. Currently, we don't parallelize, but it's useful for caching and restarting.
Args:
task_config (_type_): TaskConfig
task_file (List): List of tasks
Returns:
List of (batch, batch_idx) tuples
"""
num_chunks = len(task_file) // task_config.num_questions_per_thread
load_per_task = []
for i in range(num_chunks):
load_per_task.append(
(
task_file[
i
* task_config.num_questions_per_thread : (i + 1)
* task_config.num_questions_per_thread
],
i,
)
)
if len(task_file) % task_config.num_questions_per_thread != 0:
load_per_task.append(
(task_file[num_chunks * task_config.num_questions_per_thread :], num_chunks)
)
return load_per_task
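# Worked example (hypothetical sizes, added for clarity): with 10 tasks and
# num_questions_per_thread=4, the batches are (tasks[0:4], 0), (tasks[4:8], 1) and a
# final partial batch (tasks[8:10], 2).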
def load_cached_examples(outdir, task_config):
"""Loads cached examples from a directory."""
cached_examples = set()
thread_offset = 0
if pathlib.Path(outdir).exists():
for r_file in glob.glob(f"{outdir}/outputs_part*.jsonl"):
cached = read_jsonl(r_file)
for i, row in cached.iterrows():
cached_examples.add(get_question_from_prompt(row["question"], task_config))
            part_idx = re.search(r"outputs_part(\d+)\.jsonl", os.path.basename(r_file)).group(1)
thread_offset = max(thread_offset, int(part_idx))
thread_offset += 1
return cached_examples, thread_offset
def get_outdir(task_config: TaskConfig) -> str:
if task_config.cached_timestamp is None:
time_stamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
else:
print(f"Using cached timestamp: {task_config.cached_timestamp}")
time_stamp = task_config.cached_timestamp
outdir = f"data/logs/{task_config.task_id}/{task_config.model_name}/temp_{task_config.temperature}/seed_{task_config.seed}/num_completions_{task_config.num_completions}/"
if task_config.num_prompt_examples == -1:
outdir += "/k_all/"
else:
outdir += f"/k_{task_config.num_prompt_examples}/"
if task_config.tag is not None:
outdir += f"{task_config.tag}/"
outdir += f"{time_stamp}/"
return outdir
def run_inference_on_batch(
rows: List[dict],
thread_id: int,
task_config: TaskConfig,
max_retries: int = 10,
) -> List[dict]:
outputs = []
i = 0
n = len(rows)
pbar = tqdm(total=n, desc=f"Querying {task_config.model_name} [thread_id={thread_id}]")
num_retries = 0
while i < n:
try:
response = OpenaiAPIWrapper.call(
temperature=task_config.temperature,
prompt=rows[i]["question"],
max_tokens=task_config.max_tokens,
engine=task_config.model_name,
stop_token=task_config.prompt_config.inter_example_sep, # generate till the model starts generating a new question
num_completions=task_config.num_completions,
)
if task_config.prompt_config.inter_example_sep:
prompt_only = rows[i]["question"].split(task_config.prompt_config.inter_example_sep)[
:-1
]
prompt_only = task_config.prompt_config.inter_example_sep.join(prompt_only)
question = rows[i]["question"].split(task_config.prompt_config.inter_example_sep)[-1]
else: # zero-shot, everything is the prompt
prompt_only = rows[i]["question"]
question = rows[i]["question"]
res = {
"prompt": prompt_only,
"question": question,
"answer": rows[i]["answer"],
"entire_prompt": rows[i]["question"],
}
res.update({k: v for k, v in rows[i].items() if k not in res})
if task_config.num_completions == 1:
entire_response = OpenaiAPIWrapper.get_first_response(response)
generated_answer = extract_answer_from_response(entire_response, task_config)
# nicely print the question and generated answer
logging.info("\n" + f"Question ({i}):" + "\n" + question)
logging.info("\n" + f"Answer ({i}):" + "\n" + generated_answer)
res.update(
{
"generated_answer": generated_answer,
"entire_response": entire_response, # everything generated by the model
}
)
if "choices" in response and "logprobs" in response["choices"][0]:
res.update({"logprobs": response["choices"][0]["logprobs"]})
else:
all_responses = OpenaiAPIWrapper.get_all_responses(response)
generated_answer_list = [response["generated_answer"] for response in all_responses]
logprobs = [response["logprobs"] for response in all_responses]
generated_answers = [
extract_answer_from_response(r, task_config) for r in generated_answer_list
]
res.update(
{
"generated_answers": generated_answers,
"generated_answer": generated_answers[0],
"logprobs": logprobs,
}
)
outputs.append(res)
i += 1
pbar.update(1)
except Exception as e:
# raise e
logging.info(f"Exception: {e}")
if "code" not in task_config.model_name:
i += 1
elif num_retries < max_retries:
num_retries += 1
logging.info("Retrying...")
continue
else:
num_retries = 0
i += 1
logging.info("Skipping...")
return outputs
def extract_answer_from_response(response, task_config: TaskConfig) -> str:
"""Extracts the answer from the response generated by LLM.
Args:
response (str): Response from the model
task_config (TaskConfig): TaskConfig
Returns:
str: Answer
"""
if task_config.prompt_config.inter_example_sep and task_config.prompt_config.inter_example_sep in response:
answer = response.split(task_config.prompt_config.inter_example_sep)[0]
else:
answer = response
return answer
| [
"question"
] |
2024-01-10 | HalfBloody/prompt-lib | prompt_lib~backends~openai_api.py | from collections import Counter
import os
from typing import Dict, Any, List, Optional, Union
import openai
import random
import time
import json
from prompt_lib.backends.wrapper import BaseAPIWrapper
from prompt_lib.backends.self_hosted import OpenSourceAPIWrapper
from prompt_lib.backends.anthropic_api import AnthropicAPIWrapper
openai.api_key = os.getenv("OPENAI_API_KEY")
# check if orgainization is set
if os.getenv("OPENAI_ORG") is not None:
openai.organization = os.getenv("OPENAI_ORG")
# from https://github.com/openai/openai-cookbook/blob/main/examples/How_to_handle_rate_limits.ipynb
def retry_with_exponential_backoff(
func,
initial_delay: float = 1,
exponential_base: float = 2,
jitter: bool = True,
max_retries: int = 10,
    errors: tuple = (openai.error.RateLimitError, openai.error.ServiceUnavailableError),
):
"""Retry a function with exponential backoff."""
def wrapper(*args, **kwargs):
# Initialize variables
num_retries = 0
delay = initial_delay
# Loop until a successful response or max_retries is hit or an exception is raised
while True:
try:
return func(*args, **kwargs)
# Retry on specified errors
except errors as e:
# Increment retries
num_retries += 1
# Check if max retries has been reached
if num_retries > max_retries:
raise Exception(f"Maximum number of retries ({max_retries}) exceeded.")
# Increment the delay
delay *= exponential_base * (1 + jitter * random.random())
# Sleep for the delay
time.sleep(delay)
# Raise exceptions for any errors not specified
except Exception as e:
raise e
return wrapper
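# Illustrative usage sketch (added for clarity; the wrapped function is hypothetical):
#
#     @retry_with_exponential_backoff
#     def _fetch_completion(**kwargs):
#         return openai.Completion.create(**kwargs)
#
# A RateLimitError or ServiceUnavailableError triggers a sleep that grows by roughly a
# factor of exponential_base (plus random jitter) on each retry, up to max_retries
# attempts; any other exception is re-raised immediately.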
class CompletionAPIWrapper(BaseAPIWrapper):
@staticmethod
@retry_with_exponential_backoff
def _call_api(
prompt: str,
max_tokens: int,
engine: str,
stop_token: str,
temperature: float,
num_completions: int = 1,
top_p: float = 1,
logprobs: Optional[int] = None,
) -> dict:
response = openai.Completion.create(
model=engine,
prompt=prompt,
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
stop=[stop_token],
n=num_completions,
logprobs=logprobs,
)
return response
@staticmethod
def call(
prompt: str,
max_tokens: int,
engine: str,
stop_token: str,
temperature: float,
num_completions: int = 1,
top_p: float = 1,
logprobs: Optional[int] = None,
) -> dict:
if num_completions > 2:
response_combined = dict()
num_completions_remaining = num_completions
for i in range(0, num_completions, 2):
response = CompletionAPIWrapper._call_api(
prompt=prompt,
max_tokens=max_tokens,
engine=engine,
stop_token=stop_token,
temperature=temperature,
top_p=top_p,
num_completions=min(num_completions_remaining, 2),
logprobs=logprobs,
)
num_completions_remaining -= 2
if i == 0:
response_combined = response
else:
response_combined["choices"] += response["choices"]
return response_combined
response = CompletionAPIWrapper._call_api(
prompt=prompt,
max_tokens=max_tokens,
engine=engine,
stop_token=stop_token,
temperature=temperature,
num_completions=num_completions,
logprobs=logprobs,
top_p=top_p,
)
return response
@staticmethod
def get_first_response(response) -> Dict[str, Any]:
"""Returns the first response from the list of responses."""
text = response["choices"][0]["text"]
return text
@staticmethod
def get_majority_answer(response) -> Dict[str, Any]:
"""Returns the majority answer from the list of responses."""
answers = [choice["text"] for choice in response["choices"]]
answers = Counter(answers)
        # if there is only one distinct answer, return it directly
        if len(answers) == 1:
            return answers.most_common(1)[0][0]
        # if there is a tie, return the first answer
        if answers.most_common(1)[0][1] == answers.most_common(2)[1][1]:
return CompletionAPIWrapper.get_first_response(response)
return answers.most_common(1)[0][0]
@staticmethod
def get_all_responses(response) -> Dict[str, Any]:
"""Returns the list of responses."""
return [
{
"generated_answer": choice["text"],
"logprobs": choice["logprobs"] if "logprobs" in choice else None,
}
for choice in response["choices"]
]
class ChatGPTAPIWrapper(BaseAPIWrapper):
@staticmethod
@retry_with_exponential_backoff
def call(
prompt: Union[str, List[Dict[str, str]]],
max_tokens: int,
engine: str,
stop_token: str,
temperature: float,
top_p: float = 1,
num_completions: int = 1,
system_message: Optional[str] = None,
) -> dict:
"""Calls the Chat API.
if the num_completions is > 2, we call the API multiple times. This is to prevent
overflow issues that can occur when the number of completions is too large.
"""
system_message = (
system_message or "You are ChatGPT, a large language model trained by OpenAI."
)
if isinstance(prompt, str):
messages = []
if system_message:
messages.append({"role": "system", "content": system_message})
messages.append({"role": "user", "content": prompt})
elif isinstance(prompt, list):
messages = prompt
if system_message:
messages.insert(0, {"role": "system", "content": system_message})
else:
raise ValueError(
"Invalid prompt type. Prompt should be a string or a list of messages."
)
if num_completions > 2:
response_combined = dict()
num_completions_remaining = num_completions
for i in range(0, num_completions, 2):
# note that we are calling the same function --- this prevents backoff from being reset for the entire function
response = ChatGPTAPIWrapper.call(
prompt=prompt,
max_tokens=max_tokens,
engine=engine,
stop_token=stop_token,
temperature=temperature,
top_p=top_p,
num_completions=min(num_completions_remaining, 2),
)
num_completions_remaining -= 2
if i == 0:
response_combined = response
else:
response_combined["choices"] += response["choices"]
return response_combined
response = openai.ChatCompletion.create(
model=engine,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
stop=[stop_token] if stop_token else None,
# logprobs=3,
n=num_completions,
)
return response
@staticmethod
def get_first_response(response) -> Dict[str, Any]:
"""Returns the first response from the list of responses."""
text = response["choices"][0]["message"]["content"]
return text
@staticmethod
def get_majority_answer(response) -> Dict[str, Any]:
"""Returns the majority answer from the list of responses."""
answers = [choice["message"]["content"] for choice in response["choices"]]
answers = Counter(answers)
        # if there is only one unique answer, return it directly
        if len(answers) == 1:
            return answers.most_common(1)[0][0]
        # if there is a tie, return the first answer
        if answers.most_common(1)[0][1] == answers.most_common(2)[1][1]:
            return ChatGPTAPIWrapper.get_first_response(response)
return answers.most_common(1)[0][0]
@staticmethod
def get_all_responses(response) -> Dict[str, Any]:
"""Returns the list of responses."""
return [choice["message"]["content"] for choice in response["choices"]] # type: ignore
class OpenaiAPIWrapper:
chat_engines = ["gpt-3.5-turbo", "gpt-4"]
opensource_engines = ["self-vulcan-13b", "self-vicuna-13b", "togethercomputer/llama-2-70b"]
@staticmethod
def get_api_wrapper(engine: str) -> BaseAPIWrapper:
if any(k in engine for k in OpenaiAPIWrapper.chat_engines):
return ChatGPTAPIWrapper
elif engine in OpenaiAPIWrapper.opensource_engines:
return OpenSourceAPIWrapper
elif "claude" in engine:
return AnthropicAPIWrapper
else:
return CompletionAPIWrapper
@staticmethod
def call(
prompt: str,
max_tokens: int,
engine: str,
stop_token: str,
temperature: float,
num_completions: int = 1,
**kwargs,
) -> dict:
api_wrapper = OpenaiAPIWrapper.get_api_wrapper(engine)
return api_wrapper.call(
prompt=prompt, max_tokens=max_tokens, engine=engine, stop_token=stop_token, temperature=temperature, num_completions=num_completions, **kwargs
)
@staticmethod
def get_first_response(response) -> Dict[str, Any]:
api_wrapper = OpenaiAPIWrapper.get_api_wrapper(response["model"])
return api_wrapper.get_first_response(response)
@staticmethod
def get_majority_answer(response) -> Dict[str, Any]:
api_wrapper = OpenaiAPIWrapper.get_api_wrapper(response["model"])
return api_wrapper.get_majority_answer(response)
@staticmethod
def get_all_responses(response) -> Dict[str, Any]:
api_wrapper = OpenaiAPIWrapper.get_api_wrapper(response["model"])
return api_wrapper.get_all_responses(response)
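# Illustrative dispatch of engine names to wrapper classes, inferred from get_api_wrapper
# above ("claude-2" is an assumed example engine name, not taken from this file):
#   OpenaiAPIWrapper.get_api_wrapper("gpt-4")            -> ChatGPTAPIWrapper
#   OpenaiAPIWrapper.get_api_wrapper("claude-2")         -> AnthropicAPIWrapper
#   OpenaiAPIWrapper.get_api_wrapper("self-vicuna-13b")  -> OpenSourceAPIWrapper
#   OpenaiAPIWrapper.get_api_wrapper("text-davinci-003") -> CompletionAPIWrapper (fallback)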
def test_completion():
prompt = 'Optimize the following Python code:\n\n# Start of code\n\nimport sys\n\nimport numpy as np\n\nn,m = [int(x) for x in sys.stdin.readline().split()]\n\nr = np.zeros(n)\n\nfor i in range(m):\n\n\ta, b = [int(x) for x in sys.stdin.readline().split()]\n\n\tr[a-1] += 1\n\n\tr[b-1] += 1\n\nfor i in range(n):\n\n\tprint((int(r[i])))\n\n# End of code\nRewrite the above Python code only from "Start of code" to "End of code", to make it more efficient WITHOUT CHANGING ITS RESULTS. Assume the code has already executed all the imports; do NOT include them in the optimized code.\n\nUse native libraries if that would make it faster than pure Python.\n\nYour output should only consist of valid Python code. Output the resulting Python with brief explanations only included as comments prefaced with #. Include a detailed explanatory comment before the code, starting with the text "# Proposed optimization:". Make the code as clear and simple as possible, while also making it as fast and memory-efficient as possible. Use vectorized operations whenever it would substantially increase performance, and quantify the speedup in terms of orders of magnitude. Eliminate as many for loops, while loops, and list or dict comprehensions as possible, replacing them with vectorized equivalents. If the performance is not likely to increase, leave the code unchanged. Fix any errors in the optimized code.'
engine = "text-davinci-003"
num_completions = 3
max_tokens = 300
response = OpenaiAPIWrapper.call(
prompt=prompt,
max_tokens=max_tokens,
engine=engine,
stop_token="Optimize the following Python code:\n\n",
temperature=0.7,
num_completions=num_completions,
)
print(response)
print(OpenaiAPIWrapper.get_first_response(response))
print(OpenaiAPIWrapper.get_majority_answer(response))
def test_chat():
prompt = 'Optimize the following Python code:\n\n# Start of code\n\nimport sys\n\nimport numpy as np\n\nn,m = [int(x) for x in sys.stdin.readline().split()]\n\nr = np.zeros(n)\n\nfor i in range(m):\n\n\ta, b = [int(x) for x in sys.stdin.readline().split()]\n\n\tr[a-1] += 1\n\n\tr[b-1] += 1\n\nfor i in range(n):\n\n\tprint((int(r[i])))\n\n# End of code\nRewrite the above Python code only from "Start of code" to "End of code", to make it more efficient WITHOUT CHANGING ITS RESULTS. Assume the code has already executed all the imports; do NOT include them in the optimized code.\n\nUse native libraries if that would make it faster than pure Python.\n\nYour output should only consist of valid Python code. Output the resulting Python with brief explanations only included as comments prefaced with #. Include a detailed explanatory comment before the code, starting with the text "# Proposed optimization:". Make the code as clear and simple as possible, while also making it as fast and memory-efficient as possible. Use vectorized operations whenever it would substantially increase performance, and quantify the speedup in terms of orders of magnitude. Eliminate as many for loops, while loops, and list or dict comprehensions as possible, replacing them with vectorized equivalents. If the performance is not likely to increase, leave the code unchanged. Fix any errors in the optimized code.'
engine = "gpt-3.5-turbo"
num_completions = 3
max_tokens = 300
response = OpenaiAPIWrapper.call(
prompt=prompt,
max_tokens=max_tokens,
engine=engine,
stop_token="End of code",
temperature=0.7,
num_completions=num_completions,
)
print(response)
print(OpenaiAPIWrapper.get_first_response(response))
print(OpenaiAPIWrapper.get_majority_answer(response))
def test_basic_chat():
prompt = "What is the capital of France?"
engine = "gpt-3.5-turbo"
max_tokens = 10
response = OpenaiAPIWrapper.call(
prompt=prompt,
max_tokens=max_tokens,
engine=engine,
temperature=0.7,
stop_token=None,
num_completions=1,
)
print(json.dumps(response, indent=2))
print(OpenaiAPIWrapper.get_first_response(response))
def test_chat_with_system_message():
prompt = "What is the capital of France?"
engine = "gpt-3.5-turbo"
max_tokens = 10
system_message = "You are ChatGPT, a large language model trained by OpenAI."
response = OpenaiAPIWrapper.call(
prompt=prompt,
max_tokens=max_tokens,
engine=engine,
stop_token=None,
temperature=0.7,
num_completions=1,
system_message=system_message,
)
print(json.dumps(response, indent=2))
print(OpenaiAPIWrapper.get_first_response(response))
def test_chat_with_multiple_completions():
prompt = "What is the capital of France?"
engine = "gpt-3.5-turbo"
max_tokens = 10
response = OpenaiAPIWrapper.call(
prompt=prompt,
max_tokens=max_tokens,
engine=engine,
stop_token=None,
temperature=0.7,
num_completions=3,
)
print(json.dumps(response, indent=2))
print(OpenaiAPIWrapper.get_first_response(response))
print(OpenaiAPIWrapper.get_majority_answer(response))
print(OpenaiAPIWrapper.get_all_responses(response))
def test_chat_with_message_list():
messages = [
{"role": "system", "content": "You are ChatGPT, a large language model trained by OpenAI."},
{"role": "user", "content": "What is the capital of France?"},
]
engine = "gpt-3.5-turbo"
max_tokens = 10
response = OpenaiAPIWrapper.call(
prompt=messages,
max_tokens=max_tokens,
engine=engine,
stop_token=None,
temperature=0.7,
num_completions=1,
)
print(json.dumps(response, indent=2))
print(OpenaiAPIWrapper.get_first_response(response))
# Test case 1: Test with basic parameters
def test_completion_basic_parameters():
prompt = "Once upon a time"
max_tokens = 50
engine = "text-davinci-002"
stop_token = "\n"
temperature = 0.8
response = CompletionAPIWrapper.call(prompt, max_tokens, engine, stop_token, temperature)
assert "choices" in response, "Test case 1 failed: 'choices' not found in the response"
print("Test case 1 passed")
# Test case 2: Test with multiple completions
def test_completion_multiple_completions():
prompt = "Once upon a time"
max_tokens = 50
engine = "text-davinci-002"
stop_token = "\n"
temperature = 0.8
num_completions = 3
response = CompletionAPIWrapper.call(
prompt, max_tokens, engine, stop_token, temperature, num_completions
)
assert "choices" in response, "Test case 2 failed: 'choices' not found in the response"
assert (
len(response["choices"]) == num_completions
), f"Test case 2 failed: expected {num_completions} completions, but got {len(response['choices'])}"
print("Test case 2 passed")
# Test case 3: Test helper methods
def test_completion_helper_methods():
prompt = "Once upon a time"
max_tokens = 50
engine = "text-davinci-002"
stop_token = "\n"
temperature = 0.8
num_completions = 2
response = CompletionAPIWrapper.call(
prompt, max_tokens, engine, stop_token, temperature, num_completions
)
first_response = CompletionAPIWrapper.get_first_response(response)
assert isinstance(
first_response, str
), "Test case 3 failed: 'get_first_response' did not return a string"
majority_answer = CompletionAPIWrapper.get_majority_answer(response)
assert isinstance(
majority_answer, str
), "Test case 3 failed: 'get_majority_answer' did not return a string"
all_responses = CompletionAPIWrapper.get_all_responses(response)
assert isinstance(
all_responses, list
), "Test case 3 failed: 'get_all_responses' did not return a list"
print("Test case 3 passed")
def test_top_p():
print(f"Testing top_p")
prompt = "Once upon a time"
max_tokens = 50
engine = "text-davinci-002"
stop_token = "\n"
temperature = 0.8
num_completions = 2
top_p = 0.5
response = CompletionAPIWrapper.call(
prompt, max_tokens, engine, stop_token, temperature, num_completions, top_p
)
    first_response = CompletionAPIWrapper.get_first_response(response)
    assert isinstance(
        first_response, str
    ), "top_p test failed: 'get_first_response' did not return a string"
    majority_answer = CompletionAPIWrapper.get_majority_answer(response)
    assert isinstance(
        majority_answer, str
    ), "top_p test failed: 'get_majority_answer' did not return a string"
    all_responses = CompletionAPIWrapper.get_all_responses(response)
    assert isinstance(
        all_responses, list
    ), "top_p test failed: 'get_all_responses' did not return a list"
    print("top_p completion test passed")
# top_p with chat
engine = "gpt-3.5-turbo"
for top_p in [0.0001, 0.01, 0.2, 0.5, 0.75, 0.9]:
response = OpenaiAPIWrapper.call(prompt, max_tokens, engine, stop_token, temperature, num_completions=2, top_p=top_p)
print(f"Top_p={top_p}: {OpenaiAPIWrapper.get_all_responses(response)}")
if __name__ == "__main__":
print("Testing basic chat")
test_basic_chat()
print("Testing chat with system message")
test_chat_with_system_message()
print("Testing chat with multiple completions")
test_chat_with_multiple_completions()
print("Testing chat with message list")
test_chat_with_message_list()
# test the API
print("Testing completion API")
test_completion()
print("Testing chat API")
test_chat()
test_completion_basic_parameters()
test_completion_multiple_completions()
test_completion_helper_methods()
test_top_p()
| [
"Once upon a time",
"What is the capital of France?",
"Optimize the following Python code:\n\n# Start of code\n\nimport sys\n\nimport numpy as np\n\nn,m = [int(x) for x in sys.stdin.readline().split()]\n\nr = np.zeros(n)\n\nfor i in range(m):\n\n\ta, b = [int(x) for x in sys.stdin.readline().split()]\n\n\tr[a-1] += 1\n\n\tr[b-1] += 1\n\nfor i in range(n):\n\n\tprint((int(r[i])))\n\n# End of code\nRewrite the above Python code only from \"Start of code\" to \"End of code\", to make it more efficient WITHOUT CHANGING ITS RESULTS. Assume the code has already executed all the imports; do NOT include them in the optimized code.\n\nUse native libraries if that would make it faster than pure Python.\n\nYour output should only consist of valid Python code. Output the resulting Python with brief explanations only included as comments prefaced with #. Include a detailed explanatory comment before the code, starting with the text \"# Proposed optimization:\". Make the code as clear and simple as possible, while also making it as fast and memory-efficient as possible. Use vectorized operations whenever it would substantially increase performance, and quantify the speedup in terms of orders of magnitude. Eliminate as many for loops, while loops, and list or dict comprehensions as possible, replacing them with vectorized equivalents. If the performance is not likely to increase, leave the code unchanged. Fix any errors in the optimized code.",
"You are ChatGPT, a large language model trained by OpenAI."
] |
2024-01-10 | RasaHQ/OpenAI_func_calling | actions~actions.py | import os
from typing import Any, Text, Dict, List
import pandas as pd
import requests
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.events import SlotSet
import openai
import json
class RestaurantAPI(object):
def __init__(self):
self.db = pd.read_csv("restaurants.csv")
def fetch_restaurants(self):
return self.db.head()
def format_restaurants(self, df, header=True) -> Text:
return df.to_csv(index=False, header=header)
class ChatGPT(object):
def __init__(self):
self.url = "https://api.openai.com/v1/chat/completions"
self.model = "gpt-3.5-turbo"
self.headers={
"Content-Type": "application/json",
"Authorization": f"Bearer {os.getenv('OPENAI_API_KEY')}"
}
self.prompt = "Answer the following question, based on the data shown. " \
"Answer in a complete sentence and don't say anything else."
def ask(self, restaurants, question):
content = self.prompt + "\n\n" + restaurants + "\n\n" + question
body = {
"model":self.model,
"messages":[{"role": "user", "content": content}]
}
result = requests.post(
url=self.url,
headers=self.headers,
json=body,
)
return result.json()["choices"][0]["message"]["content"]
def ask_distance(restaurant_list):
content = "measure the least distance with each given restaurant" +'/n/n' + restaurant_list
completion = openai.ChatCompletion.create(
model="gpt-4-0613",
messages=[{"role": "user", "content": content}],
functions=[
{
"name": "get_measure",
"description": "Get the least distance",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "list of all the restaurants and distances as a dictionary(restuarant_name:distance)",
},
},
"required": ["distance"],
},
}
],
function_call={"name":"get_measure"}
)
return completion.choices[0].message
restaurant_api = RestaurantAPI()
chatGPT = ChatGPT()
class ActionShowRestaurants(Action):
def name(self) -> Text:
return "action_show_restaurants"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
restaurants = restaurant_api.fetch_restaurants()
results = restaurant_api.format_restaurants(restaurants)
readable = restaurant_api.format_restaurants(restaurants[['Restaurants', 'Rating']], header=False)
dispatcher.utter_message(text=f"Here are some restaurants:\n\n{readable}")
return [SlotSet("results", results)]
def get_distance(d):
d = json.loads(d)
for i in d.keys():
d[i]= float(d[i])
t = min(d, key =d.get)
return t
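# Example (hypothetical values, not from the original file) of what get_distance expects:
#   get_distance('{"Pasta Place": "2.4", "Sushi Spot": "1.1"}')  ->  "Sushi Spot"
# i.e. the key whose value parses to the smallest float.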
class ActionRestaurantsDetail(Action):
def name(self) -> Text:
return "action_restaurants_detail"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
previous_results = tracker.get_slot("results")
question = tracker.latest_message["text"]
answer = chatGPT.ask(previous_results, question)
dispatcher.utter_message(text = answer)
class ActionRestaurantsDistance(Action):
def name(self) -> Text:
return "action_distance"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
previous_results = tracker.get_slot("results")
func_calling= ask_distance(previous_results)
reply_content = func_calling.to_dict()['function_call']['arguments']
        distance = json.loads(reply_content)['distance']
dispatcher.utter_message(text = get_distance(distance)) | [] |
2024-01-10 | diegomarzaa/gpt-tests | Hello.py | from openai import OpenAI
from dotenv import load_dotenv
import os
import streamlit as st
import base64
from PIL import Image
from io import BytesIO
import re
def encode_image(image_file):
"""Encode image to base64 string."""
    # PIL's Image.open accepts both file paths and file-like objects, so no branching is needed
    image = Image.open(image_file)
# Convert RGBA images to RGB
if image.mode in ("RGBA", "P"):
image = image.convert("RGB")
buffered = BytesIO()
image.save(buffered, format="JPEG")
return base64.b64encode(buffered.getvalue()).decode("utf-8")
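# Illustrative only ('foto.jpg' is a hypothetical file name): the returned string is meant
# to be embedded in a data URL, exactly as main() does below:
#   f"data:image/jpeg;base64,{encode_image('foto.jpg')}"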
def main():
    # SESSION VARIABLES
if "usos_dev_key" not in st.session_state:
st.session_state.usos_dev_key = 1
if "api_key" not in st.session_state:
st.session_state.api_key = None
if "chat" not in st.session_state:
st.session_state.chat = None
# PAGE CONFIG
st.set_page_config(page_title="DESCRIPTOR DE IMÁGENES", page_icon=":robot_face:", layout="centered")
st.header("DESCRIPTOR DE IMÁGENES")
st.write("1 - Introduce la API KEY de OpenAI (si no tienes, escribe 'contraseña', de esta forma usarás la clave gratuita de Dieguito, pero solo puedes usarla 1 vez)"
"\n\n2 - Sube una imagen"
"\n\n3 - Si quieres, añade instrucciones personalizadas, esto no es necesario, por defecto se describirá la imagen."
"\n\n4 - Pulsa el botón de analizar imagen y espera a que se genere la descripción.\n\n\n")
################## API KEY ##################
load_dotenv()
secret_developer_key = os.getenv("OPENAI_API_KEY")
col1, col2, col3 = st.columns([3, 2, 2])
with col1:
input_key = st.text_input("API KEY", placeholder="Introduce la API key", type="password")
with col2:
        # Move the button down a bit
st.markdown("""<style>.css-1aumxhk {margin-top: 3rem;}</style>""", unsafe_allow_html=True)
st.markdown("""<style>.css-1aumxhk {margin-top: 2rem;}</style>""", unsafe_allow_html=True)
boton_key = st.button("Guardar API KEY")
if boton_key:
with col3:
success_message = st.empty()
st.markdown("""<style>.css-1aumxhk {margin-top: 3rem;}</style>""", unsafe_allow_html=True)
st.markdown("""<style>.css-1aumxhk {margin-top: 2rem;}</style>""", unsafe_allow_html=True)
if re.match(r"sk-[a-zA-Z0-9]+", input_key):
st.session_state["api_key"] = input_key
success_message.success("Clave cargada!")
try:
st.session_state["chat"] = OpenAI(api_key=st.session_state["api_key"])
except Exception as e:
success_message.error(f"Error: {e}")
elif input_key == "contraseña":
st.session_state["api_key"] = secret_developer_key
success_message.success("Clave gratuita cargada! (solo 1 uso)")
            # Initialize the OpenAI client
try:
st.session_state["chat"] = OpenAI(api_key=st.session_state["api_key"])
except Exception as e:
success_message.error(f"Error: {e}")
elif input_key == "":
success_message.error("No dejes el campo vacío bobo")
else:
success_message.error("Este tipo de clave es inválida!")
# UPLOAD IMAGE
uploaded_file = st.file_uploader("Sube una fotito", type=["png", "jpg", "jpeg"])
if uploaded_file:
# DISPLAY IMAGE
st.image(uploaded_file, width=250)
# Toggle details
show_details = st.toggle("Agregar instrucciones?", value=False)
if show_details:
            # Additional-details text area
additional_details = st.text_area(
"Añade conexto adicional: ",
disabled=not show_details,
)
        # Submit button
analyze_button = st.button("Analizar imagen")
    if uploaded_file and st.session_state['api_key'] is not None and analyze_button and st.session_state['chat'] and st.session_state["usos_dev_key"] > 0:
print("Analizando imagen...")
        # Subtract one use of the key
st.session_state["usos_dev_key"] -= 1
        # Loading indicator
with st.spinner("Analizando imagen..."):
# Encode image
base64_image = encode_image(uploaded_file)
            # Optimized prompt + extra details
prompt_text = (
"Eres un analizador de imágenes."
"Tu tarea es analizar la imagen en gran detalle."
"Presenta tu análisis markdown, no uses los carácteres: ``` para rodear tu texto."
)
if show_details and additional_details:
prompt_text += (
f'\n\nContexto adicional:\n{additional_details}'
)
            # Build the request payload
messages = [
{
"role": "user",
"content": [
{"type": "text", "text": prompt_text},
{
"type": "image_url",
"image_url": f"data:image/jpeg;base64,{base64_image}",
},
],
}
]
            # Send the request to the OpenAI servers
try:
                # Without streaming
# response = chat.chat.completions.create(
# model='gpt-4-vision-preview', messages=messages, max_tokens=100, stream=False
# )
                # With streaming
full_response = ""
message_placeholder = st.empty()
for completion in st.session_state["chat"].chat.completions.create(
model='gpt-4-vision-preview', messages=messages, max_tokens=1200, stream=True
):
                    # Is there content?
if completion.choices[0].delta.content is not None:
full_response += completion.choices[0].delta.content
message_placeholder.markdown(full_response + " ")
                # Final message once the stream ends
message_placeholder.markdown(full_response)
                # Put the response in the app
# st.write(completion.choices[0].messages.content)
except Exception as e:
st.error(f"Error: {e}")
else:
if not uploaded_file and analyze_button:
st.warning("Sube una imagen!")
elif not st.session_state["api_key"] and analyze_button:
st.warning("Necesitas una API KEY de OpenAI para usar esta app!")
elif st.session_state["usos_dev_key"] <= 0 and analyze_button:
st.warning("Has usado la clave gratuita de Dieguito demasiadas veces!")
elif analyze_button:
st.warning("Error")
if __name__ == "__main__":
main() | [
"\n\nContexto adicional:\nPLACEHOLDER",
"Eres un analizador de imágenes.Tu tarea es analizar la imagen en gran detalle.Presenta tu análisis markdown, no uses los carácteres: ``` para rodear tu texto.",
"[{'type': 'text', 'text': PLACEHOLDER}, {'type': 'image_url', 'image_url': 'data:image/jpeg;base64,PLACEHOLDER'}]"
] |
2024-01-10 | shaiyon/SubredditBot | extra~sample.py | # Script from OpenAI: https://github.com/nshepperd/gpt-2/blob/finetuning/src/sample.py
import tensorflow as tf
import extra.model as model
def top_k_logits(logits, k):
if k == 0:
# no truncation
return logits
def _top_k():
values, _ = tf.nn.top_k(logits, k=k)
min_values = values[:, -1, tf.newaxis]
return tf.where(
logits < min_values,
tf.ones_like(logits, dtype=logits.dtype) * -1e10,
logits,
)
return tf.cond(
tf.equal(k, 0),
lambda: logits,
lambda: _top_k(),
)
def top_p_logits(logits, p):
with tf.variable_scope('top_p_logits'):
logits_sort = tf.sort(logits, direction='DESCENDING')
probs_sort = tf.nn.softmax(logits_sort)
probs_sums = tf.cumsum(probs_sort, axis=1, exclusive=True)
logits_masked = tf.where(probs_sums < p, logits_sort, tf.ones_like(logits_sort)*1000) # [batchsize, vocab]
min_logits = tf.reduce_min(logits_masked, axis=1, keepdims=True) # [batchsize, 1]
return tf.where(
logits < min_logits,
tf.ones_like(logits, dtype=logits.dtype) * -1e10,
logits,
)
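# Hypothetical numeric sketch of nucleus (top-p) filtering (values are assumptions):
#   logits = [[2.0, 1.0, 0.5, -1.0]]  ->  softmax ~ [[0.61, 0.22, 0.14, 0.03]]
# with p = 0.8 the exclusive cumulative sums are [0, 0.61, 0.83, 0.97], so only the two
# most likely tokens stay below p; every other logit is replaced with -1e10 and is
# effectively never sampled.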
def sample_sequence(*, hparams, length, start_token=None, batch_size=None, context=None, temperature=1, top_k=0, top_p=0.0):
if start_token is None:
assert context is not None, 'Specify exactly one of start_token and context!'
else:
assert context is None, 'Specify exactly one of start_token and context!'
context = tf.fill([batch_size, 1], start_token)
def step(hparams, tokens, past=None):
lm_output = model.model(hparams=hparams, X=tokens, past=past, reuse=tf.AUTO_REUSE)
logits = lm_output['logits'][:, :, :hparams.n_vocab]
presents = lm_output['present']
presents.set_shape(model.past_shape(hparams=hparams, batch_size=batch_size))
return {
'logits': logits,
'presents': presents,
}
with tf.name_scope('sample_sequence'):
# Don't feed the last context token -- leave that to the loop below
# TODO: Would be slightly faster if we called step on the entire context,
# rather than leaving the last token transformer calculation to the while loop.
context_output = step(hparams, context[:, :-1])
def body(past, prev, output):
next_outputs = step(hparams, prev[:, tf.newaxis], past=past)
logits = next_outputs['logits'][:, -1, :] / tf.to_float(temperature)
if top_p > 0.0:
logits = top_p_logits(logits, p=top_p)
else:
logits = top_k_logits(logits, k=top_k)
samples = tf.multinomial(logits, num_samples=1, output_dtype=tf.int32)
return [
tf.concat([past, next_outputs['presents']], axis=-2),
tf.squeeze(samples, axis=[1]),
tf.concat([output, samples], axis=1),
]
def cond(*args):
return True
_, _, tokens = tf.while_loop(
cond=cond, body=body,
maximum_iterations=length,
loop_vars=[
context_output['presents'],
context[:, -1],
context,
],
shape_invariants=[
tf.TensorShape(model.past_shape(hparams=hparams, batch_size=batch_size)),
tf.TensorShape([batch_size]),
tf.TensorShape([batch_size, None]),
],
back_prop=False,
)
return tokens | [] |
2024-01-10 | shaiyon/SubredditBot | extra~encoder.py | # Script from OpenAI: https://github.com/nshepperd/gpt-2/blob/finetuning/src/encoder.py
"""Byte pair encoding utilities"""
import os
import json
import regex as re
from functools import lru_cache
@lru_cache()
def bytes_to_unicode():
"""
    Returns list of utf-8 bytes and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
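# Illustrative check (not part of the original file): the mapping covers all 256 byte
# values with distinct printable characters, so encoding is lossless and reversible.
#   mapping = bytes_to_unicode()
#   len(mapping) == 256 and len(set(mapping.values())) == 256   # True
#   mapping[ord("A")] == "A"; mapping[ord(" ")] == "\u0120"     # space maps to 'Ġ'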
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
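# Example (illustrative only):
#   get_pairs(("h", "e", "l", "l", "o"))  ->  {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}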
class Encoder:
def __init__(self, encoder, bpe_merges, errors='replace'):
self.encoder = encoder
self.decoder = {v:k for k,v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v:k for k, v in self.byte_encoder.items()}
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
return text
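# Illustrative round trip (exact token ids depend on the downloaded vocabulary files):
#   enc = get_encoder("124M")          # assumes models/124M/{encoder.json,vocab.bpe} exist
#   ids = enc.encode("Hello world")    # e.g. [15496, 995] with the released GPT-2 vocab
#   enc.decode(ids) == "Hello world"   # decode inverts encode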
def get_encoder(model_name):
with open(os.path.join('models', model_name, 'encoder.json'), 'r') as f:
encoder = json.load(f)
with open(os.path.join('models', model_name, 'vocab.bpe'), 'r', encoding="utf-8") as f:
bpe_data = f.read()
bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split('\n')[1:-1]]
return Encoder(
encoder=encoder,
bpe_merges=bpe_merges,
) | [] |
2024-01-10 | shaiyon/SubredditBot | download_model.py | # Script from OpenAI on https://github.com/openai/gpt-2/blob/master/download_model.py
import os
import sys
import requests
from tqdm import tqdm
if len(sys.argv) != 2:
print('You must enter the model name as a parameter, e.g.: download_model.py 124M')
sys.exit(1)
model = sys.argv[1]
subdir = os.path.join('models', model)
if not os.path.exists(subdir):
os.makedirs(subdir)
subdir = subdir.replace('\\','/') # needed for Windows
for filename in ['checkpoint','encoder.json','hparams.json','model.ckpt.data-00000-of-00001', 'model.ckpt.index', 'model.ckpt.meta', 'vocab.bpe']:
r = requests.get("https://storage.googleapis.com/gpt-2/" + subdir + "/" + filename, stream=True)
with open(os.path.join(subdir, filename), 'wb') as f:
file_size = int(r.headers["content-length"])
chunk_size = 1000
with tqdm(ncols=100, desc="Fetching " + filename, total=file_size, unit_scale=True) as pbar:
# 1k for chunk_size, since Ethernet packet size is around 1500 bytes
for chunk in r.iter_content(chunk_size=chunk_size):
f.write(chunk)
pbar.update(chunk_size) | [] |