_id
stringlengths
2
6
title
stringlengths
9
130
partition
stringclasses
3 values
text
stringlengths
66
10.5k
language
stringclasses
1 value
meta_information
dict
q2200
QuartzTorrent.Reactor.withReadFiber
train
# Run the handler's read logic inside a Fiber bound to the given io.
# If the io has no live read fiber, a new one is created that yields the
# io's read facade to the passed block; the fiber is then resumed so the
# handler can consume whatever data is available.
def withReadFiber(ioInfo)
  if ioInfo.readFiber.nil? || ! ioInfo.readFiber.alive?
    ioInfo.readFiber = Fiber.new do |ioInfo|
      yield ioInfo.readFiberIoFacade
    end
  end
  # Allow handler to read some data.
  # This call will return either if:
  #   1. the handler needs more data but it isn't available yet,
  #   2. if it's read all the data it wanted to read for the current message it's building
  #   3. if a read error occurred.
  #
  # In case 2 the fiber will be dead. In cases 1 and 2, we should select on the socket
  # until data is ready. For case 3, the state of the ioInfo is set to error and the io should be
  # removed.
  ioInfo.readFiber.resume(ioInfo)
  if ioInfo.state == :error
    @currentHandlerCallback = :error
    @handler.error(ioInfo.metainfo, ioInfo.lastReadError)
    disposeIo(ioInfo)
  end
end
ruby
{ "resource": "" }
q2201
QuartzTorrent.PeerManager.manageConnections
train
# Pick which disconnected peers we should open new connections to.
# Returns an array of peers to connect to; empty when we already have
# enough active (handshaking + established) connections.
def manageConnections(classifiedPeers)
  active = classifiedPeers.handshakingPeers.size + classifiedPeers.establishedPeers.size
  return [] unless active < @targetActivePeerCount
  # Shuffle so we don't always retry the same peers first.
  chosen = classifiedPeers.disconnectedPeers.shuffle.first(@targetActivePeerCount - active)
  @logger.debug "There are #{active} peers connected or in handshaking. Will establish #{chosen.size} more connections to peers."
  chosen
end
ruby
{ "resource": "" }
q2202
QuartzTorrent.PeerManager.selectOptimisticPeer
train
# Rotate the optimistically-unchoked peer if the rotation period has elapsed.
# Per the BitTorrent spec:
# "at any one time there is a single peer which is unchoked regardless of its upload rate (if interested, it counts as one of the four allowed downloaders). Which peer is optimistically
# unchoked rotates every 30 seconds. Newly connected peers are three times as likely to start as the current optimistic unchoke as anywhere else in the rotation. This gives them a decent chance
# of getting a complete piece to upload."
def selectOptimisticPeer(classifiedPeers)
  if !@lastOptimisticPeerChangeTime || (Time.new - @lastOptimisticPeerChangeTime > @optimisticPeerChangeDuration)
    list = []
    classifiedPeers.establishedPeers.each do |peer|
      # Newly connected peers get three entries in the lottery instead of one.
      if (Time.new - peer.firstEstablishTime) < @newlyConnectedDuration
        3.times{ list.push peer }
      else
        list.push peer
      end
    end
    @optimisticUnchokePeer = list[rand(list.size)]
    if @optimisticUnchokePeer
      @logger.info "Optimistically unchoked peer set to #{@optimisticUnchokePeer.trackerPeer}"
      @lastOptimisticPeerChangeTime = Time.new
    end
  end
end
ruby
{ "resource": "" }
q2203
Rapidoc.ControllerExtractor.extract_blocks
train
# Pair each "=begin" line index with the corresponding "=end" line index.
# Returns an array of { :init => begin_index, :end => end_index } hashes;
# an unbalanced "=begin" yields a nil :end, as before.
def extract_blocks( lines )
  init_doc_lines = lines.each_index.select{ |i| lines[i].include? "=begin" }
  end_doc_lines = lines.each_index.select{ |i| lines[i].include? "=end" }
  init_doc_lines.zip( end_doc_lines ).map do |init_line, end_line|
    { :init => init_line, :end => end_line }
  end
end
ruby
{ "resource": "" }
q2204
DoSnapshot.Command.create_snapshot
train
# Create a snapshot for the given droplet and optionally clean up old ones.
# Raises SnapshotCreateError on failure; silently returns if the droplet
# could not be powered off (DropletPowerError).
def create_snapshot(droplet) # rubocop:disable MethodLength,Metrics/AbcSize
  fail_if_shutdown(droplet)
  logger.info "Start creating snapshot for droplet id: #{droplet.id} name: #{droplet.name}."
  today = DateTime.now
  name = "#{droplet.name}_#{today.strftime('%Y_%m_%d')}"
  # noinspection RubyResolve
  snapshot_size = api.snapshots(droplet).size
  logger.debug 'Wait until snapshot will be created.'
  api.create_snapshot droplet.id, name
  snapshot_size += 1
  logger.info "Snapshot name: #{name} created successfully."
  logger.info "Droplet id: #{droplet.id} name: #{droplet.name} snapshots: #{snapshot_size}."
  # Cleanup snapshots.
  cleanup_snapshots droplet, snapshot_size if clean
rescue => e
  # Matching on the class *name* rather than the class object — presumably to
  # avoid load-order issues with these constants; verify before changing.
  case e.class.to_s
  when 'DoSnapshot::SnapshotCleanupError'
    raise e.class, e.message, e.backtrace
  when 'DoSnapshot::DropletPowerError'
    # Power-off failed; treated as a non-fatal skip for this droplet.
    return
  else
    raise SnapshotCreateError.new(droplet.id), e.message, e.backtrace
  end
end
ruby
{ "resource": "" }
q2205
DoSnapshot.Command.dispatch_droplets
train
# Walk all droplets and prepare each one for snapshotting, honouring the
# `exclude` blacklist and the `only` whitelist (an empty `only` means all).
def dispatch_droplets
  droplets.each do |droplet|
    droplet_id = droplet.id.to_s
    excluded = exclude.include?(droplet_id)
    allowed = only.empty? || only.include?(droplet_id)
    prepare_droplet(droplet_id, droplet.name) if !excluded && allowed
  end
end
ruby
{ "resource": "" }
q2206
DoSnapshot.Command.prepare_droplet
train
# Look up a droplet by id and, if it exists and is not over the snapshot
# limit, record it as processed and hand it to the thread runner.
def prepare_droplet(id, name)
  logger.debug "Droplet id: #{id} name: #{name}\n"
  droplet = api.droplet id
  # Droplet may have disappeared between listing and lookup.
  return unless droplet
  logger.info "Preparing droplet id: #{droplet.id} name: #{droplet.name} to take snapshot."
  return if too_much_snapshots?(droplet)
  processed_droplet_ids << droplet.id
  thread_runner(droplet)
end
ruby
{ "resource": "" }
q2207
DoSnapshot.Command.cleanup_snapshots
train
# Remove old snapshots for the droplet when it has more than `keep`.
# Raises SnapshotCleanupError if the API cleanup fails.
def cleanup_snapshots(droplet, size) # rubocop:disable Metrics/AbcSize
  return unless size > keep
  warning_size(droplet.id, droplet.name, size)
  logger.debug "Cleaning up snapshots for droplet id: #{droplet.id} name: #{droplet.name}."
  # `size - keep - 1` is the index of the last snapshot to delete
  # (presumably 0-based, oldest first) — confirm against api.cleanup_snapshots.
  api.cleanup_snapshots(droplet, size - keep - 1)
rescue => e
  raise SnapshotCleanupError, e.message, e.backtrace
end
ruby
{ "resource": "" }
q2208
Twilio.AvailablePhoneNumbers.search
train
# Search Twilio's available phone numbers for the given country/resource,
# filtering by the supported query options. Returns the API response.
#
# Fix: the query hash was previously built only `unless opts.empty?`. Since
# `:resource` is deleted from opts first, a call that passed only :resource
# left opts empty and sent `:query => nil` to the API. The params hash is
# now always built (it is simply empty when no filters were given).
def search(opts={})
  iso_country_code = opts[:iso_country_code] || 'US'
  resource = opts.delete(:resource)
  params = {
    :AreaCode => opts[:area_code],
    :InPostalCode => opts[:postal_code],
    :InRegion => opts[:in_region],
    :Contains => opts[:contains],
    :NearLatLong => opts[:near_lat_long],
    :NearNumber => opts[:near_number],
    :InLata => opts[:in_lata],
    :InRateCenter => opts[:in_rate_center],
    :Distance => opts[:distance],
    :Page => opts[:page],
    :PageSize => opts[:page_size]
  }.reject { |k, v| v.nil? }
  Twilio.get("/AvailablePhoneNumbers/#{iso_country_code}/#{resource}", :query => params)
end
ruby
{ "resource": "" }
q2209
QuartzTorrent.PeerClientHandler.addTrackerClient
train
# Register a tracker client for a new torrent, queue the torrent, and kick
# the queue. Raises if a tracker is already registered for the info hash.
# Returns the new TorrentData.
def addTrackerClient(infoHash, info, trackerclient)
  if @torrentData.has_key? infoHash
    raise "There is already a tracker registered for torrent #{QuartzTorrent.bytesToHex(infoHash)}"
  end
  torrentData = TorrentData.new(infoHash, info, trackerclient)
  trackerclient.alarms = torrentData.alarms
  @torrentData[infoHash] = torrentData
  torrentData.info = info
  torrentData.state = :initializing
  queue(torrentData)
  dequeue
  torrentData
end
ruby
{ "resource": "" }
q2210
QuartzTorrent.PeerClientHandler.removeTorrent
train
# Schedule removal of a torrent. The removal itself runs on the reactor via
# an immediate one-shot timer, since the torrent may currently be in use by
# an event handler.
def removeTorrent(infoHash, deleteFiles = false)
  verb = deleteFiles ? "Will" : "Wont"
  @logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Scheduling immediate timer to remove torrent. #{verb} delete downloaded files."
  @reactor.scheduleTimer(0, [:removetorrent, infoHash, deleteFiles], false, true)
end
ruby
{ "resource": "" }
q2211
QuartzTorrent.PeerClientHandler.setDownloadRateLimit
train
# Set (or clear, when bytesPerSecond is nil) the download rate limit for a
# torrent and propagate it to every connected peer's io.
def setDownloadRateLimit(infoHash, bytesPerSecond)
  torrentData = @torrentData[infoHash]
  unless torrentData
    @logger.warn "Asked to set download rate limit for a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
    return
  end
  if bytesPerSecond
    existing = torrentData.downRateLimit
    if existing
      existing.unitsPerSecond = bytesPerSecond
    else
      # Allow a burst of up to twice the sustained rate.
      torrentData.downRateLimit = RateLimit.new(bytesPerSecond, 2 * bytesPerSecond, 0)
    end
  else
    torrentData.downRateLimit = nil
  end
  torrentData.peers.all.each do |peer|
    withPeersIo(peer, "setting download rate limit") do |io|
      io.readRateLimit = torrentData.downRateLimit
    end
  end
end
ruby
{ "resource": "" }
q2212
QuartzTorrent.PeerClientHandler.setUploadRateLimit
train
# Set (or clear, when bytesPerSecond is nil) the upload rate limit for a
# torrent and propagate it to every connected peer's io.
def setUploadRateLimit(infoHash, bytesPerSecond)
  torrentData = @torrentData[infoHash]
  unless torrentData
    @logger.warn "Asked to set upload rate limit for a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
    return
  end
  if bytesPerSecond
    existing = torrentData.upRateLimit
    if existing
      existing.unitsPerSecond = bytesPerSecond
    else
      # Allow a burst of up to twice the sustained rate.
      torrentData.upRateLimit = RateLimit.new(bytesPerSecond, 2 * bytesPerSecond, 0)
    end
  else
    torrentData.upRateLimit = nil
  end
  torrentData.peers.all.each do |peer|
    withPeersIo(peer, "setting upload rate limit") do |io|
      io.writeRateLimit = torrentData.upRateLimit
    end
  end
end
ruby
{ "resource": "" }
q2213
QuartzTorrent.PeerClientHandler.adjustBytesDownloaded
train
# Adjust the downloaded-bytes counters of a torrent by the given delta.
# The update runs on the reactor thread to avoid racing the event loop.
#
# Fix: the warning for an unknown torrent said "adjust uploaded bytes" even
# though this method adjusts *downloaded* bytes.
def adjustBytesDownloaded(infoHash, adjustment)
  torrentData = @torrentData[infoHash]
  if ! torrentData
    @logger.warn "Asked to adjust downloaded bytes for a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
    return
  end
  runInReactorThread do
    torrentData.bytesDownloaded += adjustment
    torrentData.bytesDownloadedDataOnly += adjustment
  end
end
ruby
{ "resource": "" }
q2214
QuartzTorrent.PeerClientHandler.updateDelegateTorrentData
train
# Refresh a TorrentDataDelegate from the live torrent data. Blocks until the
# reactor thread has performed the refresh (via an immediate one-shot timer).
# Returns the refreshed delegate, or nil if the client is stopped.
#
# Fix: the method previously ended with a bare `result` — a local variable
# that was never assigned — which raised NameError at runtime. It now
# returns the delegate that was refreshed.
def updateDelegateTorrentData(delegate)
  return if stopped?
  # Use an immediate, non-recurring timer.
  semaphore = Semaphore.new
  @reactor.scheduleTimer(0, [:update_torrent_data, delegate, semaphore], false, true)
  semaphore.wait
  delegate
end
ruby
{ "resource": "" }
q2215
QuartzTorrent.PeerClientHandler.serverInit
train
# Handle an incoming peer connection (we are the listening side): validate
# the handshake, reply with ours, reconcile against known peers, and set up
# the peer's initial choke/interest state and bitfield.
def serverInit(metadata, addr, port)
  # A peer connected to us
  # Read handshake message
  @logger.warn "Peer connection from #{addr}:#{port}"
  begin
    msg = PeerHandshake.unserializeExceptPeerIdFrom currentIo
  rescue
    @logger.warn "Peer failed handshake: #{$!}"
    close
    return
  end
  torrentData = torrentDataForHandshake(msg, "#{addr}:#{port}")
  # Are we tracking this torrent?
  if !torrentData
    @logger.warn "Peer sent handshake for unknown torrent"
    close
    return
  end
  trackerclient = torrentData.trackerClient
  # If we already have too many connections, don't allow this connection.
  classifiedPeers = ClassifiedPeers.new torrentData.peers.all
  if classifiedPeers.establishedPeers.length > @targetActivePeerCount
    @logger.warn "Closing connection to peer from #{addr}:#{port} because we already have #{classifiedPeers.establishedPeers.length} active peers which is > the target count of #{@targetActivePeerCount} "
    close
    return
  end
  # Send handshake
  outgoing = PeerHandshake.new
  outgoing.peerId = trackerclient.peerId
  outgoing.infoHash = torrentData.infoHash
  outgoing.serializeTo currentIo
  # Send extended handshake if the peer supports extensions
  # (bit 0x10 of reserved byte 5 per BEP 10).
  if (msg.reserved.unpack("C8")[5] & 0x10) != 0
    @logger.warn "Peer supports extensions. Sending extended handshake"
    extended = Extension.createExtendedHandshake torrentData.info
    extended.serializeTo currentIo
  end
  # Read incoming handshake's peerid
  msg.peerId = currentIo.read(PeerHandshake::PeerIdLen)
  if msg.peerId == trackerclient.peerId
    @logger.info "We got a connection from ourself. Closing connection."
    close
    return
  end
  # Reconcile the connecting peer against peers we already know by id.
  peer = nil
  peers = torrentData.peers.findById(msg.peerId)
  if peers
    peers.each do |existingPeer|
      if existingPeer.state != :disconnected
        @logger.warn "Peer with id #{msg.peerId} created a new connection when we already have a connection in state #{existingPeer.state}. Closing new connection."
        close
        return
      else
        if existingPeer.trackerPeer.ip == addr && existingPeer.trackerPeer.port == port
          peer = existingPeer
        end
      end
    end
  end
  if ! peer
    peer = Peer.new(TrackerPeer.new(addr, port))
    updatePeerWithHandshakeInfo(torrentData, msg, peer)
    torrentData.peers.add peer
    if ! peers
      @logger.warn "Unknown peer with id #{msg.peerId} connected."
    else
      @logger.warn "Known peer with id #{msg.peerId} connected from new location."
    end
  else
    @logger.warn "Known peer with id #{msg.peerId} connected from known location."
  end
  @logger.info "Peer #{peer} connected to us. "
  # Initial BitTorrent state: both sides choked, neither interested.
  peer.state = :established
  peer.amChoked = true
  peer.peerChoked = true
  peer.amInterested = false
  peer.peerInterested = false
  if torrentData.info
    peer.bitfield = Bitfield.new(torrentData.info.pieces.length)
  else
    peer.bitfield = EmptyBitfield.new
    @logger.info "We have no metainfo yet, so setting peer #{peer} to have an EmptyBitfield"
  end
  # Send bitfield
  sendBitfield(currentIo, torrentData.blockState.completePieceBitfield) if torrentData.blockState
  setMetaInfo(peer)
  setReadRateLimit(torrentData.downRateLimit) if torrentData.downRateLimit
  setWriteRateLimit(torrentData.upRateLimit) if torrentData.upRateLimit
end
ruby
{ "resource": "" }
q2216
QuartzTorrent.PeerClientHandler.clientInit
train
# Handle an outgoing connection we initiated to a peer: send our handshake,
# arm the handshake timeout, and send our bitfield and rate limits.
def clientInit(peer)
  # We connected to a peer
  # Send handshake
  torrentData = @torrentData[peer.infoHash]
  if ! torrentData
    @logger.warn "No tracker client found for peer #{peer}. Closing connection."
    close
    return
  end
  trackerclient = torrentData.trackerClient
  @logger.info "Connected to peer #{peer}. Sending handshake."
  msg = PeerHandshake.new
  msg.peerId = trackerclient.peerId
  msg.infoHash = peer.infoHash
  msg.serializeTo currentIo
  peer.state = :handshaking
  # Non-recurring timer: disconnect if the peer never completes the handshake.
  @reactor.scheduleTimer(@handshakeTimeout, [:handshake_timeout, peer], false)
  @logger.debug "Done sending handshake."
  # Send bitfield
  sendBitfield(currentIo, torrentData.blockState.completePieceBitfield) if torrentData.blockState
  setReadRateLimit(torrentData.downRateLimit) if torrentData.downRateLimit
  setWriteRateLimit(torrentData.upRateLimit) if torrentData.upRateLimit
end
ruby
{ "resource": "" }
q2217
QuartzTorrent.PeerClientHandler.recvData
train
# Read and dispatch one message from a peer. During handshaking the message
# is a PeerHandshake; afterwards it is a wire message decoded by the peer's
# serializer. Read failures disconnect the peer.
def recvData(peer)
  msg = nil
  @logger.debug "Got data from peer #{peer}"
  if peer.state == :handshaking
    # Read handshake message
    begin
      @logger.debug "Reading handshake from #{peer}"
      msg = PeerHandshake.unserializeFrom currentIo
    rescue
      @logger.warn "Peer #{peer} failed handshake: #{$!}"
      setPeerDisconnected(peer)
      close
      return
    end
  else
    begin
      @logger.debug "Reading wire-message from #{peer}"
      msg = peer.peerMsgSerializer.unserializeFrom currentIo
      #msg = PeerWireMessage.unserializeFrom currentIo
    rescue EOFError
      @logger.info "Peer #{peer} disconnected."
      setPeerDisconnected(peer)
      close
      return
    rescue
      @logger.warn "Unserializing message from peer #{peer} failed: #{$!}"
      @logger.warn $!.backtrace.join "\n"
      setPeerDisconnected(peer)
      close
      return
    end
    # Account for the received bytes in the peer's and torrent's counters.
    peer.updateUploadRate msg
    torrentData = @torrentData[peer.infoHash]
    torrentData.bytesDownloaded += msg.length if torrentData
    @logger.debug "Peer #{peer} upload rate: #{peer.uploadRate.value} data only: #{peer.uploadRateDataOnly.value}"
  end
  # Dispatch by message type.
  if msg.is_a? PeerHandshake
    # This is a remote peer that we connected to returning our handshake.
    processHandshake(msg, peer)
    peer.state = :established
    peer.amChoked = true
    peer.peerChoked = true
    peer.amInterested = false
    peer.peerInterested = false
  elsif msg.is_a? BitfieldMessage
    @logger.debug "Received bitfield message from peer."
    handleBitfield(msg, peer)
  elsif msg.is_a? Unchoke
    @logger.debug "Received unchoke message from peer."
    peer.amChoked = false
  elsif msg.is_a? Choke
    @logger.debug "Received choke message from peer."
    peer.amChoked = true
  elsif msg.is_a? Interested
    @logger.debug "Received interested message from peer."
    peer.peerInterested = true
  elsif msg.is_a? Uninterested
    @logger.debug "Received uninterested message from peer."
    peer.peerInterested = false
  elsif msg.is_a? Piece
    @logger.debug "Received piece message from peer for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)}: piece #{msg.pieceIndex} offset #{msg.blockOffset} length #{msg.data.length}."
    handlePieceReceive(msg, peer)
  elsif msg.is_a? Request
    @logger.debug "Received request message from peer for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)}: piece #{msg.pieceIndex} offset #{msg.blockOffset} length #{msg.blockLength}."
    handleRequest(msg, peer)
  elsif msg.is_a? Have
    @logger.debug "Received have message from peer for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)}: piece #{msg.pieceIndex}"
    handleHave(msg, peer)
  elsif msg.is_a? KeepAlive
    @logger.debug "Received keep alive message from peer."
  elsif msg.is_a? ExtendedHandshake
    @logger.debug "Received extended handshake message from peer."
    handleExtendedHandshake(msg, peer)
  elsif msg.is_a? ExtendedMetaInfo
    @logger.debug "Received extended metainfo message from peer."
    handleExtendedMetainfo(msg, peer)
  else
    @logger.warn "Received a #{msg.class} message but handler is not implemented"
  end
end
ruby
{ "resource": "" }
q2218
QuartzTorrent.PeerClientHandler.timerExpired
train
# Dispatch an expired reactor timer to its handler. Timer metadata is an
# Array whose first element tags the action; anything else is logged as
# unknown.
#
# Fix: the :update_torrent_data branch referenced an undefined local
# `infoHash` (NameError at runtime); the delegate carries the info hash,
# so `delegate.infoHash` is used instead.
def timerExpired(metadata)
  tag = metadata.is_a?(Array) ? metadata[0] : nil
  case tag
  when :manage_peers
    managePeers(metadata[1])
  when :request_blocks
    requestBlocks(metadata[1])
  when :check_piece_manager
    checkPieceManagerResults(metadata[1])
  when :handshake_timeout
    handleHandshakeTimeout(metadata[1])
  when :removetorrent
    handleRemoveTorrent(metadata[1], metadata[2])
  when :pausetorrent
    handlePause(metadata[1], metadata[2])
  when :get_torrent_data
    # Build delegates for all torrents (or just metadata[3] if given) into
    # the hash at metadata[1], then signal the waiting thread.
    @torrentData.each do |k, v|
      begin
        if metadata[3].nil? || k == metadata[3]
          v = TorrentDataDelegate.new(v, self)
          metadata[1][k] = v
        end
      rescue
        @logger.error "Error building torrent data response for user: #{$!}"
        @logger.error "#{$!.backtrace.join("\n")}"
      end
    end
    metadata[2].signal
  when :update_torrent_data
    delegate = metadata[1]
    if ! @torrentData.has_key?(delegate.infoHash)
      delegate.state = :deleted
    else
      delegate.internalRefresh
    end
    metadata[2].signal
  when :request_metadata_pieces
    requestMetadataPieces(metadata[1])
  when :check_metadata_piece_manager
    checkMetadataPieceManagerResults(metadata[1])
  when :runproc
    metadata[1].call
  else
    @logger.info "Unknown timer #{metadata} expired."
  end
end
ruby
{ "resource": "" }
q2219
QuartzTorrent.PeerClientHandler.requestMetadataPieces
train
# Request outstanding metainfo (magnet metadata) pieces from a peer that has
# them. Currently all requestable pieces are sent to the first such peer.
def requestMetadataPieces(infoHash)
  torrentData = @torrentData[infoHash]
  if ! torrentData
    @logger.error "Request metadata pices: torrent data for torrent #{QuartzTorrent.bytesToHex(infoHash)} not found."
    return
  end
  return if torrentData.paused || torrentData.queued
  # We may not have completed the extended handshake with the peer which specifies the torrent size.
  # In this case torrentData.metainfoPieceState is not yet set.
  return if ! torrentData.metainfoPieceState
  # NOTE(review): when the metainfo is already complete this only logs and
  # still proceeds; presumably findRequestablePieces returns [] then — confirm.
  @logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Obtained all pieces of metainfo." if torrentData.metainfoPieceState.complete?
  pieces = torrentData.metainfoPieceState.findRequestablePieces
  classifiedPeers = ClassifiedPeers.new torrentData.peers.all
  peers = torrentData.metainfoPieceState.findRequestablePeers(classifiedPeers)
  if peers.size > 0
    # For now, just request all pieces from the first peer.
    pieces.each do |pieceIndex|
      msg = ExtendedMetaInfo.new
      msg.msgType = :request
      msg.piece = pieceIndex
      withPeersIo(peers.first, "requesting metadata piece") do |io|
        sendMessageToPeer msg, io, peers.first
        torrentData.metainfoPieceState.setPieceRequested(pieceIndex, true)
        @logger.debug "#{QuartzTorrent.bytesToHex(infoHash)}: Requesting metainfo piece from #{peers.first}: piece #{pieceIndex}"
      end
    end
  else
    @logger.error "#{QuartzTorrent.bytesToHex(infoHash)}: No peers found that have metadata."
  end
end
ruby
{ "resource": "" }
q2220
QuartzTorrent.PeerClientHandler.withPeersIo
train
# Find the io associated with a peer and yield it to the block. If no io is
# found, log a warning describing what we were trying to do.
#
# Fix: the original built a local `s = "when #{what}"` but then interpolated
# the raw `what` into the warning, leaving `s` unused. The intended
# "when ..." suffix is now used.
def withPeersIo(peer, what = nil)
  io = findIoByMetainfo(peer)
  if io
    yield io
  else
    suffix = what ? "when #{what}" : ""
    @logger.warn "Couldn't find the io for peer #{peer} #{suffix}"
  end
end
ruby
{ "resource": "" }
q2221
QuartzTorrent.PeerClientHandler.getPeersFromTracker
train
# Merge the tracker's peer list into the torrent's peer set. While below
# @maxPeerCount peers are simply added; once at the cap, each new peer
# replaces a disconnected one, and we stop when no replacement is possible.
def getPeersFromTracker(torrentData, infoHash)
  # Add a tracker peer unconditionally; always "succeeds" (returns true).
  addPeer = Proc.new do |trackerPeer|
    peer = Peer.new(trackerPeer)
    peer.infoHash = infoHash
    torrentData.peers.add peer
    true
  end
  classifiedPeers = nil
  # Add a tracker peer by evicting a disconnected one; returns false when
  # there is nothing left to evict (which stops the loop below).
  replaceDisconnectedPeer = Proc.new do |trackerPeer|
    classifiedPeers = ClassifiedPeers.new(torrentData.peers.all) if ! classifiedPeers
    if classifiedPeers.disconnectedPeers.size > 0
      torrentData.peers.delete classifiedPeers.disconnectedPeers.pop
      addPeer.call trackerPeer
      true
    else
      false
    end
  end
  trackerclient = torrentData.trackerClient
  addProc = addPeer
  flipped = false
  trackerclient.peers.each do |p|
    # Switch to replacement mode once the peer cap is reached (checked once).
    if ! flipped && torrentData.peers.size >= @maxPeerCount
      addProc = replaceDisconnectedPeer
      flipped = true
    end
    # Don't treat ourself as a peer.
    next if p.id && p.id == trackerclient.peerId
    if ! torrentData.peers.findByAddr(p.ip, p.port)
      @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Adding tracker peer #{p} to peers list"
      break if ! addProc.call(p)
    end
  end
end
ruby
{ "resource": "" }
q2222
QuartzTorrent.PeerClientHandler.handleRemoveTorrent
train
# Remove a torrent entirely: cancel its timers, disconnect its peers, stop
# its tracker client and piece managers, remove the metainfo file, and
# optionally delete the downloaded files.
#
# Fix: the two failure log messages in the file-deletion section were
# swapped — the "path doesn't exist" case logged "failed: #{$!}" while the
# rescue (an actual error) logged "doesn't exist". They are now matched to
# the correct branches.
def handleRemoveTorrent(infoHash, deleteFiles)
  torrentData = @torrentData.delete infoHash
  if ! torrentData
    @logger.warn "Asked to remove a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
    return
  end
  @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent. #{deleteFiles ? "Will" : "Wont"} delete downloaded files."
  @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.metainfoRequestTimer" if ! torrentData.metainfoRequestTimer
  @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.managePeersTimer" if ! torrentData.managePeersTimer
  @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.checkMetadataPieceManagerTimer" if ! torrentData.checkMetadataPieceManagerTimer
  @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.checkPieceManagerTimer" if ! torrentData.checkPieceManagerTimer
  @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.requestBlocksTimer" if ! torrentData.requestBlocksTimer
  # Stop all timers
  cancelTimer torrentData.metainfoRequestTimer if torrentData.metainfoRequestTimer
  cancelTimer torrentData.managePeersTimer if torrentData.managePeersTimer
  cancelTimer torrentData.checkMetadataPieceManagerTimer if torrentData.checkMetadataPieceManagerTimer
  cancelTimer torrentData.checkPieceManagerTimer if torrentData.checkPieceManagerTimer
  cancelTimer torrentData.requestBlocksTimer if torrentData.requestBlocksTimer
  torrentData.trackerClient.removePeersChangedListener(torrentData.peerChangeListener)
  # Remove all the peers for this torrent.
  torrentData.peers.all.each do |peer|
    if peer.state != :disconnected
      # Close socket
      withPeersIo(peer, "when removing torrent") do |io|
        setPeerDisconnected(peer)
        close(io)
        @logger.debug "Closing connection to peer #{peer}"
      end
    end
    torrentData.peers.delete peer
  end
  # Stop tracker client
  torrentData.trackerClient.stop if torrentData.trackerClient
  # Stop PieceManagers
  torrentData.pieceManager.stop if torrentData.pieceManager
  torrentData.metainfoPieceState.stop if torrentData.metainfoPieceState
  # Remove metainfo file if it exists
  begin
    torrentData.metainfoPieceState.remove if torrentData.metainfoPieceState
  rescue
    @logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Deleting metainfo file for torrent #{QuartzTorrent.bytesToHex(infoHash)} failed: #{$!}"
  end
  if deleteFiles
    if torrentData.info
      begin
        path = @baseDirectory + File::SEPARATOR + torrentData.info.name
        if File.exists? path
          FileUtils.rm_r path
          @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Deleted #{path}"
        else
          @logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: When removing torrent, deleting '#{path}' failed because it doesn't exist"
        end
      rescue
        @logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Deleting '#{path}' for torrent #{QuartzTorrent.bytesToHex(infoHash)} failed: #{$!}"
      end
    end
  end
  dequeue
end
ruby
{ "resource": "" }
q2223
QuartzTorrent.PeerClientHandler.handlePause
train
# Pause (value = true) or unpause (value = false) a torrent. Unpausing
# re-queues the torrent at the head of the queue; the freeze state is only
# toggled when the torrent isn't queued.
def handlePause(infoHash, value)
  torrentData = @torrentData[infoHash]
  if ! torrentData
    @logger.warn "Asked to pause a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
    return
  end
  # No-op if the torrent is already in the requested state.
  return if torrentData.paused == value
  torrentData.paused = value
  if !value
    # On unpause, queue the torrent since there might not be room for it to run.
    # Make sure it goes to the head of the queue.
    queue(torrentData, :unshift)
  end
  setFrozen infoHash, value if ! torrentData.queued
  dequeue
end
ruby
{ "resource": "" }
q2224
QuartzTorrent.PeerClientHandler.queue
train
# Place a torrent on the torrent queue (at the head when mode is :unshift,
# otherwise at the tail) and freeze it unless it is paused. No-op when the
# torrent is already queued.
def queue(torrentData, mode = :queue)
  return if torrentData.queued
  if mode == :unshift
    @torrentQueue.unshift torrentData
  else
    @torrentQueue.push torrentData
  end
  setFrozen torrentData, true unless torrentData.paused
end
ruby
{ "resource": "" }
q2225
QuartzTorrent.PeerClientHandler.dequeue
train
# Take any torrents the queue will now allow to run and start them:
# torrents still initializing are initialized; others are unfrozen unless
# paused.
def dequeue
  @torrentQueue.dequeue(@torrentData.values).each do |torrentData|
    if torrentData.state == :initializing
      initTorrent torrentData
    elsif ! torrentData.paused
      setFrozen torrentData, false
    end
  end
end
ruby
{ "resource": "" }
q2226
QuartzTorrent.PeerClientHandler.setFrozen
train
# Freeze (value = true) or thaw (value = false) a torrent. `torrent` may be
# a TorrentData or an info hash. Freezing disconnects and removes all peers;
# thawing schedules an immediate manage-peers pass to reconnect.
#
# Fix: the withPeersIo context string said "when removing torrent" — a
# copy-paste from handleRemoveTorrent; this path is freezing, not removal.
def setFrozen(torrent, value)
  torrentData = torrent
  if ! torrent.is_a?(TorrentData)
    torrentData = @torrentData[torrent]
    if ! torrentData
      @logger.warn "Asked to freeze a non-existent torrent #{QuartzTorrent.bytesToHex(torrent)}"
      return
    end
  end
  if value
    # Disconnect from all peers so we won't reply to any messages.
    torrentData.peers.all.each do |peer|
      if peer.state != :disconnected
        # Close socket
        withPeersIo(peer, "when freezing torrent") do |io|
          setPeerDisconnected(peer)
          close(io)
        end
      end
      torrentData.peers.delete peer
    end
  else
    # Get our list of peers and start connecting right away
    # Non-recurring and immediate timer
    torrentData.managePeersTimer = @reactor.scheduleTimer(@managePeersPeriod, [:manage_peers, torrentData.infoHash], false, true)
  end
end
ruby
{ "resource": "" }
q2227
QuartzTorrent.PeerClient.addTorrentByMetainfo
train
# Add a torrent from a fully-loaded Metainfo object. Raises if the argument
# is not a Metainfo. Returns the torrent's info hash.
def addTorrentByMetainfo(metainfo)
  unless metainfo.is_a?(Metainfo)
    raise "addTorrentByMetainfo should be called with a Metainfo object, not #{metainfo.class}"
  end
  trackerclient = TrackerClient.createFromMetainfo(metainfo, false)
  addTorrent(trackerclient, metainfo.infoHash, metainfo.info)
end
ruby
{ "resource": "" }
q2228
QuartzTorrent.PeerClient.addTorrentWithoutMetainfo
train
# Add a torrent for which we only know the announce URL and info hash (the
# metainfo will be fetched from peers). An optional MagnetURI may be passed.
def addTorrentWithoutMetainfo(announceUrl, infoHash, magnet = nil)
  if magnet && ! magnet.is_a?(MagnetURI)
    raise "addTorrentWithoutMetainfo should be called with a Magnet object, not a #{magnet.class}"
  end
  trackerclient = TrackerClient.create(announceUrl, infoHash, 0, false)
  addTorrent(trackerclient, infoHash, nil, magnet)
end
ruby
{ "resource": "" }
q2229
QuartzTorrent.PeerClient.addTorrentByMagnetURI
train
# Add a torrent from a MagnetURI. The magnet must carry a tracker URL.
def addTorrentByMagnetURI(magnet)
  unless magnet.is_a?(MagnetURI)
    raise "addTorrentByMagnetURI should be called with a MagnetURI object, not a #{magnet.class}"
  end
  trackerUrl = magnet.trackers
  # NOTE(review): `trackers` reads as plural; an empty array would pass this
  # truthiness check — confirm MagnetURI#trackers returns nil when absent.
  raise "addTorrentByMagnetURI can't handle magnet links that don't have a tracker URL." if !trackerUrl
  addTorrentWithoutMetainfo(trackerUrl, magnet.btInfoHash, magnet)
end
ruby
{ "resource": "" }
q2230
QuartzTorrent.PeerClient.adjustBytesDownloaded
train
# Public API: adjust the downloaded-byte count of a torrent. A nil
# adjustment is a no-op; a non-Integer adjustment raises.
def adjustBytesDownloaded(infoHash, adjustment)
  return unless adjustment
  unless adjustment.is_a?(Integer)
    raise "Bytes downloaded adjustment must be an Integer, not a #{adjustment.class}"
  end
  @handler.adjustBytesDownloaded(infoHash, adjustment)
end
ruby
{ "resource": "" }
q2231
QuartzTorrent.PeerClient.addTorrent
train
# Internal: register a torrent with the handler, wire up the tracker
# client's dynamic request parameters, and start (or defer starting) the
# tracker client. Returns the torrent's info hash.
def addTorrent(trackerclient, infoHash, info, magnet = nil)
  trackerclient.port = @port
  torrentData = @handler.addTrackerClient(infoHash, info, trackerclient)
  torrentData.magnet = magnet
  # Recomputed on every tracker announce so left/downloaded/uploaded reflect
  # the live block state.
  trackerclient.dynamicRequestParamsBuilder = Proc.new do
    torrentData = @handler.torrentData[infoHash]
    dataLength = (info ? info.dataLength : nil)
    result = TrackerDynamicRequestParams.new(dataLength)
    if torrentData && torrentData.blockState
      result.left = torrentData.blockState.totalLength - torrentData.blockState.completedLength
      result.downloaded = torrentData.bytesDownloadedDataOnly
      result.uploaded = torrentData.bytesUploadedDataOnly
    end
    result
  end
  # If we haven't started yet then add this trackerclient to a queue of
  # trackerclients to start once we are started. If we start too soon we
  # will connect to the tracker, and it will try to connect back to us before we are listening.
  if ! trackerclient.started?
    if @stopped
      @toStart.push trackerclient
    else
      trackerclient.start
    end
  end
  torrentData.infoHash
end
ruby
{ "resource": "" }
q2232
Rapidoc.Config.target_dir
train
def target_dir( f = nil ) if File.exists?( config_file_path ) form_file_name( target_dir_from_config, f ) else form_file_name( File.join( ::Rails.root.to_s, 'public/docs' ), f ) end end
ruby
{ "resource": "" }
q2233
Rapidoc.Config.examples_dir
train
def examples_dir( f = nil ) if File.exists?( config_file_path ) form_file_name( examples_dir_from_config_file, f ) else form_file_name( config_dir( '/examples' ), f ) end end
ruby
{ "resource": "" }
q2234
Twilio.Verb.say
train
# Build a TwiML <Say> verb. Accepts a text String and/or an options Hash
# (:voice, :language, :loop, :pause); defaults are man/en/loop once.
# With :pause, each loop iteration is emitted separately with a pause.
def say(*args)
  options = {:voice => 'man', :language => 'en', :loop => 1}
  args.each do |arg|
    case arg
    when String
      options[:text_to_speak] = arg
    when Hash
      options.merge!(arg)
    else
      raise ArgumentError, 'say expects String or Hash argument'
    end
  end
  output {
    if options[:pause]
      # Emit one <Say> per loop iteration so a pause can be inserted between.
      loop_with_pause(options[:loop], @xml) do
        @xml.Say(options[:text_to_speak], :voice => options[:voice], :language => options[:language])
      end
    else
      @xml.Say(options[:text_to_speak], :voice => options[:voice], :language => options[:language], :loop => options[:loop])
    end
  }
end
ruby
{ "resource": "" }
q2235
Twilio.Verb.gather
train
# Build a TwiML <Gather> verb. Takes an optional options Hash; nested verbs
# may be supplied via a block.
def gather(*args, &block)
  opts = args.shift || {}
  output {
    if block
      @xml.Gather(opts) { block.call }
    else
      @xml.Gather(opts)
    end
  }
end
ruby
{ "resource": "" }
q2236
Twilio.Verb.dial
train
# Build a TwiML <Dial> verb. Accepts a number String and/or an options Hash;
# nested nouns (e.g. <Number>) may be supplied via a block, in which case
# the number argument is ignored, as before.
def dial(*args, &block)
  number = ''
  opts = {}
  args.each do |arg|
    if arg.is_a?(String)
      number = arg
    elsif arg.is_a?(Hash)
      opts.merge!(arg)
    else
      raise ArgumentError, 'dial expects String or Hash argument'
    end
  end
  output {
    if block
      @xml.Dial(opts) { block.call }
    else
      @xml.Dial(number, opts)
    end
  }
end
ruby
{ "resource": "" }
q2237
Rapidoc.ResourceDoc.generate_info
train
# Populate @description and @actions_doc from the controller extractor and
# the given routes info. No-op when routes_info is nil.
def generate_info( routes_info )
  return unless routes_info
  extractor = get_controller_extractor
  @description = extractor.get_resource_info['description'] if extractor
  @actions_doc = get_actions_doc( routes_info, extractor )
  # template need that description will be an array
  @description = [ @description ] unless @description.class == Array
end
ruby
{ "resource": "" }
q2238
QuartzTorrent.PieceMapper.findBlock
train
# Map a block (piece index + offset within the piece + length) to the byte
# range of the whole torrent and delegate to findPart for the file regions.
def findBlock(pieceIndex, offset, length)
  startByte = @pieceSize * pieceIndex + offset
  endByte = startByte + length - 1
  findPart(startByte, endByte)
end
ruby
{ "resource": "" }
q2239
QuartzTorrent.PieceIO.writeBlock
train
# Write a block of data at the given piece index and offset, spreading the
# bytes over the file regions the block maps to. Files and parent
# directories are created lazily; a write error logs and aborts the rest.
#
# Fix: the rescue clause assigned `piece = nil` — a dead local copied from
# readRegions that had no effect in this method; it has been removed.
def writeBlock(pieceIndex, offset, block)
  regions = @pieceMapper.findBlock(pieceIndex, offset, block.length)
  indexInBlock = 0
  regions.each do |region|
    # Get the IO for the file with path 'path'. If we are being used in a reactor, this is the IO facade. If we
    # are not then this is a real IO.
    io = @ioManager.get(region.path)
    if ! io
      # No IO for this file.
      raise "This process doesn't have write permission for the file #{region.path}" if File.exists?(region.path) && ! File.writable?(region.path)
      # Ensure parent directories exist.
      dir = File.dirname region.path
      FileUtils.mkdir_p dir if ! File.directory?(dir)
      begin
        io = @ioManager.open(region.path)
      rescue
        @logger.error "Opening file #{region.path} failed: #{$!}"
        raise "Opening file #{region.path} failed"
      end
    end
    io.seek region.offset, IO::SEEK_SET
    begin
      io.write(block[indexInBlock, region.length])
      indexInBlock += region.length
    rescue
      # Error when writing: log it and stop writing the remaining regions.
      @logger.error "Writing block to file #{region.path} failed: #{$!}"
      break
    end
    break if indexInBlock >= block.length
  end
end
ruby
{ "resource": "" }
q2240
QuartzTorrent.PieceIO.readRegions
train
# Read and concatenate the given file regions into one string. Returns nil if
# any backing file does not exist yet or a read fails (piece not complete).
def readRegions(regions) piece = "" regions.each do |region| # Get the IO for the file with path 'path'. If we are being used in a reactor, this is the IO facade. If we # are not then this is a real IO. io = @ioManager.get(region.path) if ! io # No IO for this file. if ! File.exists?(region.path) # This file hasn't been created yet by having blocks written to it. piece = nil break end raise "This process doesn't have read permission for the file #{region.path}" if ! File.readable?(region.path) begin io = @ioManager.open(region.path) rescue @logger.error "Opening file #{region.path} failed: #{$!}" raise "Opening file #{region.path} failed" end end io.seek region.offset, IO::SEEK_SET begin piece << io.read(region.length) rescue # Error when reading. Likely EOF, meaning this peice isn't all there yet. piece = nil break end end piece end
ruby
{ "resource": "" }
q2241
QuartzTorrent.PieceManager.readBlock
train
# Queue an asynchronous :read_block request and return its request id.
# After stop, the id is returned but no request is queued.
def readBlock(pieceIndex, offset, length) id = returnAndIncrRequestId return id if @state == :after_stop @requests.push [id, :read_block, pieceIndex, offset, length] @requestsSemaphore.signal id end
ruby
{ "resource": "" }
q2242
QuartzTorrent.PieceManager.writeBlock
train
# Queue an asynchronous :write_block request and return its request id.
# After stop, the id is returned but no request is queued.
def writeBlock(pieceIndex, offset, block) id = returnAndIncrRequestId return id if @state == :after_stop @requests.push [id, :write_block, pieceIndex, offset, block] @requestsSemaphore.signal id end
ruby
{ "resource": "" }
q2243
QuartzTorrent.PieceManager.readPiece
train
# Queue an asynchronous :read_piece request and return its request id.
# After stop, the id is returned but no request is queued.
def readPiece(pieceIndex) id = returnAndIncrRequestId return id if @state == :after_stop @requests.push [id, :read_piece, pieceIndex] @requestsSemaphore.signal id end
ruby
{ "resource": "" }
q2244
QuartzTorrent.PieceManager.checkPieceHash
train
# Queue an asynchronous :hash_piece (hash verification) request and return its
# request id. After stop, the id is returned but no request is queued.
def checkPieceHash(pieceIndex) id = returnAndIncrRequestId return id if @state == :after_stop @requests.push [id, :hash_piece, pieceIndex] @requestsSemaphore.signal id end
ruby
{ "resource": "" }
q2245
Halite.Gem.license_header
train
# Return the leading header of the gemspec file: every initial line that is
# blank or a comment, joined into one string (typically the license banner).
def license_header IO.readlines(spec_file).take_while { |line| line.strip.empty? || line.strip.start_with?('#') }.join('') end
ruby
{ "resource": "" }
q2246
Halite.Gem.issues_url
train
# Issue-tracker URL: explicit 'issues_url' metadata wins; otherwise, for a
# GitHub homepage, derive "<homepage>/issues". Returns nil when neither applies.
def issues_url if spec.metadata['issues_url'] spec.metadata['issues_url'] elsif spec.homepage =~ /^http(s)?:\/\/(www\.)?github\.com/ spec.homepage.chomp('/') + '/issues' end end
ruby
{ "resource": "" }
q2247
Halite.Gem.platforms
train
# Parse the 'platforms' gemspec metadata into [[name, constraint?], ...] pairs.
# '' -> none; 'any'/'all'/'*' -> a fixed list of known platforms; a
# comma-separated string may carry per-name version constraints; otherwise
# whitespace-separated names with no constraints.
def platforms raw_platforms = spec.metadata.fetch('platforms', '').strip case raw_platforms when '' [] when 'any', 'all', '*' # Based on `ls lib/fauxhai/platforms | xargs echo`. %w{aix amazon arch centos chefspec debian dragonfly4 fedora freebsd gentoo ios_xr mac_os_x nexus omnios openbsd opensuse oracle raspbian redhat slackware smartos solaris2 suse ubuntu windows}.map {|p| [p] } when /,/ # Comma split mode. String looks like "name, name constraint, name constraint" raw_platforms.split(/\s*,\s*/).map {|p| p.split(/\s+/, 2) } else # Whitepace split mode, assume no constraints. raw_platforms.split(/\s+/).map {|p| [p] } end end
ruby
{ "resource": "" }
q2248
Halite.Gem.find_misc_path
train
# Locate a misc file (e.g. README) in the gem root, trying as-given/upper/lower
# case with .md/''/.txt/.html suffixes. The Dir.entries check keeps the match
# case-sensitive even on case-insensitive filesystems. Returns nil if absent.
def find_misc_path(name) [name, name.upcase, name.downcase].each do |base| ['.md', '', '.txt', '.html'].each do |suffix| path = File.join(spec.full_gem_path, base+suffix) return path if File.exist?(path) && Dir.entries(File.dirname(path)).include?(File.basename(path)) end end # Didn't find anything nil end
ruby
{ "resource": "" }
q2249
Halite.Gem.dependency_to_spec
train
# Resolve a Gem::Dependency to a concrete gemspec, falling back to the last
# matching spec when #to_spec rejects prereleases. Raises Error if unresolvable.
def dependency_to_spec(dep) # #to_spec doesn't allow prereleases unless the requirement is # for a prerelease. Just use the last valid spec if possible. spec = dep.to_spec || dep.to_specs.last raise Error.new("Cannot find a gem to satisfy #{dep}") unless spec spec rescue ::Gem::LoadError => ex raise Error.new("Cannot find a gem to satisfy #{dep}: #{ex}") end
ruby
{ "resource": "" }
q2250
RRD.Base.resize
train
# Grow/shrink an RRA: converts the requested time delta (options value) into a
# row count using the RRD step and the RRA's pdp_per_row, then calls rrdresize.
def resize(rra_num, options) info = self.info step = info["step"] rra_step = info["rra[#{rra_num}].pdp_per_row"] action = options.keys.first.to_s.upcase delta = (options.values.first / (step * rra_step)).to_i # Force an integer Wrapper.resize(rrd_file, rra_num.to_s, action, delta.to_s) end
ruby
{ "resource": "" }
q2251
Berkshelf.GemLocation.install
train
# (Re)install the gem as a cookbook: wipe any stale cache, convert the gem via
# Halite into the cache path, then validate the produced cookbook.
def install cache_path.rmtree if cache_path.exist? cache_path.mkpath Halite.convert(gem_name, cache_path) validate_cached!(cache_path) end
ruby
{ "resource": "" }
q2252
QuartzTorrent.PeerHolder.add
train
# Index a peer by address, by tracker id (when present), and by torrent
# infoHash. Silently skips peers already present at the same address.
def add(peer) raise "Peer must have it's infoHash set." if ! peer.infoHash # Do not add if peer is already present by address if @peersByAddr.has_key?(byAddrKey(peer)) @log.debug "Not adding peer #{peer} since it already exists by #{@peersById.has_key?(peer.trackerPeer.id) ? "id" : "addr"}." return end if peer.trackerPeer.id @peersById.pushToList(peer.trackerPeer.id, peer) # If id is null, this is probably a peer received from the tracker that has no ID. end @peersByAddr[byAddrKey(peer)] = peer @peersByInfoHash.pushToList(peer.infoHash, peer) end
ruby
{ "resource": "" }
q2253
QuartzTorrent.PeerHolder.idSet
train
# Called once a peer's tracker id becomes known: add it to the by-id index
# unless already present.
# NOTE(review): if @peersById is a plain Hash of lists, #each yields
# [key, list] pairs, so e.eql?(peer) would never match — verify that
# @peersById#each yields peers, or this duplicate check is a no-op.
def idSet(peer) @peersById.each do |e| return if e.eql?(peer) end @peersById.pushToList(peer.trackerPeer.id, peer) end
ruby
{ "resource": "" }
q2254
QuartzTorrent.PeerHolder.delete
train
# Remove a peer from all indexes: by address, by torrent infoHash, and (when
# it has a tracker id) by id.
def delete(peer)
  @peersByAddr.delete byAddrKey(peer)
  # BUG FIX: the previous collect!/compact! returned `peer` (the peer being
  # deleted) for every NON-matching element instead of `p`, so deleting one
  # peer replaced all the others in the list with the deleted peer.
  # delete_if removes exactly the matching entries.
  list = @peersByInfoHash[peer.infoHash]
  list.delete_if { |p| p.eql?(peer) } if list
  if peer.trackerPeer.id
    list = @peersById[peer.trackerPeer.id]
    list.delete_if { |p| p.eql?(peer) } if list
  end
end
ruby
{ "resource": "" }
q2255
QuartzTorrent.PeerHolder.to_s
train
def to_s(infoHash = nil) def makeFlags(peer) s = "[" s << "c" if peer.amChoked s << "i" if peer.peerInterested s << "C" if peer.peerChoked s << "I" if peer.amInterested s << "]" s end if infoHash s = "Peers: \n" peers = @peersByInfoHash[infoHash] if peers peers.each do |peer| s << " #{peer.to_s} #{makeFlags(peer)}\n" end end else "PeerHolder" end s end
ruby
{ "resource": "" }
q2256
QuartzTorrent.MagnetURI.btInfoHash
train
# Extract the BitTorrent info hash (20 raw bytes) from the magnet URI's 'xt'
# params: 40-char values are hex-decoded, otherwise Base32-decoded. Returns nil
# if no 'urn:btih:' topic is present.
def btInfoHash result = nil @params['xt'].each do |topic| if topic =~ /urn:btih:(.*)/ hash = $1 if hash.length == 40 # Hex-encoded info hash. Convert to binary. result = [hash].pack "H*" else # Base32 encoded result = Base32.decode hash end break end end result end
ruby
{ "resource": "" }
q2257
Rapidoc.ResourcesExtractor.get_routes_doc
train
# Build a RoutesDoc by shelling out to `rake routes` in the Rails root and
# feeding it every output line except the header (the one containing "URI").
def get_routes_doc puts "Executing 'rake routes'..." if trace? routes_doc = RoutesDoc.new routes = Dir.chdir( ::Rails.root.to_s ) { `rake routes` } routes.split("\n").each do |entry| routes_doc.add_route( entry ) unless entry.match(/URI/) end routes_doc end
ruby
{ "resource": "" }
q2258
Rapidoc.ResourcesExtractor.get_resources
train
# Create a ResourceDoc for every routed resource not on the blacklist,
# passing each its action/route info from the parsed `rake routes` output.
def get_resources routes_doc = get_routes_doc resources_names = routes_doc.get_resources_names - resources_black_list resources_names.map do |resource| puts "Generating #{resource} documentation..." if trace? ResourceDoc.new( resource, routes_doc.get_actions_route_info( resource ) ) end end
ruby
{ "resource": "" }
q2259
Cleanroom.ClassMethods.evaluate_file
train
# Read the file at filepath and evaluate its contents inside the cleanroom for
# `instance`, reporting the absolute path and line 1 for error backtraces.
def evaluate_file(instance, filepath) absolute_path = File.expand_path(filepath) file_contents = IO.read(absolute_path) evaluate(instance, file_contents, absolute_path, 1) end
ruby
{ "resource": "" }
q2260
Cleanroom.ClassMethods.evaluate
train
# Evaluate the given string/block against a fresh cleanroom proxy wrapping
# `instance`, so only exposed methods are reachable.
def evaluate(instance, *args, &block) cleanroom.new(instance).instance_eval(*args, &block) end
ruby
{ "resource": "" }
q2261
Cleanroom.ClassMethods.cleanroom
train
# Build (per call) an anonymous proxy class that forwards only the exposed
# methods to the wrapped instance. class_eval/instance_eval are blocked on both
# the class and its instances, and __instance__ is only callable from within
# this file (checked via the caller's backtrace) to prevent sandbox escape.
def cleanroom exposed = exposed_methods.keys parent = self.name || 'Anonymous' Class.new(Object) do class << self def class_eval raise Cleanroom::InaccessibleError.new(:class_eval, self) end def instance_eval raise Cleanroom::InaccessibleError.new(:instance_eval, self) end end define_method(:initialize) do |instance| define_singleton_method(:__instance__) do unless caller[0].include?(__FILE__) raise Cleanroom::InaccessibleError.new(:__instance__, self) end instance end end exposed.each do |exposed_method| define_method(exposed_method) do |*args, &block| __instance__.public_send(exposed_method, *args, &block) end end define_method(:class_eval) do raise Cleanroom::InaccessibleError.new(:class_eval, self) end define_method(:inspect) do "#<#{parent} (Cleanroom)>" end alias_method :to_s, :inspect end end
ruby
{ "resource": "" }
q2262
Rapidoc.RoutesDoc.add_resource_route
train
# Record one route under its resource (derived from the controller part of
# "controller#action"). Splits controller_action once instead of three times.
#
# method::            HTTP verb, e.g. "GET"
# url::               route path, e.g. "/users(.:format)"
# controller_action:: "controller#action" pair from `rake routes`
def add_resource_route( method, url, controller_action )
  parts = controller_action.split('#')
  controller = parts.first
  action = parts.last   # when there is no '#', action == controller (as before)
  info = {
    resource: controller,
    action: action,
    method: method,
    url: url,
    controller: controller
  }
  @resources_routes[controller.to_sym] ||= []
  @resources_routes[controller.to_sym].push( info )
end
ruby
{ "resource": "" }
q2263
Rapidoc.RoutesDoc.get_resource_name
train
# Derive the resource name from a route URL by matching it against the
# conventional Rails route shapes, in order. Falls back to the original url
# when no pattern matches.
def get_resource_name( url )
  # Strip the optional Rails format suffix before matching.
  path = url.gsub( '(.:format)', '' )

  # Each pattern captures the resource segment of a conventional route shape.
  patterns = [
    /\/(\w+)\/:id$/,           # /users/:id             -> users
    /\/(\w+)\/:id\/edit$/,     # /users/:id/edit        -> users
    /^\/(\w+)$/,               # /users                 -> users
    /\/:\w*id\/(\w+)$/,        # /users/:id/images      -> images
    /\/:\w*id\/(\w+)\/\w+$/,   # /users/:id/config/edit -> config
    /^\/(\w+)\/\w+$/,          # /users/edit            -> users
    /\/(\w+)\/\w+\/\w+$/       # /users/password/edit   -> users
  ]

  patterns.each do |pattern|
    match = pattern.match(path)
    return match[1] if match
  end

  url
end
ruby
{ "resource": "" }
q2264
QuartzTorrent.PeerWireMessageSerializer.classForMessage
train
# Map a peer-wire message id to its message class (lazily building the lookup
# tables). Id 20 is an Extended message: its first payload byte selects the
# extended subtype (0 = handshake, otherwise our locally-assigned extension id).
# Raises for unknown extended ids.
def classForMessage(id, payload) if @@classForMessage.nil? @@classForMessage = [Choke, Unchoke, Interested, Uninterested, Have, BitfieldMessage, Request, Piece, Cancel] @@classForMessage[20] = Extended end if @@classForExtendedMessage.nil? @@classForExtendedMessage = [] @@classForExtendedMessage[Extension::MetadataExtensionId] = ExtendedMetaInfo end result = @@classForMessage[id] if result == Extended && payload # Extended messages have further subtypes. extendedMsgId = payload.unpack("C")[0] if extendedMsgId == 0 result = ExtendedHandshake else # In this case the extended message number is the one we told our peers to use, not the one the peer told us. result = @@classForExtendedMessage[extendedMsgId] raise "Unsupported extended peer message id '#{extendedMsgId}'" if ! result end end result end
ruby
{ "resource": "" }
q2265
QuartzTorrent.BlockInfo.getRequest
train
# Build a peer-wire Request message for this block (piece index, byte offset
# within the piece, and block length).
def getRequest m = Request.new m.pieceIndex = @pieceIndex m.blockOffset = @offset m.blockLength = @length m end
ruby
{ "resource": "" }
q2266
QuartzTorrent.BlockState.findRequestableBlocks
train
# Pick up to numToReturn blocks to request next. Maintains @currentPieces (the
# pieces being downloaded, extended rarest-first when exhausted), prunes
# completed or peerless pieces, and returns BlockInfo objects for blocks that
# are neither complete nor already requested.
def findRequestableBlocks(classifiedPeers, numToReturn = nil) # Have a list of the current pieces we are working on. Each time this method is # called, check the blocks in the pieces in list order to find the blocks to return # for requesting. If a piece is completed, remove it from this list. If we need more blocks # than there are available in the list, add more pieces to the end of the list (in rarest-first # order). result = [] # Update requestable peers to only be those that we can still request pieces from. peersHavingPiece = computePeersHavingPiece(classifiedPeers) requestable = @completeBlocks.union(@requestedBlocks).compliment! rarityOrder = nil currentPiece = 0 while true if currentPiece >= @currentPieces.length # Add more pieces in rarest-first order. If there are no more pieces, break. rarityOrder = computeRarity(classifiedPeers) if ! rarityOrder added = false rarityOrder.each do |pair| pieceIndex = pair[1] peersWithPiece = peersHavingPiece[pieceIndex] if peersWithPiece && peersWithPiece.size > 0 && [email protected](pieceIndex) && ! pieceCompleted?(pieceIndex) @logger.debug "Adding piece #{pieceIndex} to the current downloading list" @currentPieces.push pieceIndex added = true break end end if ! 
added @logger.debug "There are no more pieces to add to the current downloading list" break end end currentPieceIndex = @currentPieces[currentPiece] if pieceCompleted?(currentPieceIndex) @logger.debug "Piece #{currentPieceIndex} complete so removing it from the current downloading list" @currentPieces.delete_at(currentPiece) next end peersWithPiece = peersHavingPiece[currentPieceIndex] if !peersWithPiece || peersWithPiece.size == 0 @logger.debug "No peers have piece #{currentPieceIndex}" currentPiece += 1 next end eachBlockInPiece(currentPieceIndex) do |blockIndex| if requestable.set?(blockIndex) result.push createBlockinfoByPieceAndBlockIndex(currentPieceIndex, peersWithPiece, blockIndex) break if numToReturn && result.size >= numToReturn end end break if numToReturn && result.size >= numToReturn currentPiece += 1 end result end
ruby
{ "resource": "" }
q2267
QuartzTorrent.BlockState.setBlockRequested
train
# Mark (bool=true) or unmark (bool=false) a block as currently requested.
def setBlockRequested(blockInfo, bool) if bool @requestedBlocks.set blockInfo.blockIndex else @requestedBlocks.clear blockInfo.blockIndex end end
ruby
{ "resource": "" }
q2268
QuartzTorrent.BlockState.setPieceCompleted
train
# Mark a whole piece (and each of its blocks) complete or incomplete.
def setPieceCompleted(pieceIndex, bool) eachBlockInPiece(pieceIndex) do |blockIndex| if bool @completeBlocks.set blockIndex else @completeBlocks.clear blockIndex end end if bool @completePieces.set pieceIndex else @completePieces.clear pieceIndex end end
ruby
{ "resource": "" }
q2269
QuartzTorrent.BlockState.completedLength
train
# Total number of downloaded bytes: complete blocks times block size, with the
# final (possibly shorter) block counted at its true length when complete.
def completedLength num = @completeBlocks.countSet # Last block may be smaller extra = 0 if @completeBlocks.set?(@completeBlocks.length-1) num -= 1 extra = @lastBlockLength end num*@blockSize + extra end
ruby
{ "resource": "" }
q2270
QuartzTorrent.BlockState.createBlockinfoByPieceAndBlockIndex
train
# Build a BlockInfo for a block: the very last block of the torrent uses
# @lastBlockLength; the offset is block-relative within its piece.
def createBlockinfoByPieceAndBlockIndex(pieceIndex, peersWithPiece, blockIndex) # If this is the very last block, then it might be smaller than the rest. blockSize = @blockSize blockSize = @lastBlockLength if blockIndex == @numBlocks-1 offsetWithinPiece = (blockIndex % @blocksPerPiece)*@blockSize BlockInfo.new(pieceIndex, offsetWithinPiece, blockSize, peersWithPiece, blockIndex) end
ruby
{ "resource": "" }
q2271
QuartzTorrent.BlockState.computePeersHavingPiece
train
# Build an array indexed by piece number whose entries are the lists of
# requestable peers advertising that piece (nil when no peer has it).
def computePeersHavingPiece(classifiedPeers) # Make a list of each peer having the specified piece peersHavingPiece = Array.new(@numPieces) # This first list represents rarity by number if peers having that piece. 1 = rarest. classifiedPeers.requestablePeers.each do |peer| @numPieces.times do |i| if peer.bitfield.set?(i) if peersHavingPiece[i] peersHavingPiece[i].push peer else peersHavingPiece[i] = [peer] end end end end peersHavingPiece end
ruby
{ "resource": "" }
q2272
QuartzTorrent.RegionMap.findValue
train
# Find the value of the first region whose end-offset is >= value, using a
# binary search over the (lazily sorted) region map.
def findValue(value)
  if ! @sorted
    # BUG FIX: this previously called non-destructive Array#sort and discarded
    # the result, so @map was never actually sorted even though @sorted was
    # set — making the binary search below operate on unsorted data.
    @map.sort!{ |a,b| a[0] <=> b[0] }
    @sorted = true
  end
  @map.binsearch{|x| x[0] >= value}[1]
end
ruby
{ "resource": "" }
q2273
Rapidoc.YamlParser.extract_resource_info
train
# Parse "=begin resource" comment blocks from the given file lines as YAML and
# return the first parsed hash (or {} when none parse / no blocks exist).
#
# lines::     the file's lines
# blocks::    [{init: start_line, end: end_line}, ...] comment-block ranges (may be nil)
# file_name:: used only for error reporting
def extract_resource_info( lines, blocks, file_name )
  # BUG FIX: the old "blocks ? info = [] : blocks = []" left info == nil when
  # blocks was nil, so the final info.first raised NoMethodError. Initialize
  # both unconditionally instead.
  info = []
  blocks ||= []
  blocks.each do |b|
    if lines[ b[:init] ].include? "=begin resource"
      n_lines = b[:end] - b[:init] - 1
      begin
        # Strip the leading "#" comment markers so the block is plain YAML.
        info.push YAML.load( lines[ b[:init] + 1, n_lines ].join.gsub(/\ *#/, '') )
      rescue Psych::SyntaxError => e
        puts "Error parsing block in #{file_name} file [#{b[:init]} - #{b[:end]}]"
      rescue => e
        puts e
      end
    end
  end
  info.first ? info.first : {}
end
ruby
{ "resource": "" }
q2274
Rapidoc.YamlParser.extract_actions_info
train
# Parse every "=begin action" comment block from the given file lines as YAML
# and return the list of parsed hashes (bad blocks are reported and skipped).
#
# lines::     the file's lines
# blocks::    [{init: start_line, end: end_line}, ...] comment-block ranges (may be nil)
# file_name:: used only for error reporting
def extract_actions_info( lines, blocks, file_name )
  info = []
  blocks ||= []
  blocks.each do |b|
    if lines[ b[:init] ].include? "=begin action"
      n_lines = b[:end] - b[:init] - 1
      begin
        # Strip the leading "#" comment markers so the block is plain YAML.
        info << YAML.load( lines[ b[:init] + 1, n_lines ].join.gsub(/\ *#/, '') )
      rescue => e
        # FIX: previously rescued Exception, which also swallowed signals and
        # SystemExit. StandardError covers YAML parse failures.
        puts "Error parsing block in #{file_name} file [#{b[:init]} - #{b[:end]}]"
      end
    end
  end
  info
end
ruby
{ "resource": "" }
q2275
Twilio.IncomingPhoneNumber.create
train
# Provision a Twilio incoming phone number. Requires either :PhoneNumber
# (exact number) or :AreaCode (Twilio picks a number) in opts.
def create(opts) raise "You must set either :PhoneNumber or :AreaCode" if !opts.include?(:AreaCode) && !opts.include?(:PhoneNumber) Twilio.post("/IncomingPhoneNumbers", :body => opts) end
ruby
{ "resource": "" }
q2276
QuartzTorrent.MemProfiler.getCounts
train
# Snapshot the number of live instances of each tracked class, keyed by class.
def getCounts
  @classes.each_with_object({}) do |klass, counts|
    total = 0
    ObjectSpace.each_object(klass) { total += 1 }
    counts[klass] = total
  end
end
ruby
{ "resource": "" }
q2277
I18nDocs.MissingKeysFinder.all_keys
train
# Collect every translation key defined across all loaded locales, as a flat,
# de-duplicated list (locale-independent key paths).
def all_keys I18n.backend.send(:translations).collect do |_check_locale, translations| collect_keys([], translations).sort end.flatten.uniq end
ruby
{ "resource": "" }
q2278
I18nDocs.MissingKeysFinder.key_exists?
train
# True if the key resolves in the given locale (a missing interpolation
# argument still counts as existing). NOTE: mutates the global I18n.locale
# as a side effect.
def key_exists?(key, locale) I18n.locale = locale I18n.translate(key, raise: true) return true rescue I18n::MissingInterpolationArgument return true rescue I18n::MissingTranslationData return false end
ruby
{ "resource": "" }
q2279
QuartzTorrent.PeerHandshake.serializeTo
train
# Write the BitTorrent handshake to io: protocol name length + name, 8 reserved
# bytes (0x10 in byte 5 advertises BEP 10 extension support), info hash, peer id.
# Raises unless peerId and infoHash have been set.
def serializeTo(io) raise "PeerId is not set" if ! @peerId raise "InfoHash is not set" if ! @infoHash result = [ProtocolName.length].pack("C") result << ProtocolName result << [0,0,0,0,0,0x10,0,0].pack("C8") # Reserved. 0x10 means we support extensions (BEP 10). result << @infoHash result << @peerId io.write result end
ruby
{ "resource": "" }
q2280
QuartzTorrent.TrackerClient.start
train
# Start the tracker-announce worker thread (idempotent while running). The
# worker loops: announce to the current tracker, replace the peer list on
# success (raising/clearing alarms as appropriate), rotate to the next tracker
# on failure, then sleep for the tracker-supplied interval (20s default, 2s
# after a failed request with no peers). On stop it sends a final :stopped
# announce before exiting.
def start @stopped = false return if @started @started = true @worker = Thread.new do QuartzTorrent.initThread("trackerclient") @logger.info "Worker thread starting" @event = :started trackerInterval = nil while ! @stopped begin response = nil driver = currentDriver if driver begin @logger.debug "Sending request to tracker #{currentAnnounceUrl}" response = driver.request(@event) @event = nil trackerInterval = response.interval rescue addError $! @logger.info "Request failed due to exception: #{$!}" @logger.debug $!.backtrace.join("\n") changeToNextTracker next @alarms.raise Alarm.new(:tracker, "Tracker request failed: #{$!}") if @alarms end end if response && response.successful? @alarms.clear :tracker if @alarms # Replace the list of peers peersHash = {} @logger.info "Response contained #{response.peers.length} peers" if response.peers.length == 0 @alarms.raise Alarm.new(:tracker, "Response from tracker contained no peers") if @alarms end response.peers.each do |p| peersHash[p] = 1 end @peersMutex.synchronize do @peers = peersHash end if @peersChangedListeners.size > 0 @peersChangedListeners.each{ |l| l.call } end else @logger.info "Response was unsuccessful from tracker: #{response.error}" addError response.error if response @alarms.raise Alarm.new(:tracker, "Unsuccessful response from tracker: #{response.error}") if @alarms && response changeToNextTracker next end # If we have no interval from the tracker yet, and the last request didn't error out leaving us with no peers, # then set the interval to 20 seconds. interval = trackerInterval interval = 20 if ! interval interval = 2 if response && !response.successful? 
&& @peers.length == 0 @logger.debug "Sleeping for #{interval} seconds" @sleeper.sleep interval rescue @logger.warn "Unhandled exception in worker thread: #{$!}" @logger.warn $!.backtrace.join("\n") @sleeper.sleep 1 end end @logger.info "Worker thread shutting down" @logger.info "Sending final update to tracker" begin driver = currentDriver driver.request(:stopped) if driver rescue addError $! @logger.debug "Request failed due to exception: #{$!}" @logger.debug $!.backtrace.join("\n") end @started = false end end
ruby
{ "resource": "" }
q2281
QuartzTorrent.Bitfield.length=
train
# Adjust the logical bit length. Only allowed when the new length still fits
# the existing backing byte array exactly; otherwise raises.
def length=(l) byteLen = 0 byteLen = (l-1)/8+1 if l > 0 raise "Length adjustment would change size of underlying array" if byteLen != byteLength @length = l end
ruby
{ "resource": "" }
q2282
QuartzTorrent.Bitfield.set
train
# Set bit `bit` to 1 (bit 0 is the most significant bit of byte 0).
# Raises when the bit index is out of range.
def set(bit) quotient = bit >> 3 remainder = bit & 0x7 mask = 0x80 >> remainder raise "Bit #{bit} out of range of bitfield with length #{length}" if quotient >= @data.length @data[quotient] |= mask end
ruby
{ "resource": "" }
q2283
QuartzTorrent.Bitfield.clear
train
# Clear bit `bit` to 0 (bit 0 is the most significant bit of byte 0).
# Raises when the bit index is out of range.
def clear(bit) quotient = bit >> 3 remainder = bit & 0x7 mask = ~(0x80 >> remainder) raise "Bit #{bit} out of range of bitfield with length #{length}" if quotient >= @data.length @data[quotient] &= mask end
ruby
{ "resource": "" }
q2284
QuartzTorrent.Bitfield.allSet?
train
# True when every bit is set: whole bytes are compared against 0xff quickly,
# and only the (possibly partial) final byte is checked bit-by-bit.
def allSet? # Check all but last byte quickly (@data.length-1).times do |i| return false if @data[i] != 0xff end # Check last byte slowly toCheck = @length % 8 toCheck = 8 if toCheck == 0 ((@length-toCheck)..(@length-1)).each do |i| return false if ! set?(i) end true end
ruby
{ "resource": "" }
q2285
QuartzTorrent.Bitfield.union
train
# Return a new Bitfield that is the bitwise OR of this bitfield and the
# argument. Both must be Bitfields of equal length.
def union(bitfield)
  raise "That's not a bitfield" if ! bitfield.is_a?(Bitfield)
  # BUG FIX: "! bitfield.length == length" parsed as
  # "(!bitfield.length) == length" (false == Integer), so the length check
  # could never fire. Compare with != instead.
  raise "bitfield lengths must be equal" if bitfield.length != length
  result = Bitfield.new(length)
  (@data.length).times do |i|
    result.data[i] = @data[i] | bitfield.data[i]
  end
  result
end
ruby
{ "resource": "" }
q2286
QuartzTorrent.Bitfield.intersection
train
# Return a new Bitfield that is the bitwise AND of this bitfield and the
# argument. Both must be Bitfields of equal length.
def intersection(bitfield)
  raise "That's not a bitfield" if ! bitfield.is_a?(Bitfield)
  # BUG FIX: "! bitfield.length == length" parsed as
  # "(!bitfield.length) == length" (false == Integer), so the length check
  # could never fire. Compare with != instead.
  raise "bitfield lengths must be equal" if bitfield.length != length
  newbitfield = Bitfield.new(length)
  newbitfield.copyFrom(self)
  newbitfield.intersection!(bitfield)
end
ruby
{ "resource": "" }
q2287
QuartzTorrent.Bitfield.intersection!
train
# In-place bitwise AND with the argument; returns self.
# Both must be Bitfields of equal length.
def intersection!(bitfield)
  raise "That's not a bitfield" if ! bitfield.is_a?(Bitfield)
  # BUG FIX: "! bitfield.length == length" parsed as
  # "(!bitfield.length) == length" (false == Integer), so the length check
  # could never fire. Compare with != instead.
  raise "bitfield lengths must be equal" if bitfield.length != length
  (@data.length).times do |i|
    @data[i] = @data[i] & bitfield.data[i]
  end
  self
end
ruby
{ "resource": "" }
q2288
QuartzTorrent.Bitfield.copyFrom
train
# Overwrite this bitfield's bytes with the source bitfield's. The source must
# be at least as long as this bitfield.
def copyFrom(bitfield) raise "Source bitfield is too small (#{bitfield.length} < #{length})" if bitfield.length < length (@data.length).times do |i| @data[i] = bitfield.data[i] end end
ruby
{ "resource": "" }
q2289
QuartzTorrent.Bitfield.to_s
train
# Render the bitfield as "1"/"0" characters with a space after every groupsOf
# bits (default 8; 0 is treated as 8).
def to_s(groupsOf = 8)
  groupsOf = 8 if groupsOf == 0
  s = ""
  length.times do |i|
    s << (set?(i) ? "1" : "0")
    # BUG FIX: "i % groupsOf == 0" put the separator after the FIRST bit
    # (i == 0) and then at offsets 8, 16, ... — producing groups of
    # 1, 8, 8, ... bits. Using i+1 places it after every groupsOf-th bit.
    s << " " if (i+1) % groupsOf == 0
  end
  s
end
ruby
{ "resource": "" }
q2290
QuartzTorrent.Peer.updateUploadRate
train
# Account an outgoing message against the upload rate; Piece messages also
# count their payload bytes toward the data-only rate.
def updateUploadRate(msg) @uploadRate.update msg.length if msg.is_a? Piece @uploadRateDataOnly.update msg.data.length end end
ruby
{ "resource": "" }
q2291
QuartzTorrent.Peer.updateDownloadRate
train
# Account an incoming message against the download rate; Piece messages also
# count their payload bytes toward the data-only rate.
def updateDownloadRate(msg) @downloadRate.update msg.length if msg.is_a? Piece @downloadRateDataOnly.update msg.data.length end end
ruby
{ "resource": "" }
q2292
Cloudconvert.Conversion.convert
train
# Start a CloudConvert conversion: obtain a conversion URL for the format pair,
# reconnect to the returned host, then submit the (download-by-link) upload.
# Raises when file_path is nil.
def convert(inputformat, outputformat, file_path, callback_url = nil, options = {}) raise "File path cant be blank" if file_path.nil? @convert_request_url = start_conversion(inputformat, outputformat) #initiate connection with new response host initiate_connection(@convert_request_url) upload(build_upload_params(file_path, outputformat, callback_url, options)) end
ruby
{ "resource": "" }
q2293
Cloudconvert.Conversion.converter_options
train
# Query CloudConvert for supported conversion types, optionally filtered by
# input and/or output format; returns the parsed response body.
def converter_options(inputformat ="", outputformat = "") response = @conversion_connection.get "conversiontypes", {:inputformat => inputformat,:outputformat => outputformat } parse_response(response.body) end
ruby
{ "resource": "" }
q2294
Cloudconvert.Conversion.build_upload_params
train
# Assemble the upload request params: target format, optional callback URL,
# download-by-link input mode, plus any caller-supplied extras (which win on
# key conflicts since they are merged last).
def build_upload_params(file_path, outputformat, callback_url = nil, options = {}) upload_params = { :format => outputformat} upload_params.merge!(:callback => callback(callback_url)) if callback(callback_url).present? upload_params.merge!(:input => "download",:link => file_path ) upload_params.merge!(options) end
ruby
{ "resource": "" }
q2295
Gravatarify.Helper.gravatar_attrs
train
# Build the HTML attribute hash for a gravatar <img>: :src from the gravatar
# URL, :width/:height from the :size option (default 80), and :alt defaulted
# to '' for markup validity (caller-supplied :html options take precedence).
def gravatar_attrs(email, *params) url_options = Gravatarify::Utils.merge_gravatar_options(*params) options = url_options[:html] || {} options[:src] = gravatar_url(email, false, url_options) options[:width] = options[:height] = (url_options[:size] || 80) # customize size { :alt => '' }.merge!(options) # to ensure validity merge with :alt => ''! end
ruby
{ "resource": "" }
q2296
GemUpdater.GemFile.compute_changes
train
# Diff the old and new spec sets and record every gem whose version changed.
def compute_changes spec_sets_diff! old_spec_set.each do |old_gem| updated_gem = new_spec_set.find { |new_gem| new_gem.name == old_gem.name } next unless updated_gem && old_gem.version != updated_gem.version fill_changes(old_gem, updated_gem) end end
ruby
{ "resource": "" }
q2297
GemUpdater.GemFile.fill_changes
train
# Record one gem's version bump (old/new as strings) and its source under its
# name in the changes hash.
def fill_changes(old_gem, updated_gem) changes[old_gem.name] = { versions: { old: old_gem.version.to_s, new: updated_gem.version.to_s }, source: updated_gem.source } end
ruby
{ "resource": "" }
q2298
BERTRPC.Action.connect_to
train
# Open a TCP socket to host:port with TCP_NODELAY. With a timeout, sets
# SO_RCVTIMEO/SO_SNDTIMEO and performs a non-blocking connect bounded by
# IO.select, raising ConnectionError when the select times out (EISCONN after
# the retry means the connect already completed). Without a timeout, connects
# blocking. Returns the connected socket.
def connect_to(host, port, timeout = nil) timeout = timeout && Float(timeout) addr = Socket.getaddrinfo(host, nil, Socket::AF_INET) sock = Socket.new(Socket.const_get(addr[0][0]), Socket::SOCK_STREAM, 0) sock.setsockopt Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1 if timeout secs = Integer(timeout) usecs = Integer((timeout - secs) * 1_000_000) optval = [secs, usecs].pack("l_2") sock.setsockopt Socket::SOL_SOCKET, Socket::SO_RCVTIMEO, optval sock.setsockopt Socket::SOL_SOCKET, Socket::SO_SNDTIMEO, optval begin sock.connect_nonblock(Socket.pack_sockaddr_in(port, addr[0][3])) rescue Errno::EINPROGRESS result = IO.select(nil, [sock], nil, timeout) if result.nil? raise ConnectionError.new(@svc.host, @svc.port) end begin sock.connect_nonblock(Socket.pack_sockaddr_in(port, addr[0][3])) rescue Errno::EISCONN end end else sock.connect(Socket.pack_sockaddr_in(port, addr[0][3])) end sock end
ruby
{ "resource": "" }
q2299
Nexus.Cache.prune_cache
train
# Evict cache entries older than mtime (minutes, presumably — TODO confirm
# units against remove_old_items) from the DB index and delete their files.
def prune_cache(mtime=15) # get old, unused entries and discard from DB and filesystem entries = remove_old_items(mtime) entries.each do |key, entry| FileUtils.rm_f(entry[:file]) end end
ruby
{ "resource": "" }