_id (stringlengths 2–7) | title (stringlengths 3–140) | partition (stringclasses, 3 values) | text (stringlengths 73–34.1k) | language (stringclasses, 1 value) | meta_information (dict) |
---|---|---|---|---|---|
q1400
|
CoreStitchAppClient.callFunction
|
train
|
public <T> T callFunction(
final String name,
final List<?> args,
final @Nullable Long requestTimeout,
final Class<T> resultClass,
final CodecRegistry codecRegistry
) {
return this.functionService
.withCodecRegistry(codecRegistry)
.callFunction(name, args, requestTimeout, resultClass);
}
|
java
|
{
"resource": ""
}
|
q1401
|
InstanceChangeStreamListenerImpl.start
|
train
|
public void start() {
instanceLock.writeLock().lock();
try {
for (final Map.Entry<MongoNamespace, NamespaceChangeStreamListener> streamerEntry :
nsStreamers.entrySet()) {
streamerEntry.getValue().start();
}
} finally {
instanceLock.writeLock().unlock();
}
}
|
java
|
{
"resource": ""
}
|
q1402
|
InstanceChangeStreamListenerImpl.stop
|
train
|
public void stop() {
instanceLock.writeLock().lock();
try {
for (final NamespaceChangeStreamListener streamer : nsStreamers.values()) {
streamer.stop();
}
} finally {
instanceLock.writeLock().unlock();
}
}
|
java
|
{
"resource": ""
}
|
q1403
|
InstanceChangeStreamListenerImpl.addNamespace
|
train
|
public void addNamespace(final MongoNamespace namespace) {
this.instanceLock.writeLock().lock();
try {
if (this.nsStreamers.containsKey(namespace)) {
return;
}
final NamespaceChangeStreamListener streamer =
new NamespaceChangeStreamListener(
namespace,
instanceConfig.getNamespaceConfig(namespace),
service,
networkMonitor,
authMonitor,
getLockForNamespace(namespace));
this.nsStreamers.put(namespace, streamer);
} finally {
this.instanceLock.writeLock().unlock();
}
}
|
java
|
{
"resource": ""
}
|
q1404
|
InstanceChangeStreamListenerImpl.removeNamespace
|
train
|
@Override
public void removeNamespace(final MongoNamespace namespace) {
this.instanceLock.writeLock().lock();
try {
if (!this.nsStreamers.containsKey(namespace)) {
return;
}
final NamespaceChangeStreamListener streamer = this.nsStreamers.get(namespace);
streamer.stop();
this.nsStreamers.remove(namespace);
} finally {
this.instanceLock.writeLock().unlock();
}
}
|
java
|
{
"resource": ""
}
|
q1405
|
InstanceChangeStreamListenerImpl.getEventsForNamespace
|
train
|
public Map<BsonValue, ChangeEvent<BsonDocument>> getEventsForNamespace(
final MongoNamespace namespace
) {
this.instanceLock.readLock().lock();
final NamespaceChangeStreamListener streamer;
try {
streamer = nsStreamers.get(namespace);
} finally {
this.instanceLock.readLock().unlock();
}
if (streamer == null) {
return new HashMap<>();
}
return streamer.getEvents();
}
|
java
|
{
"resource": ""
}
|
q1406
|
InstanceChangeStreamListenerImpl.getUnprocessedEventForDocumentId
|
train
|
public @Nullable ChangeEvent<BsonDocument> getUnprocessedEventForDocumentId(
final MongoNamespace namespace,
final BsonValue documentId
) {
this.instanceLock.readLock().lock();
final NamespaceChangeStreamListener streamer;
try {
streamer = nsStreamers.get(namespace);
} finally {
this.instanceLock.readLock().unlock();
}
if (streamer == null) {
return null;
}
return streamer.getUnprocessedEventForDocumentId(documentId);
}
|
java
|
{
"resource": ""
}
|
q1407
|
CoreDocumentSynchronizationConfig.setPaused
|
train
|
void setPaused(final boolean isPaused) {
docLock.writeLock().lock();
try {
docsColl.updateOne(
getDocFilter(namespace, documentId),
new BsonDocument("$set",
new BsonDocument(
ConfigCodec.Fields.IS_PAUSED,
new BsonBoolean(isPaused))));
this.isPaused = isPaused;
} catch (IllegalStateException e) {
// eat this
} finally {
docLock.writeLock().unlock();
}
}
|
java
|
{
"resource": ""
}
|
q1408
|
CoreDocumentSynchronizationConfig.setSomePendingWritesAndSave
|
train
|
public void setSomePendingWritesAndSave(
final long atTime,
final ChangeEvent<BsonDocument> changeEvent
) {
docLock.writeLock().lock();
try {
// if we were frozen
if (isPaused) {
// unfreeze the document due to the local write
setPaused(false);
        // and the unfrozen document is now stale
setStale(true);
}
this.lastUncommittedChangeEvent =
coalesceChangeEvents(this.lastUncommittedChangeEvent, changeEvent);
this.lastResolution = atTime;
docsColl.replaceOne(
getDocFilter(namespace, documentId),
this);
} finally {
docLock.writeLock().unlock();
}
}
|
java
|
{
"resource": ""
}
|
q1409
|
CoreDocumentSynchronizationConfig.coalesceChangeEvents
|
train
|
private static ChangeEvent<BsonDocument> coalesceChangeEvents(
final ChangeEvent<BsonDocument> lastUncommittedChangeEvent,
final ChangeEvent<BsonDocument> newestChangeEvent
) {
if (lastUncommittedChangeEvent == null) {
return newestChangeEvent;
}
switch (lastUncommittedChangeEvent.getOperationType()) {
case INSERT:
switch (newestChangeEvent.getOperationType()) {
// Coalesce replaces/updates to inserts since we believe at some point a document did not
// exist remotely and that this replace or update should really be an insert if we are
// still in an uncommitted state.
case REPLACE:
case UPDATE:
return new ChangeEvent<>(
newestChangeEvent.getId(),
OperationType.INSERT,
newestChangeEvent.getFullDocument(),
newestChangeEvent.getNamespace(),
newestChangeEvent.getDocumentKey(),
null,
newestChangeEvent.hasUncommittedWrites()
);
default:
break;
}
break;
case DELETE:
switch (newestChangeEvent.getOperationType()) {
        // Coalesce inserts to replaces since we believe at some point a document existed
        // remotely and that this insert should really be a replace if we are still in an
        // uncommitted state.
case INSERT:
return new ChangeEvent<>(
newestChangeEvent.getId(),
OperationType.REPLACE,
newestChangeEvent.getFullDocument(),
newestChangeEvent.getNamespace(),
newestChangeEvent.getDocumentKey(),
null,
newestChangeEvent.hasUncommittedWrites()
);
default:
break;
}
break;
case UPDATE:
switch (newestChangeEvent.getOperationType()) {
case UPDATE:
return new ChangeEvent<>(
newestChangeEvent.getId(),
OperationType.UPDATE,
newestChangeEvent.getFullDocument(),
newestChangeEvent.getNamespace(),
newestChangeEvent.getDocumentKey(),
lastUncommittedChangeEvent.getUpdateDescription() != null
? lastUncommittedChangeEvent
.getUpdateDescription()
.merge(newestChangeEvent.getUpdateDescription())
: newestChangeEvent.getUpdateDescription(),
newestChangeEvent.hasUncommittedWrites()
);
case REPLACE:
return new ChangeEvent<>(
newestChangeEvent.getId(),
OperationType.REPLACE,
newestChangeEvent.getFullDocument(),
newestChangeEvent.getNamespace(),
newestChangeEvent.getDocumentKey(),
null,
newestChangeEvent.hasUncommittedWrites()
);
default:
break;
}
break;
case REPLACE:
switch (newestChangeEvent.getOperationType()) {
case UPDATE:
return new ChangeEvent<>(
newestChangeEvent.getId(),
OperationType.REPLACE,
newestChangeEvent.getFullDocument(),
newestChangeEvent.getNamespace(),
newestChangeEvent.getDocumentKey(),
null,
newestChangeEvent.hasUncommittedWrites()
);
default:
break;
}
break;
default:
break;
}
return newestChangeEvent;
}
|
java
|
{
"resource": ""
}
|
q1410
|
DocumentVersionInfo.getRemoteVersionInfo
|
train
|
static DocumentVersionInfo getRemoteVersionInfo(final BsonDocument remoteDocument) {
final BsonDocument version = getDocumentVersionDoc(remoteDocument);
return new DocumentVersionInfo(
version,
remoteDocument != null
? BsonUtils.getDocumentId(remoteDocument) : null
);
}
|
java
|
{
"resource": ""
}
|
q1411
|
DocumentVersionInfo.getFreshVersionDocument
|
train
|
static BsonDocument getFreshVersionDocument() {
final BsonDocument versionDoc = new BsonDocument();
versionDoc.append(Fields.SYNC_PROTOCOL_VERSION_FIELD, new BsonInt32(1));
versionDoc.append(Fields.INSTANCE_ID_FIELD, new BsonString(UUID.randomUUID().toString()));
versionDoc.append(Fields.VERSION_COUNTER_FIELD, new BsonInt64(0L));
return versionDoc;
}
|
java
|
{
"resource": ""
}
|
q1412
|
DocumentVersionInfo.getDocumentVersionDoc
|
train
|
static BsonDocument getDocumentVersionDoc(final BsonDocument document) {
if (document == null || !document.containsKey(DOCUMENT_VERSION_FIELD)) {
return null;
}
return document.getDocument(DOCUMENT_VERSION_FIELD, null);
}
|
java
|
{
"resource": ""
}
|
q1413
|
DocumentVersionInfo.getVersionedFilter
|
train
|
static BsonDocument getVersionedFilter(
@Nonnull final BsonValue documentId,
@Nullable final BsonValue version
) {
final BsonDocument filter = new BsonDocument("_id", documentId);
if (version == null) {
filter.put(DOCUMENT_VERSION_FIELD, new BsonDocument("$exists", BsonBoolean.FALSE));
} else {
filter.put(DOCUMENT_VERSION_FIELD, version);
}
return filter;
}
|
java
|
{
"resource": ""
}
|
q1414
|
DocumentVersionInfo.getNextVersion
|
train
|
BsonDocument getNextVersion() {
if (!this.hasVersion() || this.getVersionDoc() == null) {
return getFreshVersionDocument();
}
final BsonDocument nextVersion = BsonUtils.copyOfDocument(this.getVersionDoc());
nextVersion.put(
Fields.VERSION_COUNTER_FIELD,
new BsonInt64(this.getVersion().getVersionCounter() + 1));
return nextVersion;
}
|
java
|
{
"resource": ""
}
|
q1415
|
RemoteMongoDatabaseImpl.getCollection
|
train
|
public RemoteMongoCollection<Document> getCollection(final String collectionName) {
return new RemoteMongoCollectionImpl<>(proxy.getCollection(collectionName), dispatcher);
}
|
java
|
{
"resource": ""
}
|
q1416
|
StitchEvent.fromEvent
|
train
|
static <T> StitchEvent<T> fromEvent(final Event event,
final Decoder<T> decoder) {
return new StitchEvent<>(event.getEventName(), event.getData(), decoder);
}
|
java
|
{
"resource": ""
}
|
q1417
|
EventStreamReader.processEvent
|
train
|
protected final Event processEvent() throws IOException {
while (true) {
String line;
try {
line = readLine();
} catch (final EOFException ex) {
if (doneOnce) {
throw ex;
}
doneOnce = true;
line = "";
}
      // If the line is empty (a blank line), dispatch the event, as defined below.
if (line.isEmpty()) {
// If the data buffer is an empty string, set the data buffer and the event name buffer to
// the empty string and abort these steps.
if (dataBuffer.length() == 0) {
eventName = "";
continue;
}
// If the event name buffer is not the empty string but is also not a valid NCName,
// set the data buffer and the event name buffer to the empty string and abort these steps.
// NOT IMPLEMENTED
final Event.Builder eventBuilder = new Event.Builder();
eventBuilder.withEventName(eventName.isEmpty() ? Event.MESSAGE_EVENT : eventName);
eventBuilder.withData(dataBuffer.toString());
// Set the data buffer and the event name buffer to the empty string.
dataBuffer = new StringBuilder();
eventName = "";
return eventBuilder.build();
// If the line starts with a U+003A COLON character (':')
} else if (line.startsWith(":")) {
// ignore the line
      // If the line contains a U+003A COLON character (':')
} else if (line.contains(":")) {
// Collect the characters on the line before the first U+003A COLON character (':'),
// and let field be that string.
final int colonIdx = line.indexOf(":");
final String field = line.substring(0, colonIdx);
// Collect the characters on the line after the first U+003A COLON character (':'),
// and let value be that string.
// If value starts with a single U+0020 SPACE character, remove it from value.
String value = line.substring(colonIdx + 1);
value = value.startsWith(" ") ? value.substring(1) : value;
processField(field, value);
      // Otherwise, the string is not empty but does not contain a U+003A COLON
      // character (':')
} else {
processField(line, "");
}
}
}
|
java
|
{
"resource": ""
}
|
q1418
|
StitchError.handleRichError
|
train
|
private static String handleRichError(final Response response, final String body) {
if (!response.getHeaders().containsKey(Headers.CONTENT_TYPE)
|| !response.getHeaders().get(Headers.CONTENT_TYPE).equals(ContentTypes.APPLICATION_JSON)) {
return body;
}
final Document doc;
try {
doc = BsonUtils.parseValue(body, Document.class);
} catch (Exception e) {
return body;
}
if (!doc.containsKey(Fields.ERROR)) {
return body;
}
final String errorMsg = doc.getString(Fields.ERROR);
if (!doc.containsKey(Fields.ERROR_CODE)) {
return errorMsg;
}
final String errorCode = doc.getString(Fields.ERROR_CODE);
throw new StitchServiceException(errorMsg, StitchServiceErrorCode.fromCodeName(errorCode));
}
|
java
|
{
"resource": ""
}
|
q1419
|
Stitch.initialize
|
train
|
public static void initialize(final Context context) {
if (!initialized.compareAndSet(false, true)) {
return;
}
applicationContext = context.getApplicationContext();
final String packageName = applicationContext.getPackageName();
localAppName = packageName;
final PackageManager manager = applicationContext.getPackageManager();
try {
final PackageInfo pkgInfo = manager.getPackageInfo(packageName, 0);
localAppVersion = pkgInfo.versionName;
} catch (final NameNotFoundException e) {
Log.d(TAG, "Failed to get version of application, will not send in device info.");
}
Log.d(TAG, "Initialized android SDK");
}
|
java
|
{
"resource": ""
}
|
q1420
|
Stitch.getAppClient
|
train
|
public static StitchAppClient getAppClient(
@Nonnull final String clientAppId
) {
ensureInitialized();
synchronized (Stitch.class) {
if (!appClients.containsKey(clientAppId)) {
throw new IllegalStateException(
String.format("client for app '%s' has not yet been initialized", clientAppId));
}
return appClients.get(clientAppId);
}
}
|
java
|
{
"resource": ""
}
|
q1421
|
NamespaceChangeStreamListener.start
|
train
|
public void start() {
nsLock.writeLock().lock();
try {
if (runnerThread != null) {
return;
}
runnerThread =
new Thread(new NamespaceChangeStreamRunner(
new WeakReference<>(this), networkMonitor, logger));
runnerThread.start();
} finally {
nsLock.writeLock().unlock();
}
}
|
java
|
{
"resource": ""
}
|
q1422
|
NamespaceChangeStreamListener.stop
|
train
|
public void stop() {
if (runnerThread == null) {
return;
}
runnerThread.interrupt();
nsLock.writeLock().lock();
try {
if (runnerThread == null) {
return;
}
this.cancel();
this.close();
while (runnerThread.isAlive()) {
runnerThread.interrupt();
try {
runnerThread.join(1000);
} catch (final Exception e) {
e.printStackTrace();
return;
}
}
runnerThread = null;
} catch (Exception e) {
e.printStackTrace();
} finally {
nsLock.writeLock().unlock();
}
}
|
java
|
{
"resource": ""
}
|
q1423
|
NamespaceChangeStreamListener.openStream
|
train
|
boolean openStream() throws InterruptedException, IOException {
logger.info("stream START");
final boolean isOpen;
final Set<BsonValue> idsToWatch = nsConfig.getSynchronizedDocumentIds();
if (!networkMonitor.isConnected()) {
logger.info("stream END - Network disconnected");
return false;
}
if (idsToWatch.isEmpty()) {
logger.info("stream END - No synchronized documents");
return false;
}
nsLock.writeLock().lockInterruptibly();
try {
if (!authMonitor.isLoggedIn()) {
logger.info("stream END - Logged out");
return false;
}
final Document args = new Document();
args.put("database", namespace.getDatabaseName());
args.put("collection", namespace.getCollectionName());
args.put("ids", idsToWatch);
currentStream =
service.streamFunction(
"watch",
Collections.singletonList(args),
ResultDecoders.changeEventDecoder(BSON_DOCUMENT_CODEC));
if (currentStream != null && currentStream.isOpen()) {
this.nsConfig.setStale(true);
isOpen = true;
} else {
isOpen = false;
}
} finally {
nsLock.writeLock().unlock();
}
return isOpen;
}
|
java
|
{
"resource": ""
}
|
q1424
|
NamespaceChangeStreamListener.getEvents
|
train
|
@SuppressWarnings("unchecked")
public Map<BsonValue, ChangeEvent<BsonDocument>> getEvents() {
nsLock.readLock().lock();
final Map<BsonValue, ChangeEvent<BsonDocument>> events;
try {
events = new HashMap<>(this.events);
} finally {
nsLock.readLock().unlock();
}
nsLock.writeLock().lock();
try {
this.events.clear();
return events;
} finally {
nsLock.writeLock().unlock();
}
}
|
java
|
{
"resource": ""
}
|
q1425
|
NamespaceChangeStreamListener.getUnprocessedEventForDocumentId
|
train
|
public @Nullable ChangeEvent<BsonDocument> getUnprocessedEventForDocumentId(
final BsonValue documentId
) {
final ChangeEvent<BsonDocument> event;
nsLock.readLock().lock();
try {
event = this.events.get(documentId);
} finally {
nsLock.readLock().unlock();
}
nsLock.writeLock().lock();
try {
this.events.remove(documentId);
return event;
} finally {
nsLock.writeLock().unlock();
}
}
|
java
|
{
"resource": ""
}
|
q1426
|
HashUtils.hash
|
train
|
public static long hash(final BsonDocument doc) {
if (doc == null) {
return 0L;
}
final byte[] docBytes = toBytes(doc);
long hashValue = FNV_64BIT_OFFSET_BASIS;
for (int offset = 0; offset < docBytes.length; offset++) {
hashValue ^= (0xFF & docBytes[offset]);
hashValue *= FNV_64BIT_PRIME;
}
return hashValue;
}
|
java
|
{
"resource": ""
}
|
q1427
|
SyncMongoClientFactory.deleteDatabase
|
train
|
public static boolean deleteDatabase(final StitchAppClientInfo appInfo,
final String serviceName,
final EmbeddedMongoClientFactory clientFactory,
final String userId) {
final String dataDir = appInfo.getDataDirectory();
if (dataDir == null) {
throw new IllegalArgumentException("StitchAppClient not configured with a data directory");
}
final String instanceKey = String.format(
"%s-%s_sync_%s_%s", appInfo.getClientAppId(), dataDir, serviceName, userId);
final String dbPath = String.format(
"%s/%s/sync_mongodb_%s/%s/0/", dataDir, appInfo.getClientAppId(), serviceName, userId);
final MongoClient client =
clientFactory.getClient(instanceKey, dbPath, appInfo.getCodecRegistry());
for (final String listDatabaseName : client.listDatabaseNames()) {
try {
client.getDatabase(listDatabaseName).drop();
} catch (Exception e) {
// do nothing
}
}
client.close();
clientFactory.removeClient(instanceKey);
return new File(dbPath).delete();
}
|
java
|
{
"resource": ""
}
|
q1428
|
RemoteMongoCollectionImpl.count
|
train
|
public Task<Long> count() {
return dispatcher.dispatchTask(new Callable<Long>() {
@Override
public Long call() {
return proxy.count();
}
});
}
|
java
|
{
"resource": ""
}
|
q1429
|
RemoteMongoCollectionImpl.insertOne
|
train
|
public Task<RemoteInsertOneResult> insertOne(final DocumentT document) {
return dispatcher.dispatchTask(new Callable<RemoteInsertOneResult>() {
@Override
public RemoteInsertOneResult call() {
return proxy.insertOne(document);
}
});
}
|
java
|
{
"resource": ""
}
|
q1430
|
UserPasswordAuthProviderClientImpl.registerWithEmail
|
train
|
public Task<Void> registerWithEmail(@NonNull final String email, @NonNull final String password) {
return dispatcher.dispatchTask(
new Callable<Void>() {
@Override
public Void call() {
registerWithEmailInternal(email, password);
return null;
}
});
}
|
java
|
{
"resource": ""
}
|
q1431
|
UserPasswordAuthProviderClientImpl.confirmUser
|
train
|
public Task<Void> confirmUser(@NonNull final String token, @NonNull final String tokenId) {
return dispatcher.dispatchTask(
new Callable<Void>() {
@Override
public Void call() {
confirmUserInternal(token, tokenId);
return null;
}
});
}
|
java
|
{
"resource": ""
}
|
q1432
|
UserPasswordAuthProviderClientImpl.resendConfirmationEmail
|
train
|
public Task<Void> resendConfirmationEmail(@NonNull final String email) {
return dispatcher.dispatchTask(
new Callable<Void>() {
@Override
public Void call() {
resendConfirmationEmailInternal(email);
return null;
}
});
}
|
java
|
{
"resource": ""
}
|
q1433
|
UserPasswordAuthProviderClientImpl.sendResetPasswordEmail
|
train
|
public Task<Void> sendResetPasswordEmail(@NonNull final String email) {
return dispatcher.dispatchTask(
new Callable<Void>() {
@Override
public Void call() {
sendResetPasswordEmailInternal(email);
return null;
}
});
}
|
java
|
{
"resource": ""
}
|
q1434
|
CoreRemoteMongoCollectionWriteModelContainer.commit
|
train
|
@Override
public boolean commit() {
final CoreRemoteMongoCollection<DocumentT> collection = getCollection();
final List<WriteModel<DocumentT>> writeModels = getBulkWriteModels();
    // define success as all operations succeeding for now
boolean success = true;
for (final WriteModel<DocumentT> write : writeModels) {
if (write instanceof ReplaceOneModel) {
final ReplaceOneModel<DocumentT> replaceModel = ((ReplaceOneModel) write);
final RemoteUpdateResult result =
collection.updateOne(replaceModel.getFilter(), (Bson) replaceModel.getReplacement());
success = success
&& (result != null && result.getModifiedCount() == result.getMatchedCount());
} else if (write instanceof UpdateOneModel) {
final UpdateOneModel<DocumentT> updateModel = ((UpdateOneModel) write);
final RemoteUpdateResult result =
collection.updateOne(updateModel.getFilter(), updateModel.getUpdate());
success = success
&& (result != null && result.getModifiedCount() == result.getMatchedCount());
} else if (write instanceof UpdateManyModel) {
final UpdateManyModel<DocumentT> updateModel = ((UpdateManyModel) write);
final RemoteUpdateResult result =
collection.updateMany(updateModel.getFilter(), updateModel.getUpdate());
success = success
&& (result != null && result.getModifiedCount() == result.getMatchedCount());
}
}
return success;
}
|
java
|
{
"resource": ""
}
|
q1435
|
StitchAppRequestClientImpl.doRequest
|
train
|
@Override
public Response doRequest(final StitchRequest stitchReq) {
initAppMetadata(clientAppId);
return super.doRequestUrl(stitchReq, getHostname());
}
|
java
|
{
"resource": ""
}
|
q1436
|
StitchAppRequestClientImpl.doStreamRequest
|
train
|
@Override
public EventStream doStreamRequest(final StitchRequest stitchReq) {
initAppMetadata(clientAppId);
return super.doStreamRequestUrl(stitchReq, getHostname());
}
|
java
|
{
"resource": ""
}
|
q1437
|
UpdateDescription.toUpdateDocument
|
train
|
public BsonDocument toUpdateDocument() {
final List<BsonElement> unsets = new ArrayList<>();
for (final String removedField : this.removedFields) {
unsets.add(new BsonElement(removedField, new BsonBoolean(true)));
}
final BsonDocument updateDocument = new BsonDocument();
if (this.updatedFields.size() > 0) {
updateDocument.append("$set", this.updatedFields);
}
if (unsets.size() > 0) {
updateDocument.append("$unset", new BsonDocument(unsets));
}
return updateDocument;
}
|
java
|
{
"resource": ""
}
|
q1438
|
UpdateDescription.toBsonDocument
|
train
|
public BsonDocument toBsonDocument() {
final BsonDocument updateDescDoc = new BsonDocument();
updateDescDoc.put(
Fields.UPDATED_FIELDS_FIELD,
this.getUpdatedFields());
final BsonArray removedFields = new BsonArray();
for (final String field : this.getRemovedFields()) {
removedFields.add(new BsonString(field));
}
updateDescDoc.put(
Fields.REMOVED_FIELDS_FIELD,
removedFields);
return updateDescDoc;
}
|
java
|
{
"resource": ""
}
|
q1439
|
UpdateDescription.fromBsonDocument
|
train
|
public static UpdateDescription fromBsonDocument(final BsonDocument document) {
keyPresent(Fields.UPDATED_FIELDS_FIELD, document);
keyPresent(Fields.REMOVED_FIELDS_FIELD, document);
final BsonArray removedFieldsArr =
document.getArray(Fields.REMOVED_FIELDS_FIELD);
final Set<String> removedFields = new HashSet<>(removedFieldsArr.size());
for (final BsonValue field : removedFieldsArr) {
removedFields.add(field.asString().getValue());
}
return new UpdateDescription(document.getDocument(Fields.UPDATED_FIELDS_FIELD), removedFields);
}
|
java
|
{
"resource": ""
}
|
q1440
|
UpdateDescription.merge
|
train
|
public UpdateDescription merge(@Nullable final UpdateDescription otherDescription) {
if (otherDescription != null) {
      // removeIf avoids the ConcurrentModificationException that removing from a
      // collection inside a for-each loop over that same collection would throw
      this.updatedFields.entrySet().removeIf(entry ->
          otherDescription.removedFields.contains(entry.getKey()));
      this.removedFields.removeIf(otherDescription.updatedFields::containsKey);
this.removedFields.addAll(otherDescription.removedFields);
this.updatedFields.putAll(otherDescription.updatedFields);
}
return this;
}
|
java
|
{
"resource": ""
}
|
q1441
|
CoreRemoteMongoCollectionImpl.watch
|
train
|
@Override
@SuppressWarnings("unchecked")
public Stream<ChangeEvent<DocumentT>> watch(final BsonValue... ids)
throws InterruptedException, IOException {
return operations.watch(
new HashSet<>(Arrays.asList(ids)),
false,
documentClass
).execute(service);
}
|
java
|
{
"resource": ""
}
|
q1442
|
SyncImpl.updateSyncFrequency
|
train
|
public Task<Void> updateSyncFrequency(@NonNull final SyncFrequency syncFrequency) {
return this.dispatcher.dispatchTask(new Callable<Void>() {
@Override
public Void call() throws Exception {
SyncImpl.this.proxy.updateSyncFrequency(syncFrequency);
return null;
}
});
}
|
java
|
{
"resource": ""
}
|
q1443
|
EventDispatcher.emitEvent
|
train
|
public void emitEvent(
final NamespaceSynchronizationConfig nsConfig,
final ChangeEvent<BsonDocument> event) {
listenersLock.lock();
try {
if (nsConfig.getNamespaceListenerConfig() == null) {
return;
}
final NamespaceListenerConfig namespaceListener =
nsConfig.getNamespaceListenerConfig();
eventDispatcher.dispatch(() -> {
try {
if (namespaceListener.getEventListener() != null) {
namespaceListener.getEventListener().onEvent(
BsonUtils.getDocumentId(event.getDocumentKey()),
ChangeEvents.transformChangeEventForUser(
event, namespaceListener.getDocumentCodec()));
}
} catch (final Exception ex) {
logger.error(String.format(
Locale.US,
"emitEvent ns=%s documentId=%s emit exception: %s",
event.getNamespace(),
BsonUtils.getDocumentId(event.getDocumentKey()),
ex), ex);
}
return null;
});
} finally {
listenersLock.unlock();
}
}
|
java
|
{
"resource": ""
}
|
q1444
|
ChangeEvents.changeEventForLocalInsert
|
train
|
static ChangeEvent<BsonDocument> changeEventForLocalInsert(
final MongoNamespace namespace,
final BsonDocument document,
final boolean writePending
) {
final BsonValue docId = BsonUtils.getDocumentId(document);
return new ChangeEvent<>(
new BsonDocument(),
OperationType.INSERT,
document,
namespace,
new BsonDocument("_id", docId),
null,
writePending);
}
|
java
|
{
"resource": ""
}
|
q1445
|
ChangeEvents.changeEventForLocalUpdate
|
train
|
static ChangeEvent<BsonDocument> changeEventForLocalUpdate(
final MongoNamespace namespace,
final BsonValue documentId,
final UpdateDescription update,
final BsonDocument fullDocumentAfterUpdate,
final boolean writePending
) {
return new ChangeEvent<>(
new BsonDocument(),
OperationType.UPDATE,
fullDocumentAfterUpdate,
namespace,
new BsonDocument("_id", documentId),
update,
writePending);
}
|
java
|
{
"resource": ""
}
|
q1446
|
ChangeEvents.changeEventForLocalReplace
|
train
|
static ChangeEvent<BsonDocument> changeEventForLocalReplace(
final MongoNamespace namespace,
final BsonValue documentId,
final BsonDocument document,
final boolean writePending
) {
return new ChangeEvent<>(
new BsonDocument(),
OperationType.REPLACE,
document,
namespace,
new BsonDocument("_id", documentId),
null,
writePending);
}
|
java
|
{
"resource": ""
}
|
q1447
|
ChangeEvents.changeEventForLocalDelete
|
train
|
static ChangeEvent<BsonDocument> changeEventForLocalDelete(
final MongoNamespace namespace,
final BsonValue documentId,
final boolean writePending
) {
return new ChangeEvent<>(
new BsonDocument(),
OperationType.DELETE,
null,
namespace,
new BsonDocument("_id", documentId),
null,
writePending);
}
|
java
|
{
"resource": ""
}
|
q1448
|
Assertions.notNull
|
train
|
public static <T> void notNull(final String name, final T value) {
if (value == null) {
throw new IllegalArgumentException(name + " can not be null");
}
}
|
java
|
{
"resource": ""
}
|
q1449
|
Assertions.keyPresent
|
train
|
public static void keyPresent(final String key, final Map<String, ?> map) {
if (!map.containsKey(key)) {
throw new IllegalStateException(
String.format("expected %s to be present", key));
}
}
|
java
|
{
"resource": ""
}
|
q1450
|
BsonUtils.copyOfDocument
|
train
|
public static BsonDocument copyOfDocument(final BsonDocument document) {
final BsonDocument newDocument = new BsonDocument();
for (final Map.Entry<String, BsonValue> kv : document.entrySet()) {
newDocument.put(kv.getKey(), kv.getValue());
}
return newDocument;
}
|
java
|
{
"resource": ""
}
|
q1451
|
DefaultSyncConflictResolvers.localWins
|
train
|
public static <T> ConflictHandler<T> localWins() {
return new ConflictHandler<T>() {
@Override
public T resolveConflict(
final BsonValue documentId,
final ChangeEvent<T> localEvent,
final ChangeEvent<T> remoteEvent
) {
return localEvent.getFullDocument();
}
};
}
|
java
|
{
"resource": ""
}
|
q1452
|
TodoItem.isTodoItem
|
train
|
public static boolean isTodoItem(final Document todoItemDoc) {
return todoItemDoc.containsKey(ID_KEY)
&& todoItemDoc.containsKey(TASK_KEY)
&& todoItemDoc.containsKey(CHECKED_KEY);
}
|
java
|
{
"resource": ""
}
|
q1453
|
DataSynchronizer.recover
|
train
|
void recover() {
final List<NamespaceSynchronizationConfig> nsConfigs = new ArrayList<>();
for (final MongoNamespace ns : this.syncConfig.getSynchronizedNamespaces()) {
nsConfigs.add(this.syncConfig.getNamespaceConfig(ns));
}
for (final NamespaceSynchronizationConfig nsConfig : nsConfigs) {
nsConfig.getLock().writeLock().lock();
}
try {
for (final NamespaceSynchronizationConfig nsConfig : nsConfigs) {
nsConfig.getLock().writeLock().lock();
try {
recoverNamespace(nsConfig);
} finally {
nsConfig.getLock().writeLock().unlock();
}
}
} finally {
for (final NamespaceSynchronizationConfig nsConfig : nsConfigs) {
nsConfig.getLock().writeLock().unlock();
}
}
}
|
java
|
{
"resource": ""
}
|
q1454
|
DataSynchronizer.recoverNamespace
|
train
|
private void recoverNamespace(final NamespaceSynchronizationConfig nsConfig) {
final MongoCollection<BsonDocument> undoCollection =
getUndoCollection(nsConfig.getNamespace());
final MongoCollection<BsonDocument> localCollection =
getLocalCollection(nsConfig.getNamespace());
final List<BsonDocument> undoDocs = undoCollection.find().into(new ArrayList<>());
final Set<BsonValue> recoveredIds = new HashSet<>();
// Replace local docs with undo docs. Presence of an undo doc implies we had a system failure
// during a write. This covers updates and deletes.
for (final BsonDocument undoDoc : undoDocs) {
final BsonValue documentId = BsonUtils.getDocumentId(undoDoc);
final BsonDocument filter = getDocumentIdFilter(documentId);
localCollection.findOneAndReplace(
filter, undoDoc, new FindOneAndReplaceOptions().upsert(true));
recoveredIds.add(documentId);
}
// If we recovered a document, but its pending writes are set to do something else, then the
// failure occurred after the pending writes were set, but before the undo document was
// deleted. In this case, we should restore the document to the state that the pending
// write indicates. There is a possibility that the pending write is from before the failed
// operation, but in that case, the findOneAndReplace or delete is a no-op since restoring
// the document to the state of the change event would be the same as recovering the undo
// document.
for (final CoreDocumentSynchronizationConfig docConfig : nsConfig.getSynchronizedDocuments()) {
final BsonValue documentId = docConfig.getDocumentId();
final BsonDocument filter = getDocumentIdFilter(documentId);
if (recoveredIds.contains(docConfig.getDocumentId())) {
final ChangeEvent<BsonDocument> pendingWrite = docConfig.getLastUncommittedChangeEvent();
if (pendingWrite != null) {
switch (pendingWrite.getOperationType()) {
case INSERT:
case UPDATE:
case REPLACE:
localCollection.findOneAndReplace(
filter,
pendingWrite.getFullDocument(),
new FindOneAndReplaceOptions().upsert(true)
);
break;
case DELETE:
localCollection.deleteOne(filter);
break;
default:
// There should never be pending writes with an unknown event type, but if someone
// is messing with the config collection we want to stop the synchronizer to prevent
// further data corruption.
throw new IllegalStateException(
"there should not be a pending write with an unknown event type"
);
}
}
}
}
// Delete all of our undo documents. If we've reached this point, we've recovered the local
// collection to the state we want with respect to all of our undo documents. If we fail before
// these deletes or while carrying out the deletes, but after recovering the documents to
// their desired state, that's okay because the next recovery pass will be effectively a no-op
// up to this point.
for (final BsonValue recoveredId : recoveredIds) {
undoCollection.deleteOne(getDocumentIdFilter(recoveredId));
}
// Find local documents for which there are no document configs and delete them. This covers
// inserts, upserts, and desync deletes. This will occur on any recovery pass regardless of
// the documents in the undo collection, so it's fine that we do this after deleting the undo
// documents.
localCollection.deleteMany(new BsonDocument(
"_id",
new BsonDocument(
"$nin",
new BsonArray(new ArrayList<>(
this.syncConfig.getSynchronizedDocumentIds(nsConfig.getNamespace()))))));
}
|
java
|
{
"resource": ""
}
|
q1455
|
DataSynchronizer.wipeInMemorySettings
|
train
|
public void wipeInMemorySettings() {
this.waitUntilInitialized();
syncLock.lock();
try {
this.instanceChangeStreamListener.stop();
if (instancesColl.find().first() == null) {
throw new IllegalStateException("expected to find instance configuration");
}
this.syncConfig = new InstanceSynchronizationConfig(configDb);
this.instanceChangeStreamListener = new InstanceChangeStreamListenerImpl(
syncConfig,
service,
networkMonitor,
authMonitor
);
this.isConfigured = false;
this.stop();
} finally {
syncLock.unlock();
}
}
|
java
|
{
"resource": ""
}
|
q1456
|
DataSynchronizer.start
|
train
|
public void start() {
syncLock.lock();
try {
if (!this.isConfigured) {
return;
}
instanceChangeStreamListener.stop();
if (listenersEnabled) {
instanceChangeStreamListener.start();
}
if (syncThread == null) {
syncThread = new Thread(
new DataSynchronizerRunner(
new WeakReference<>(this),
networkMonitor,
logger
),
"dataSynchronizerRunnerThread"
);
}
if (syncThreadEnabled && !isRunning) {
syncThread.start();
isRunning = true;
}
} finally {
syncLock.unlock();
}
}
|
java
|
{
"resource": ""
}
|
q1457
|
DataSynchronizer.stop
|
train
|
public void stop() {
syncLock.lock();
try {
if (syncThread == null) {
return;
}
instanceChangeStreamListener.stop();
syncThread.interrupt();
try {
syncThread.join();
} catch (final InterruptedException e) {
return;
}
syncThread = null;
isRunning = false;
} finally {
syncLock.unlock();
}
}
|
java
|
{
"resource": ""
}
|
q1458
|
DataSynchronizer.close
|
train
|
public void close() {
this.waitUntilInitialized();
this.ongoingOperationsGroup.blockAndWait();
syncLock.lock();
try {
if (this.networkMonitor != null) {
this.networkMonitor.removeNetworkStateListener(this);
}
this.dispatcher.close();
stop();
this.localClient.close();
} finally {
syncLock.unlock();
}
}
|
java
|
{
"resource": ""
}
|
q1459
|
DataSynchronizer.doSyncPass
|
train
|
public boolean doSyncPass() {
if (!this.isConfigured || !syncLock.tryLock()) {
return false;
}
try {
if (logicalT == Long.MAX_VALUE) {
if (logger.isInfoEnabled()) {
logger.info("reached max logical time; resetting back to 0");
}
logicalT = 0;
}
logicalT++;
if (logger.isInfoEnabled()) {
logger.info(String.format(
Locale.US,
"t='%d': doSyncPass START",
logicalT));
}
if (networkMonitor == null || !networkMonitor.isConnected()) {
if (logger.isInfoEnabled()) {
logger.info(String.format(
Locale.US,
"t='%d': doSyncPass END - Network disconnected",
logicalT));
}
return false;
}
if (authMonitor == null || !authMonitor.tryIsLoggedIn()) {
if (logger.isInfoEnabled()) {
logger.info(String.format(
Locale.US,
"t='%d': doSyncPass END - Logged out",
logicalT));
}
return false;
}
syncRemoteToLocal();
syncLocalToRemote();
if (logger.isInfoEnabled()) {
logger.info(String.format(
Locale.US,
"t='%d': doSyncPass END",
logicalT));
}
} catch (InterruptedException e) {
if (logger.isInfoEnabled()) {
logger.info(String.format(
Locale.US,
"t='%d': doSyncPass INTERRUPTED",
logicalT));
}
return false;
} finally {
syncLock.unlock();
}
return true;
}
|
java
|
{
"resource": ""
}
|
q1460
|
DataSynchronizer.resolveConflict
|
train
|
@CheckReturnValue
private LocalSyncWriteModelContainer resolveConflict(
final NamespaceSynchronizationConfig nsConfig,
final CoreDocumentSynchronizationConfig docConfig,
final ChangeEvent<BsonDocument> remoteEvent
) {
return resolveConflict(nsConfig, docConfig, docConfig.getLastUncommittedChangeEvent(),
remoteEvent);
}
|
java
|
{
"resource": ""
}
|
q1461
|
DataSynchronizer.resolveConflictWithResolver
|
train
|
@SuppressWarnings("unchecked")
private static Object resolveConflictWithResolver(
final ConflictHandler conflictResolver,
final BsonValue documentId,
final ChangeEvent localEvent,
final ChangeEvent remoteEvent
) {
return conflictResolver.resolveConflict(
documentId,
localEvent,
remoteEvent);
}
|
java
|
{
"resource": ""
}
|
q1462
|
DataSynchronizer.addWatcher
|
train
|
public void addWatcher(final MongoNamespace namespace,
final Callback<ChangeEvent<BsonDocument>, Object> watcher) {
instanceChangeStreamListener.addWatcher(namespace, watcher);
}
|
java
|
{
"resource": ""
}
|
q1463
|
DataSynchronizer.getSynchronizedDocuments
|
train
|
public Set<CoreDocumentSynchronizationConfig> getSynchronizedDocuments(
final MongoNamespace namespace
) {
this.waitUntilInitialized();
try {
ongoingOperationsGroup.enter();
return this.syncConfig.getSynchronizedDocuments(namespace);
} finally {
ongoingOperationsGroup.exit();
}
}
|
java
|
{
"resource": ""
}
|
q1464
|
DataSynchronizer.getPausedDocumentIds
|
train
|
public Set<BsonValue> getPausedDocumentIds(final MongoNamespace namespace) {
this.waitUntilInitialized();
try {
ongoingOperationsGroup.enter();
final Set<BsonValue> pausedDocumentIds = new HashSet<>();
for (final CoreDocumentSynchronizationConfig config :
this.syncConfig.getSynchronizedDocuments(namespace)) {
if (config.isPaused()) {
pausedDocumentIds.add(config.getDocumentId());
}
}
return pausedDocumentIds;
} finally {
ongoingOperationsGroup.exit();
}
}
|
java
|
{
"resource": ""
}
|
q1465
|
DataSynchronizer.resumeSyncForDocument
|
train
|
boolean resumeSyncForDocument(
final MongoNamespace namespace,
final BsonValue documentId
) {
if (namespace == null || documentId == null) {
return false;
}
final NamespaceSynchronizationConfig namespaceSynchronizationConfig;
final CoreDocumentSynchronizationConfig config;
if ((namespaceSynchronizationConfig = syncConfig.getNamespaceConfig(namespace)) == null
|| (config = namespaceSynchronizationConfig.getSynchronizedDocument(documentId)) == null) {
return false;
}
config.setPaused(false);
return !config.isPaused();
}
|
java
|
{
"resource": ""
}
|
q1466
|
DataSynchronizer.insertOne
|
train
|
void insertOne(final MongoNamespace namespace, final BsonDocument document) {
this.waitUntilInitialized();
try {
ongoingOperationsGroup.enter();
// Remove forbidden fields from the document before inserting it into the local collection.
final BsonDocument docForStorage = sanitizeDocument(document);
final NamespaceSynchronizationConfig nsConfig =
this.syncConfig.getNamespaceConfig(namespace);
final Lock lock = nsConfig.getLock().writeLock();
lock.lock();
final ChangeEvent<BsonDocument> event;
final BsonValue documentId;
try {
getLocalCollection(namespace).insertOne(docForStorage);
documentId = BsonUtils.getDocumentId(docForStorage);
event = ChangeEvents.changeEventForLocalInsert(namespace, docForStorage, true);
final CoreDocumentSynchronizationConfig config = syncConfig.addAndGetSynchronizedDocument(
namespace,
documentId
);
config.setSomePendingWritesAndSave(logicalT, event);
} finally {
lock.unlock();
}
checkAndInsertNamespaceListener(namespace);
eventDispatcher.emitEvent(nsConfig, event);
} finally {
ongoingOperationsGroup.exit();
}
}
|
java
|
{
"resource": ""
}
|
q1467
|
DataSynchronizer.deleteMany
|
train
|
DeleteResult deleteMany(final MongoNamespace namespace,
final Bson filter) {
this.waitUntilInitialized();
try {
ongoingOperationsGroup.enter();
final List<ChangeEvent<BsonDocument>> eventsToEmit = new ArrayList<>();
final DeleteResult result;
final NamespaceSynchronizationConfig nsConfig = this.syncConfig.getNamespaceConfig(namespace);
final Lock lock = nsConfig.getLock().writeLock();
lock.lock();
try {
final MongoCollection<BsonDocument> localCollection = getLocalCollection(namespace);
final MongoCollection<BsonDocument> undoCollection = getUndoCollection(namespace);
final Set<BsonValue> idsToDelete =
localCollection
.find(filter)
.map(new Function<BsonDocument, BsonValue>() {
@Override
@NonNull
public BsonValue apply(@NonNull final BsonDocument bsonDocument) {
undoCollection.insertOne(bsonDocument);
return BsonUtils.getDocumentId(bsonDocument);
}
}).into(new HashSet<>());
result = localCollection.deleteMany(filter);
for (final BsonValue documentId : idsToDelete) {
final CoreDocumentSynchronizationConfig config =
syncConfig.getSynchronizedDocument(namespace, documentId);
if (config == null) {
continue;
}
final ChangeEvent<BsonDocument> event =
ChangeEvents.changeEventForLocalDelete(namespace, documentId, true);
// this block is to trigger coalescence for a delete after insert
if (config.getLastUncommittedChangeEvent() != null
&& config.getLastUncommittedChangeEvent().getOperationType()
== OperationType.INSERT) {
desyncDocumentsFromRemote(nsConfig, config.getDocumentId())
.commitAndClear();
undoCollection.deleteOne(getDocumentIdFilter(documentId));
continue;
}
config.setSomePendingWritesAndSave(logicalT, event);
undoCollection.deleteOne(getDocumentIdFilter(documentId));
eventsToEmit.add(event);
}
checkAndDeleteNamespaceListener(namespace);
} finally {
lock.unlock();
}
for (final ChangeEvent<BsonDocument> event : eventsToEmit) {
eventDispatcher.emitEvent(nsConfig, event);
}
return result;
} finally {
ongoingOperationsGroup.exit();
}
}
|
java
|
{
"resource": ""
}
|
q1468
|
DataSynchronizer.getUndoCollection
|
train
|
MongoCollection<BsonDocument> getUndoCollection(final MongoNamespace namespace) {
return localClient
.getDatabase(String.format("sync_undo_%s", namespace.getDatabaseName()))
.getCollection(namespace.getCollectionName(), BsonDocument.class)
.withCodecRegistry(MongoClientSettings.getDefaultCodecRegistry());
}
|
java
|
{
"resource": ""
}
|
q1469
|
DataSynchronizer.getLocalCollection
|
train
|
private <T> MongoCollection<T> getLocalCollection(
final MongoNamespace namespace,
final Class<T> resultClass,
final CodecRegistry codecRegistry
) {
return localClient
.getDatabase(String.format("sync_user_%s", namespace.getDatabaseName()))
.getCollection(namespace.getCollectionName(), resultClass)
.withCodecRegistry(codecRegistry);
}
|
java
|
{
"resource": ""
}
|
q1470
|
DataSynchronizer.getLocalCollection
|
train
|
MongoCollection<BsonDocument> getLocalCollection(final MongoNamespace namespace) {
return getLocalCollection(
namespace,
BsonDocument.class,
MongoClientSettings.getDefaultCodecRegistry());
}
|
java
|
{
"resource": ""
}
|
q1471
|
DataSynchronizer.getRemoteCollection
|
train
|
private <T> CoreRemoteMongoCollection<T> getRemoteCollection(
final MongoNamespace namespace,
final Class<T> resultClass
) {
return remoteClient
.getDatabase(namespace.getDatabaseName())
.getCollection(namespace.getCollectionName(), resultClass);
}
|
java
|
{
"resource": ""
}
|
q1472
|
DataSynchronizer.sanitizeDocument
|
train
|
static BsonDocument sanitizeDocument(final BsonDocument document) {
if (document == null) {
return null;
}
if (document.containsKey(DOCUMENT_VERSION_FIELD)) {
final BsonDocument clonedDoc = document.clone();
clonedDoc.remove(DOCUMENT_VERSION_FIELD);
return clonedDoc;
}
return document;
}
|
java
|
{
"resource": ""
}
|
q1473
|
DataSynchronizer.withNewVersion
|
train
|
private static BsonDocument withNewVersion(
final BsonDocument document,
final BsonDocument newVersion
) {
final BsonDocument newDocument = BsonUtils.copyOfDocument(document);
newDocument.put(DOCUMENT_VERSION_FIELD, newVersion);
return newDocument;
}
|
java
|
{
"resource": ""
}
|
q1474
|
LocalMongoDbService.clearallLocalDBs
|
train
|
public static void clearallLocalDBs() {
for (final Map.Entry<MongoClient, Boolean> entry : localInstances.entrySet()) {
for (final String dbName : entry.getKey().listDatabaseNames()) {
entry.getKey().getDatabase(dbName).drop();
}
}
}
|
java
|
{
"resource": ""
}
|
q1475
|
StitchObjectMapper.withCodecRegistry
|
train
|
public StitchObjectMapper withCodecRegistry(final CodecRegistry codecRegistry) {
// We can't detect if their codecRegistry has any duplicate providers. There's also a chance
// that putting ours first may prevent decoding of some of their classes if for example they
// have their own way of decoding an Integer.
final CodecRegistry newReg =
CodecRegistries.fromRegistries(BsonUtils.DEFAULT_CODEC_REGISTRY, codecRegistry);
return new StitchObjectMapper(this, newReg);
}
|
java
|
{
"resource": ""
}
|
q1476
|
Stream.nextEvent
|
train
|
public StitchEvent<T> nextEvent() throws IOException {
final Event nextEvent = eventStream.nextEvent();
if (nextEvent == null) {
return null;
}
return StitchEvent.fromEvent(nextEvent, this.decoder);
}
|
java
|
{
"resource": ""
}
|
q1477
|
CoreStitchAuth.getUser
|
train
|
@Nullable
public StitchUserT getUser() {
authLock.readLock().lock();
try {
return activeUser;
} finally {
authLock.readLock().unlock();
}
}
|
java
|
{
"resource": ""
}
|
q1478
|
CoreStitchAuth.doAuthenticatedRequest
|
train
|
private synchronized Response doAuthenticatedRequest(
final StitchAuthRequest stitchReq,
final AuthInfo authInfo
) {
try {
return requestClient.doRequest(prepareAuthRequest(stitchReq, authInfo));
} catch (final StitchServiceException ex) {
return handleAuthFailure(ex, stitchReq);
}
}
|
java
|
{
"resource": ""
}
|
q1479
|
CoreStitchAuth.tryRefreshAccessToken
|
train
|
private void tryRefreshAccessToken(final Long reqStartedAt) {
authLock.writeLock().lock();
try {
if (!isLoggedIn()) {
throw new StitchClientException(StitchClientErrorCode.LOGGED_OUT_DURING_REQUEST);
}
try {
final Jwt jwt = Jwt.fromEncoded(getAuthInfo().getAccessToken());
if (jwt.getIssuedAt() >= reqStartedAt) {
return;
}
} catch (final IOException e) {
// Swallow
}
// retry
refreshAccessToken();
} finally {
authLock.writeLock().unlock();
}
}
|
java
|
{
"resource": ""
}
|
q1480
|
CoreStitchAuth.doLogin
|
train
|
private StitchUserT doLogin(final StitchCredential credential, final boolean asLinkRequest) {
final Response response = doLoginRequest(credential, asLinkRequest);
final StitchUserT previousUser = activeUser;
final StitchUserT user = processLoginResponse(credential, response, asLinkRequest);
if (asLinkRequest) {
onUserLinked(user);
} else {
onUserLoggedIn(user);
onActiveUserChanged(activeUser, previousUser);
}
return user;
}
|
java
|
{
"resource": ""
}
|
q1481
|
Interval.parseStartExtended
|
train
|
private static Interval parseStartExtended(CharSequence startStr, CharSequence endStr) {
Instant start = Instant.parse(startStr);
if (endStr.length() > 0) {
char c = endStr.charAt(0);
if (c == 'P' || c == 'p') {
PeriodDuration amount = PeriodDuration.parse(endStr);
// addition of PeriodDuration only supported by OffsetDateTime,
      // but to make that work, the point being added to needs to be moved closer to the EPOCH
long move = start.isBefore(Instant.EPOCH) ? 1000 * 86400 : -1000 * 86400;
Instant end = start.plusSeconds(move).atOffset(ZoneOffset.UTC).plus(amount).toInstant().minusSeconds(move);
return Interval.of(start, end);
}
}
// infer offset from start if not specified by end
return parseEndDateTime(start, ZoneOffset.UTC, endStr);
}
|
java
|
{
"resource": ""
}
|
q1482
|
Interval.parseEndDateTime
|
train
|
private static Interval parseEndDateTime(Instant start, ZoneOffset offset, CharSequence endStr) {
try {
TemporalAccessor temporal = DateTimeFormatter.ISO_DATE_TIME.parseBest(endStr, OffsetDateTime::from, LocalDateTime::from);
if (temporal instanceof OffsetDateTime) {
OffsetDateTime odt = (OffsetDateTime) temporal;
return Interval.of(start, odt.toInstant());
} else {
// infer offset from start if not specified by end
LocalDateTime ldt = (LocalDateTime) temporal;
return Interval.of(start, ldt.toInstant(offset));
}
} catch (DateTimeParseException ex) {
Instant end = Instant.parse(endStr);
return Interval.of(start, end);
}
}
|
java
|
{
"resource": ""
}
|
q1483
|
AccountingChronology.date
|
train
|
@Override
public AccountingDate date(int prolepticYear, int month, int dayOfMonth) {
return AccountingDate.of(this, prolepticYear, month, dayOfMonth);
}
|
java
|
{
"resource": ""
}
|
q1484
|
AccountingChronology.dateYearDay
|
train
|
@Override
public AccountingDate dateYearDay(Era era, int yearOfEra, int dayOfYear) {
return dateYearDay(prolepticYear(era, yearOfEra), dayOfYear);
}
|
java
|
{
"resource": ""
}
|
q1485
|
AccountingChronology.localDateTime
|
train
|
@Override
@SuppressWarnings("unchecked")
public ChronoLocalDateTime<AccountingDate> localDateTime(TemporalAccessor temporal) {
return (ChronoLocalDateTime<AccountingDate>) super.localDateTime(temporal);
}
|
java
|
{
"resource": ""
}
|
q1486
|
AccountingChronology.zonedDateTime
|
train
|
@Override
@SuppressWarnings("unchecked")
public ChronoZonedDateTime<AccountingDate> zonedDateTime(TemporalAccessor temporal) {
return (ChronoZonedDateTime<AccountingDate>) super.zonedDateTime(temporal);
}
|
java
|
{
"resource": ""
}
|
q1487
|
CopticChronology.date
|
train
|
@Override
public CopticDate date(int prolepticYear, int month, int dayOfMonth) {
return CopticDate.of(prolepticYear, month, dayOfMonth);
}
|
java
|
{
"resource": ""
}
|
q1488
|
CopticChronology.dateYearDay
|
train
|
@Override
public CopticDate dateYearDay(Era era, int yearOfEra, int dayOfYear) {
return dateYearDay(prolepticYear(era, yearOfEra), dayOfYear);
}
|
java
|
{
"resource": ""
}
|
q1489
|
CopticChronology.localDateTime
|
train
|
@Override
@SuppressWarnings("unchecked")
public ChronoLocalDateTime<CopticDate> localDateTime(TemporalAccessor temporal) {
return (ChronoLocalDateTime<CopticDate>) super.localDateTime(temporal);
}
|
java
|
{
"resource": ""
}
|
q1490
|
CopticChronology.zonedDateTime
|
train
|
@Override
@SuppressWarnings("unchecked")
public ChronoZonedDateTime<CopticDate> zonedDateTime(TemporalAccessor temporal) {
return (ChronoZonedDateTime<CopticDate>) super.zonedDateTime(temporal);
}
|
java
|
{
"resource": ""
}
|
q1491
|
YearQuarter.with
|
train
|
private YearQuarter with(int newYear, Quarter newQuarter) {
if (year == newYear && quarter == newQuarter) {
return this;
}
return new YearQuarter(newYear, newQuarter);
}
|
java
|
{
"resource": ""
}
|
q1492
|
BritishCutoverChronology.localDateTime
|
train
|
@Override
@SuppressWarnings("unchecked")
public ChronoLocalDateTime<BritishCutoverDate> localDateTime(TemporalAccessor temporal) {
return (ChronoLocalDateTime<BritishCutoverDate>) super.localDateTime(temporal);
}
|
java
|
{
"resource": ""
}
|
q1493
|
BritishCutoverChronology.zonedDateTime
|
train
|
@Override
@SuppressWarnings("unchecked")
public ChronoZonedDateTime<BritishCutoverDate> zonedDateTime(TemporalAccessor temporal) {
return (ChronoZonedDateTime<BritishCutoverDate>) super.zonedDateTime(temporal);
}
|
java
|
{
"resource": ""
}
|
q1494
|
YearWeek.weekRange
|
train
|
private static int weekRange(int weekBasedYear) {
LocalDate date = LocalDate.of(weekBasedYear, 1, 1);
// 53 weeks if year starts on Thursday, or Wed in a leap year
if (date.getDayOfWeek() == THURSDAY || (date.getDayOfWeek() == WEDNESDAY && date.isLeapYear())) {
return 53;
}
return 52;
}
|
java
|
{
"resource": ""
}
|
q1495
|
YearWeek.with
|
train
|
private YearWeek with(int newYear, int newWeek) {
if (year == newYear && week == newWeek) {
return this;
}
return of(newYear, newWeek);
}
|
java
|
{
"resource": ""
}
|
q1496
|
SystemUtcRules.register
|
train
|
void register(long mjDay, int leapAdjustment) {
if (leapAdjustment != -1 && leapAdjustment != 1) {
throw new IllegalArgumentException("Leap adjustment must be -1 or 1");
}
Data data = dataRef.get();
int pos = Arrays.binarySearch(data.dates, mjDay);
int currentAdj = pos > 0 ? data.offsets[pos] - data.offsets[pos - 1] : 0;
if (currentAdj == leapAdjustment) {
return; // matches previous definition
}
if (mjDay <= data.dates[data.dates.length - 1]) {
throw new IllegalArgumentException("Date must be after the last configured leap second date");
}
long[] dates = Arrays.copyOf(data.dates, data.dates.length + 1);
int[] offsets = Arrays.copyOf(data.offsets, data.offsets.length + 1);
long[] taiSeconds = Arrays.copyOf(data.taiSeconds, data.taiSeconds.length + 1);
int offset = offsets[offsets.length - 2] + leapAdjustment;
dates[dates.length - 1] = mjDay;
offsets[offsets.length - 1] = offset;
taiSeconds[taiSeconds.length - 1] = tai(mjDay, offset);
Data newData = new Data(dates, offsets, taiSeconds);
if (dataRef.compareAndSet(data, newData) == false) {
throw new ConcurrentModificationException("Unable to update leap second rules as they have already been updated");
}
}
|
java
|
{
"resource": ""
}
|
q1497
|
SystemUtcRules.loadLeapSeconds
|
train
|
private static Data loadLeapSeconds() {
Data bestData = null;
URL url = null;
try {
// this is the new location of the file, working on Java 8, Java 9 class path and Java 9 module path
Enumeration<URL> en = Thread.currentThread().getContextClassLoader().getResources("META-INF/" + LEAP_SECONDS_TXT);
while (en.hasMoreElements()) {
url = en.nextElement();
Data candidate = loadLeapSeconds(url);
if (bestData == null || candidate.getNewestDate() > bestData.getNewestDate()) {
bestData = candidate;
}
}
// this location does not work on Java 9 module path because the resource is encapsulated
en = Thread.currentThread().getContextClassLoader().getResources(LEAP_SECONDS_TXT);
while (en.hasMoreElements()) {
url = en.nextElement();
Data candidate = loadLeapSeconds(url);
if (bestData == null || candidate.getNewestDate() > bestData.getNewestDate()) {
bestData = candidate;
}
}
// this location is the canonical one, and class-based loading works on Java 9 module path
url = SystemUtcRules.class.getResource("/" + LEAP_SECONDS_TXT);
if (url != null) {
Data candidate = loadLeapSeconds(url);
if (bestData == null || candidate.getNewestDate() > bestData.getNewestDate()) {
bestData = candidate;
}
}
} catch (Exception ex) {
throw new RuntimeException("Unable to load time-zone rule data: " + url, ex);
}
if (bestData == null) {
// no data on classpath, but we allow manual registration of leap seconds
// setup basic known data - MJD 1972-01-01 is 41317L, where offset was 10
bestData = new Data(new long[] {41317L}, new int[] {10}, new long[] {tai(41317L, 10)});
}
return bestData;
}
|
java
|
{
"resource": ""
}
|
q1498
|
SystemUtcRules.loadLeapSeconds
|
train
|
private static Data loadLeapSeconds(URL url) throws ClassNotFoundException, IOException {
List<String> lines;
try (BufferedReader reader = new BufferedReader(new InputStreamReader(url.openStream(), StandardCharsets.UTF_8))) {
lines = reader.lines().collect(Collectors.toList());
}
List<Long> dates = new ArrayList<>();
List<Integer> offsets = new ArrayList<>();
for (String line : lines) {
line = line.trim();
if (line.isEmpty() || line.startsWith("#")) {
continue;
}
Matcher matcher = LEAP_FILE_FORMAT.matcher(line);
if (matcher.matches() == false) {
throw new StreamCorruptedException("Invalid leap second file");
}
dates.add(LocalDate.parse(matcher.group(1)).getLong(JulianFields.MODIFIED_JULIAN_DAY));
offsets.add(Integer.valueOf(matcher.group(2)));
}
long[] datesData = new long[dates.size()];
int[] offsetsData = new int[dates.size()];
long[] taiData = new long[dates.size()];
for (int i = 0; i < datesData.length; i++) {
datesData[i] = dates.get(i);
offsetsData[i] = offsets.get(i);
taiData[i] = tai(datesData[i], offsetsData[i]);
}
return new Data(datesData, offsetsData, taiData);
}
|
java
|
{
"resource": ""
}
|
q1499
|
InternationalFixedDate.create
|
train
|
static InternationalFixedDate create(int prolepticYear, int month, int dayOfMonth) {
YEAR_RANGE.checkValidValue(prolepticYear, ChronoField.YEAR_OF_ERA);
MONTH_OF_YEAR_RANGE.checkValidValue(month, ChronoField.MONTH_OF_YEAR);
DAY_OF_MONTH_RANGE.checkValidValue(dayOfMonth, ChronoField.DAY_OF_MONTH);
if (dayOfMonth == DAYS_IN_LONG_MONTH && month != 6 && month != MONTHS_IN_YEAR) {
throw new DateTimeException("Invalid date: " + prolepticYear + '/' + month + '/' + dayOfMonth);
}
if (month == 6 && dayOfMonth == DAYS_IN_LONG_MONTH && !INSTANCE.isLeapYear(prolepticYear)) {
throw new DateTimeException("Invalid Leap Day as '" + prolepticYear + "' is not a leap year");
}
return new InternationalFixedDate(prolepticYear, month, dayOfMonth);
}
|
java
|
{
"resource": ""
}
|