Skip to content

Commit

Permalink
fix typos in test java files (apache#4380)
Browse files Browse the repository at this point in the history
Signed-off-by: ZhangJian He <[email protected]>
  • Loading branch information
shoothzj authored and Anup Ghatage committed Jul 12, 2024
1 parent 2b3247d commit 3229970
Show file tree
Hide file tree
Showing 69 changed files with 157 additions and 157 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,7 @@ public void destroyResourceWhenRefCountReachesZero() {
assertEquals(SharedResourceManager.DESTROY_DELAY_SECONDS,
scheduledDestroyTask.getDelay(TimeUnit.SECONDS));

// Simluate that the destroyer executes the foo destroying task
// Simulate that the destroyer executes the foo destroying task
scheduledDestroyTask.runTask();
assertTrue(sharedFoo.closed);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -47,15 +47,15 @@
* Ledger index file is made of a header and several fixed-length index pages, which records the offsets of data stored
* in entry loggers
* <pre>&lt;header&gt;&lt;index pages&gt;</pre>
* <b>Header</b> is formated as below:
* <b>Header</b> is formatted as below:
* <pre>&lt;magic bytes&gt;&lt;len of master key&gt;&lt;master key&gt;</pre>
* <ul>
* <li>magic bytes: 4 bytes, 'BKLE', version: 4 bytes
* <li>len of master key: indicates length of master key. -1 means no master key stored in header.
* <li>master key: master key
* <li>state: bit map to indicate the state, 32 bits.
* </ul>
* <b>Index page</b> is a fixed-length page, which contains serveral entries which point to the offsets of data stored
* <b>Index page</b> is a fixed-length page, which contains several entries which point to the offsets of data stored
* in entry loggers.
* </p>
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ CachedFileInfo loadFileInfo(long ledgerId, byte[] masterKey) throws IOException
// and if it is called (and succeeds) the fi will have been
// removed from fileInfos at the same time, so we should not
// have been able to get a reference to it here.
// The caller of loadFileInfo owns the refence, and is
// The caller of loadFileInfo owns the reference, and is
// responsible for calling the corresponding #release().
return tryRetainFileInfo(fi);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -661,7 +661,7 @@ long getPersistEntryBeyondInMem(long ledgerId, long lastEntryInMem) throws IOExc
fi = getFileInfo(ledgerId, null);
long size = fi.size();
// make sure the file size is aligned with index entry size
// otherwise we may read incorret data
// otherwise we may read incorrect data
if (0 != size % LedgerEntryPage.getIndexEntrySize()) {
LOG.warn("Index file of ledger {} is not aligned with index entry size.", ledgerId);
size = size - size % LedgerEntryPage.getIndexEntrySize();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ public interface NetworkTopology {
void add(Node node);

/**
* Remove a node from nework topology.
* Remove a node from network topology.
*
* @param node
* remove the node from network topology
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -162,7 +162,7 @@ private String getNextAncestorName(Node n) {
boolean add(Node n) {
if (!isAncestor(n)) {
throw new IllegalArgumentException(n.getName() + ", which is located at " + n.getNetworkLocation()
+ ", is not a decendent of " + getPath(this));
+ ", is not a descendant of " + getPath(this));
}
if (isParent(n)) {
// this node is the parent of n; add n directly
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
import org.apache.bookkeeper.tools.framework.CliSpec;

/**
* Intializes new cluster by creating required znodes for the cluster. If
* Initializes new cluster by creating required znodes for the cluster. If
* ledgersrootpath is already existing then it will error out. If for any
* reason it errors out while creating znodes for the cluster, then before
* running initnewcluster again, try nuking existing cluster by running
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -273,7 +273,7 @@ public void testMultiMessageAuthFailure() throws Exception {
fail("Shouldn't get this far");
} catch (BKException.BKUnauthorizedAccessException bke) {
// bookie should have sent a negative response before
// breaking the conneciton
// breaking the connection
}
assertFalse(ledgerId.get() == -1);
assertEquals("Shouldn't have entry", 0, entryCount(ledgerId.get(), bookieConf, clientConf));
Expand All @@ -300,7 +300,7 @@ public void testDifferentPluginFailure() throws Exception {
fail("Shouldn't get this far");
} catch (BKException.BKUnauthorizedAccessException bke) {
// bookie should have sent a negative response before
// breaking the conneciton
// breaking the connection
assertEquals(ProtocolVersion.ProtocolV3, protocolVersion);
} catch (BKException.BKNotEnoughBookiesException nebe) {
// With V2 we don't get the authorization error, but rather just
Expand All @@ -316,7 +316,7 @@ public void testDifferentPluginFailure() throws Exception {
* doesn't implement the interface, we fail predictably.
*/
@Test
public void testExistantButNotValidPlugin() throws Exception {
public void testExistentButNotValidPlugin() throws Exception {
ServerConfiguration bookieConf = newServerConfiguration();
bookieConf.setBookieAuthProviderFactoryClass(
"java.lang.String");
Expand Down Expand Up @@ -351,14 +351,14 @@ public void testExistantButNotValidPlugin() throws Exception {
* break.
*/
@Test
public void testNonExistantPlugin() throws Exception {
public void testNonExistentPlugin() throws Exception {
ServerConfiguration bookieConf = newServerConfiguration();
bookieConf.setBookieAuthProviderFactoryClass(
"NonExistantClassNameForTestingAuthPlugins");
"NonExistentClassNameForTestingAuthPlugins");

ClientConfiguration clientConf = newClientConfiguration();
clientConf.setClientAuthProviderFactoryClass(
"NonExistantClassNameForTestingAuthPlugins");
"NonExistentClassNameForTestingAuthPlugins");
try {
startAndStoreBookie(bookieConf);
fail("Shouldn't get this far");
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -324,7 +324,7 @@ public void testBookieRegistrationWithSameZooKeeperClient() throws Exception {
"Bookie registration node doesn't exists!",
rm.isBookieRegistered(bookieId));

// test register bookie again if the registeration node is created by itself.
// test register bookie again if the registration node is created by itself.
manager.registerBookie(true).get();
assertTrue(
"Bookie registration node doesn't exists!",
Expand Down Expand Up @@ -742,11 +742,11 @@ bkConf, new TestBookieImpl(conf),
* OutOfMemoryError.
*/
public static class MockInterleavedLedgerStorage extends InterleavedLedgerStorage {
AtomicInteger atmoicInt = new AtomicInteger(0);
AtomicInteger atomicInt = new AtomicInteger(0);

@Override
public long addEntry(ByteBuf entry) throws IOException {
if (atmoicInt.incrementAndGet() == 10) {
if (atomicInt.incrementAndGet() == 10) {
throw new OutOfMemoryError("Some Injected Exception");
}
return super.addEntry(entry);
Expand Down Expand Up @@ -1676,7 +1676,7 @@ private void bookieConnectAfterCookieDeleteWorker(ServerConfiguration conf, Regi

final BookieId bookieAddress = BookieImpl.getBookieId(conf);

// Read cookie from registation manager
// Read cookie from registration manager
Versioned<Cookie> rmCookie = Cookie.readFromRegistrationManager(rm, bookieAddress);

// Shutdown bookie
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -862,7 +862,7 @@ public List<File> getWritableLedgerDirsForNewLog() throws NoWritableLedgerDirExc
/*
* In a new thread, create newlog for 'firstLedgerId' and then set
* 'newLogCreated' to true. Since this is the first createNewLog call,
* it is going to be blocked untill latch is countdowned to 0.
* it is going to be blocked until latch is countdowned to 0.
*/
new Thread() {
@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1382,7 +1382,7 @@ public void testExpiryRemovalByAccessingNonCacheRelatedMethods() throws Exceptio
BufferedLogChannel newLogChannel = createDummyBufferedLogChannel(entryLogger, 1, conf);
entryLogManager.setCurrentLogForLedgerAndAddToRotate(ledgerId, newLogChannel);

AtomicBoolean exceptionOccured = new AtomicBoolean(false);
AtomicBoolean exceptionOccurred = new AtomicBoolean(false);
Thread t = new Thread() {
public void run() {
try {
Expand All @@ -1402,15 +1402,15 @@ public void run() {
entryLogManager.getCurrentLogIfPresent(newLedgerId);
} catch (Exception e) {
LOG.error("Got Exception in thread", e);
exceptionOccured.set(true);
exceptionOccurred.set(true);
}
}
};

t.start();
Thread.sleep(evictionPeriod * 1000 + 100);
entryLogManager.doEntryLogMapCleanup();
Assert.assertFalse("Exception occured in thread, which is not expected", exceptionOccured.get());
Assert.assertFalse("Exception occurred in thread, which is not expected", exceptionOccurred.get());

/*
* since for more than evictionPeriod, that ledger is not accessed and cache is cleaned up, mapping for that
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -164,7 +164,7 @@ public void testGetFileInfoReadBeforeWrite() throws Exception {
indexPersistenceMgr.getFileInfo(lid, null);
fail("Should fail get file info for reading if the file doesn't exist");
} catch (Bookie.NoLedgerException nle) {
// exepcted
// expected
}
assertEquals(0, indexPersistenceMgr.writeFileInfoCache.size());
assertEquals(0, indexPersistenceMgr.readFileInfoCache.size());
Expand Down Expand Up @@ -468,7 +468,7 @@ void validateFileInfo(IndexPersistenceMgr indexPersistenceMgr, long ledgerId, in
assertEquals("explicitLac ByteBuf contents should match", 0,
ByteBufUtil.compare(explicitLacByteBuf, indexPersistenceMgr.getExplicitLac(ledgerId)));
/*
* release fileInfo untill it is marked dead and closed, so that
* release fileInfo until it is marked dead and closed, so that
* contents of it are persisted.
*/
while (fileInfo.refCount.get() != FileInfoBackingCache.DEAD_REF) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -225,7 +225,7 @@ public void testPageEviction() throws Exception {
// create ledger cache
newLedgerCache();
try {
// create serveral ledgers
// create several ledgers
for (int i = 1; i <= numLedgers; i++) {
ledgerCache.setMasterKey((long) i, masterKey);
ledgerCache.putEntryOffset(i, 0, i * 8);
Expand All @@ -237,7 +237,7 @@ public void testPageEviction() throws Exception {
// flush all
ledgerCache.flushLedger(true);

// delete serveral ledgers
// delete several ledgers
for (int i = 1; i <= numLedgers / 2; i++) {
ledgerCache.deleteLedger(i);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -264,7 +264,7 @@ conf, new TestBookieImpl(conf),
LogMark curMarkAfterFirstSetOfAdds = lastLogMarkAfterFirstSetOfAdds.getCurMark();

File lastMarkFile = new File(ledgerDir, "lastMark");
// lastMark file should be zero, because checkpoint hasn't happenend
// lastMark file should be zero, because checkpoint hasn't happened
LogMark logMarkFileBeforeCheckpoint = readLastMarkFile(lastMarkFile);
Assert.assertEquals("lastMarkFile before checkpoint should be zero", 0,
logMarkFileBeforeCheckpoint.compare(new LogMark()));
Expand All @@ -285,7 +285,7 @@ conf, new TestBookieImpl(conf),
LogMark curMarkAfterCheckpoint = lastLogMarkAfterCheckpoint.getCurMark();

LogMark rolledLogMark = readLastMarkFile(lastMarkFile);
Assert.assertNotEquals("rolledLogMark should not be zero, since checkpoint has happenend", 0,
Assert.assertNotEquals("rolledLogMark should not be zero, since checkpoint has happened", 0,
rolledLogMark.compare(new LogMark()));
/*
* Curmark should be equal before and after checkpoint, because we didnt
Expand Down Expand Up @@ -561,7 +561,7 @@ conf, new TestBookieImpl(conf),
executorController.advance(Duration.ofMillis(conf.getFlushInterval()));

/*
* since checkpoint happenend, there shouldn't be any logChannelsToFlush
* since checkpoint happened, there shouldn't be any logChannelsToFlush
* and bytesWrittenSinceLastFlush should be zero.
*/
List<DefaultEntryLogger.BufferedLogChannel> copyOfRotatedLogChannels = entryLogManager.getRotatedLogChannels();
Expand Down Expand Up @@ -676,7 +676,7 @@ conf, new TestBookieImpl(conf),
Assert.assertTrue("lastMark file must be existing, because checkpoint should have happened",
lastMarkFile.exists());
LogMark rolledLogMark = readLastMarkFile(lastMarkFile);
Assert.assertNotEquals("rolledLogMark should not be zero, since checkpoint has happenend", 0,
Assert.assertNotEquals("rolledLogMark should not be zero, since checkpoint has happened", 0,
rolledLogMark.compare(new LogMark()));

bkClient.close();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -185,7 +185,7 @@ static File initV2LedgerDirectoryWithIndexDir(File ledgerDir, File indexDir) thr
return ledgerDir;
}

private static void testUpgradeProceedure(String zkServers, String journalDir, String ledgerDir, String indexDir)
private static void testUpgradeProcedure(String zkServers, String journalDir, String ledgerDir, String indexDir)
throws Exception {
ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
conf.setMetadataServiceUri("zk://" + zkServers + "/ledgers");
Expand Down Expand Up @@ -250,7 +250,7 @@ private static void testUpgradeProceedure(String zkServers, String journalDir, S
public void testUpgradeV1toCurrent() throws Exception {
File journalDir = initV1JournalDirectory(tmpDirs.createNew("bookie", "journal"));
File ledgerDir = initV1LedgerDirectory(tmpDirs.createNew("bookie", "ledger"));
testUpgradeProceedure(zkUtil.getZooKeeperConnectString(), journalDir.getPath(),
testUpgradeProcedure(zkUtil.getZooKeeperConnectString(), journalDir.getPath(),
ledgerDir.getPath(), ledgerDir.getPath());
}

Expand All @@ -260,7 +260,7 @@ public void testUpgradeV1toCurrentWithIndexDir() throws Exception {
File indexDir = tmpDirs.createNew("bookie", "index");
File ledgerDir = initV1LedgerDirectoryWithIndexDir(
tmpDirs.createNew("bookie", "ledger"), indexDir);
testUpgradeProceedure(zkUtil.getZooKeeperConnectString(), journalDir.getPath(),
testUpgradeProcedure(zkUtil.getZooKeeperConnectString(), journalDir.getPath(),
ledgerDir.getPath(), indexDir.getPath());
}

Expand All @@ -269,7 +269,7 @@ public void testUpgradeV2toCurrent() throws Exception {
File journalDir = initV2JournalDirectory(tmpDirs.createNew("bookie", "journal"));
File ledgerDir = initV2LedgerDirectory(tmpDirs.createNew("bookie", "ledger"));
File indexDir = tmpDirs.createNew("bookie", "index");
testUpgradeProceedure(zkUtil.getZooKeeperConnectString(), journalDir.getPath(),
testUpgradeProcedure(zkUtil.getZooKeeperConnectString(), journalDir.getPath(),
ledgerDir.getPath(), indexDir.getPath());
}

Expand All @@ -279,7 +279,7 @@ public void testUpgradeV2toCurrentWithIndexDir() throws Exception {
File indexDir = tmpDirs.createNew("bookie", "index");
File ledgerDir = initV2LedgerDirectoryWithIndexDir(
tmpDirs.createNew("bookie", "ledger"), indexDir);
testUpgradeProceedure(zkUtil.getZooKeeperConnectString(), journalDir.getPath(),
testUpgradeProcedure(zkUtil.getZooKeeperConnectString(), journalDir.getPath(),
ledgerDir.getPath(), indexDir.getPath());
}

Expand All @@ -304,7 +304,7 @@ public void testUpgradeCurrent(boolean hasIndexDir) throws Exception {
initV2LedgerDirectory(ledgerDir);
}

testUpgradeProceedure(zkUtil.getZooKeeperConnectString(), journalDir.getPath(),
testUpgradeProcedure(zkUtil.getZooKeeperConnectString(), journalDir.getPath(),
ledgerDir.getPath(), indexDir.getPath());

// Upgrade again
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -125,8 +125,8 @@ public void testReadLedgerIndexEntries() throws Exception {
try {
for (ledgerId = TEST_LEDGER_MIN_ID; ledgerId <= TEST_LEDGER_MAX_ID; ledgerId++) {
BlockingQueue<Long> entrys = new ArrayBlockingQueue<>(TEST_ENTRY_MAX_ID + 1);
DbLedgerStorage.readLedgerIndexEntries(ledgerId, conf, (eId, entryLodId, pos) -> {
System.out.println("entry " + eId + "\t:\t(log: " + entryLodId + ", pos: " + pos + ")");
DbLedgerStorage.readLedgerIndexEntries(ledgerId, conf, (eId, entryLogId, pos) -> {
System.out.println("entry " + eId + "\t:\t(log: " + entryLogId + ", pos: " + pos + ")");
entrys.add(eId);
});
for (long entryId = TEST_ENTRY_MIN_ID; entryId <= TEST_ENTRY_MAX_ID; entryId++) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ public void createComplete(int rc, LedgerHandle lh, Object ctx) {
// wait for creating the ledger
assertTrue("create ledger call should have completed",
openLatch.await(20, TimeUnit.SECONDS));
assertEquals("Succesfully created ledger through closed bkclient!",
assertEquals("Successfully created ledger through closed bkclient!",
BKException.Code.ClientClosedException, returnCode.get());
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -218,7 +218,7 @@ public void testAsyncReadWithError() throws Exception {
final AtomicInteger result = new AtomicInteger(0);
final CountDownLatch counter = new CountDownLatch(1);

// Try to write, we shoud get and error callback but not an exception
// Try to write, we should get and error callback but not an exception
lh.asyncAddEntry("test".getBytes(), new AddCallback() {
public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) {
result.set(rc);
Expand Down Expand Up @@ -398,7 +398,7 @@ public void testReadAfterLastAddConfirmed() throws Exception {
// without readUnconfirmedEntries we are not able to read all of the entries
try {
rlh.readEntries(0, numOfEntries - 1);
fail("shoud not be able to read up to " + (numOfEntries - 1) + " with readEntries");
fail("should not be able to read up to " + (numOfEntries - 1) + " with readEntries");
} catch (BKException.BKReadException expected) {
}

Expand Down Expand Up @@ -480,7 +480,7 @@ public void testReadAfterLastAddConfirmed() throws Exception {
// without readUnconfirmedEntries we are not able to read all of the entries
try {
rlh.readEntries(0, numOfEntries - 1);
fail("shoud not be able to read up to " + (numOfEntries - 1) + " with readEntries");
fail("should not be able to read up to " + (numOfEntries - 1) + " with readEntries");
} catch (BKException.BKReadException expected) {
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -466,7 +466,7 @@ public void testLedgerCreateAdvAndWriteNonAdv() throws Exception {
}

/**
* Verify that LedgerHandleAdv cannnot handle addEntry without the entryId.
* Verify that LedgerHandleAdv cannot handle addEntry without the entryId.
*
* @throws Exception
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ public void testForceRequiresFullEnsemble() throws Exception {
}

@Test
public void testForceWillAdvanceLacOnlyUpToLastAcknoledgedWrite() throws Exception {
public void testForceWillAdvanceLacOnlyUpToLastAcknowledgedWrite() throws Exception {
try (WriteHandle wh = result(newCreateLedgerOp()
.withEnsembleSize(3)
.withWriteQuorumSize(3)
Expand Down
Loading

0 comments on commit 3229970

Please sign in to comment.