Merge branch 'develop' into test-removing-clean-up

jyang-broad authored Dec 8, 2023
2 parents 888039f + 012bc9a · commit 3ca2c0b

Showing 28 changed files with 515 additions and 174 deletions.
3 changes: 3 additions & 0 deletions .git-blame-ignore-revs
@@ -20,3 +20,6 @@ b9cc35b06928aa1792fee88af0419d5087a7a800

 # Scala Steward: Reformat with scalafmt 3.7.12
 8448f2850081d53520aec809b0d678e0866a73c0
+
+# Scala Steward: Reformat with scalafmt 3.7.17
+a4933777d38af3b80fdf1d70bf1f5cac3b84ec9d
2 changes: 1 addition & 1 deletion .scalafmt.conf
@@ -1,4 +1,4 @@
-version = 3.7.12
+version = 3.7.17
 align = none
 align.openParenCallSite = true
 align.openParenDefnSite = true
6 changes: 3 additions & 3 deletions automation/project/Dependencies.scala
@@ -5,7 +5,7 @@ object Dependencies {

   val akkaV = "2.6.8"
   val akkaHttpV = "10.2.0"
-  val jacksonV = "2.15.2"
+  val jacksonV = "2.15.3"

   val workbenchLibsHash = "a562dff"
   val serviceTestV = s"4.2-${workbenchLibsHash}"
@@ -28,7 +28,7 @@ object Dependencies {
   val workbenchGoogle2: ModuleID = "org.broadinstitute.dsde.workbench" %% "workbench-google2" % workbenchGoogle2V exclude ("org.slf4j", "slf4j-api")
   val workbenchServiceTest: ModuleID = "org.broadinstitute.dsde.workbench" %% "workbench-service-test" % serviceTestV % "test" classifier "tests" excludeAll (workbenchExclusions :+ rawlsModelExclusion: _*)

-  val workspaceManager: ModuleID = "bio.terra" % "workspace-manager-client" % "0.254.950-SNAPSHOT"
+  val workspaceManager: ModuleID = "bio.terra" % "workspace-manager-client" % "0.254.967-SNAPSHOT"
   val dataRepo: ModuleID = "bio.terra" % "datarepo-client" % "1.41.0-SNAPSHOT"
   val dataRepoJersey: ModuleID = "org.glassfish.jersey.inject" % "jersey-hk2" % "2.32" // scala-steward:off (must match TDR)

@@ -39,7 +39,7 @@ object Dependencies {
     "com.fasterxml.jackson.core" % "jackson-databind" % jacksonV,
     "com.fasterxml.jackson.core" % "jackson-core" % jacksonV,
     "com.fasterxml.jackson.module" % ("jackson-module-scala_" + scalaV) % jacksonV,
-    "ch.qos.logback" % "logback-classic" % "1.2.3",
+    "ch.qos.logback" % "logback-classic" % "1.4.14",
     "net.logstash.logback" % "logstash-logback-encoder" % "6.6",
     "com.google.apis" % "google-api-services-oauth2" % "v1-rev112-1.22.0" excludeAll (
       ExclusionRule("com.google.guava", "guava-jdk5"),
2 changes: 1 addition & 1 deletion automation/project/build.properties
@@ -1 +1 @@
-sbt.version = 1.9.4
+sbt.version = 1.9.7
@@ -31,8 +31,8 @@ trait PipelineInjector {
         seq <- json.as[Seq[UserMetadata]]
       } yield seq
       userMetadataSeq match {
-      case Right(u) => u
-      case Left(_)  => Seq()
+        case Right(u) => u
+        case Left(_)  => Seq()
       }
     case _ => Seq()
   }
@@ -42,7 +42,7 @@ case class UserMetadata(email: String, `type`: UserType, bearer: String) {
  * Companion object containing some useful methods for UserMetadata.
  */
 object UserMetadata {
-  //implicit val userMetadataDecoder: Decoder[UserMetadata] = deriveDecoder[UserMetadata]
+  // implicit val userMetadataDecoder: Decoder[UserMetadata] = deriveDecoder[UserMetadata]
   implicit val userMetadataDecoder: Decoder[UserMetadata] = (c: HCursor) =>
     for {
       email <- c.downField("email").as[String]
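As context for the hand-rolled decoder above, here is a minimal usage sketch with io.circe.parser. The field names ("email", "type", "bearer") come from the decoder in this hunk; the payload values and the "student" user type are invented for illustration.

import io.circe.parser.decode

// Hypothetical payload: field names match the decoder above, values are made up.
val json = """{ "email": "test.user@example.org", "type": "student", "bearer": "token-123" }"""

// Resolves the implicit userMetadataDecoder from the UserMetadata companion object.
decode[UserMetadata](json) match {
  case Right(user) => println(s"decoded user ${user.email}")
  case Left(error) => println(s"decode failed: $error")
}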
@@ -125,4 +125,5 @@
     <include file="changesets/20230810_track_mrb_sts_progress.xml" relativeToChangelogFile="true"/>
     <include file="changesets/20230829_mrb_add_sts_project.xml" relativeToChangelogFile="true"/>
     <include file="changesets/20231013_submission_monitor_script.xml" relativeToChangelogFile="true"/>
+    <include file="changesets/20231130_limit_clone_workspace_file_transfer.xml" relativeToChangelogFile="true"/>
 </databaseChangeLog>
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<databaseChangeLog logicalFilePath="dummy" xmlns="http://www.liquibase.org/xml/ns/dbchangelog"
+                   xmlns:ext="http://www.liquibase.org/xml/ns/dbchangelog-ext"
+                   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+                   xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog-ext http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-ext.xsd http://www.liquibase.org/xml/ns/dbchangelog http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-3.4.xsd">
+    <changeSet logicalFilePath="dummy" author="mtalbott" id="add_timestamps_outcome_CLONE_WORKSPACE_FILE_TRANSFER">
+        <addColumn tableName="CLONE_WORKSPACE_FILE_TRANSFER">
+            <column name="CREATED" type="DATETIME" defaultValueComputed="CURRENT_TIMESTAMP">
+                <constraints nullable="false" />
+            </column>
+        </addColumn>
+        <addColumn tableName="CLONE_WORKSPACE_FILE_TRANSFER">
+            <column name="FINISHED" type="DATETIME">
+                <constraints nullable="true" />
+            </column>
+        </addColumn>
+        <addColumn tableName="CLONE_WORKSPACE_FILE_TRANSFER">
+            <column name="OUTCOME" type="VARCHAR(254)">
+                <constraints nullable="true" />
+            </column>
+        </addColumn>
+    </changeSet>
+</databaseChangeLog>
23 changes: 23 additions & 0 deletions core/src/main/resources/swagger/api-docs.yaml
@@ -5851,6 +5851,13 @@ components:
           description: timestamp (UTC) marking the date that the bucket usage was last updated (YYYY-MM-DDThh:mm:ss.fffZ)
       description: ""
     PendingCloneWorkspaceFileTransfer:
+      required:
+        - destWorkspaceId
+        - sourceWorkspaceBucketName
+        - destWorkspaceBucketName
+        - copyFilesWithPrefix
+        - destWorkspaceGoogleProjectId
+        - created
       type: object
       properties:
         destWorkspaceId:
@@ -5868,6 +5875,22 @@ components:
         destWorkspaceGoogleProjectId:
           type: string
           description: "The Google project that the destination workspace belongs to"
+        created:
+          type: string
+          description: "The time the file transfer started in yyyy-MM-ddTHH:mm:ss.SSSZZ
+            format."
+          format: date-time
+        finished:
+          type: string
+          description: "The time the file transfer finished in yyyy-MM-ddTHH:mm:ss.SSSZZ
+            format."
+          format: date-time
+        outcome:
+          type: string
+          description: "The outcome of a finished file transfer."
+          enum:
+            - Success
+            - Failure
     Attribute:
       type: object
       properties:
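The new schema fields line up with the model and Slick changes later in this commit. As a rough sketch (not the authoritative Rawls model), the backing case class would look like this, with GoogleProjectId stood in by a simple wrapper:

import java.util.UUID
import org.joda.time.DateTime

// Stand-in for org.broadinstitute.dsde.rawls.model.GoogleProjectId.
final case class GoogleProjectId(value: String)

// Sketch reconstructed from the swagger properties above and the Slick diff below:
// created is required; finished and outcome stay empty until the transfer completes,
// and outcome is expected to be "Success" or "Failure".
final case class PendingCloneWorkspaceFileTransfer(
  destWorkspaceId: UUID,
  sourceWorkspaceBucketName: String,
  destWorkspaceBucketName: String,
  copyFilesWithPrefix: String,
  destWorkspaceGoogleProjectId: GoogleProjectId,
  created: DateTime,
  finished: Option[DateTime],
  outcome: Option[String]
)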
@@ -168,7 +168,8 @@ class LocalEntityProvider(requestArguments: EntityRequestArguments,
   def getBillingProfile(billingProfileId: UUID, ctx: RawlsRequestContext): Option[ProfileModel] =
     Try(Option(apiClientProvider.getProfileApi(ctx).getProfile(billingProfileId))) match {
       case Success(value) => value
-      case Failure(e: ApiException) if e.getCode == StatusCodes.NotFound.intValue =>
+      case Failure(e: ApiException)
+          if e.getCode == StatusCodes.NotFound.intValue || e.getCode == StatusCodes.Forbidden.intValue =>
         None
       case Failure(e) => throw new BpmException(billingProfileId, e);
     }
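The widened guard means a 403 from the billing profile service is now treated like a 404: the profile is reported as absent instead of raising a BpmException. A self-contained sketch of the pattern, with a stand-in for the generated client's ApiException:

import scala.util.{Failure, Success, Try}

// Stand-in exception carrying an HTTP status code, in place of the client's ApiException.
final case class ApiException(code: Int) extends RuntimeException

// 404 (missing) and 403 (not visible to the caller) both map to None; anything else rethrows.
def lookupProfile[A](fetch: => A): Option[A] =
  Try(Option(fetch)) match {
    case Success(value)                                              => value
    case Failure(e: ApiException) if e.code == 404 || e.code == 403 => None
    case Failure(e)                                                  => throw e
  }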
@@ -1,13 +1,18 @@
 package org.broadinstitute.dsde.rawls.dataaccess.slick

 import org.broadinstitute.dsde.rawls.model.{GoogleProjectId, PendingCloneWorkspaceFileTransfer}
+import org.joda.time.DateTime

+import java.sql.Timestamp
 import java.util.UUID

 case class CloneWorkspaceFileTransferRecord(id: Long,
                                             destWorkspaceId: UUID,
                                             sourceWorkspaceId: UUID,
-                                            copyFilesWithPrefix: String
+                                            copyFilesWithPrefix: String,
+                                            created: Timestamp,
+                                            finished: Option[Timestamp],
+                                            outcome: Option[String]
 )

 trait CloneWorkspaceFileTransferComponent {
@@ -21,11 +26,17 @@ trait CloneWorkspaceFileTransferComponent {
     def destWorkspaceId = column[UUID]("DEST_WORKSPACE_ID")
     def sourceWorkspaceId = column[UUID]("SOURCE_WORKSPACE_ID")
     def copyFilesWithPrefix = column[String]("COPY_FILES_WITH_PREFIX", O.Length(254))
+    def created = column[Timestamp]("CREATED")
+    def finished = column[Option[Timestamp]]("FINISHED")
+    def outcome = column[Option[String]]("OUTCOME")

     def * = (id,
              destWorkspaceId,
              sourceWorkspaceId,
-             copyFilesWithPrefix
+             copyFilesWithPrefix,
+             created,
+             finished,
+             outcome
     ) <> (CloneWorkspaceFileTransferRecord.tupled, CloneWorkspaceFileTransferRecord.unapply)
   }

@@ -43,16 +54,22 @@ trait CloneWorkspaceFileTransferComponent {

   def listPendingTransfers(workspaceId: Option[UUID] = None): ReadAction[Seq[PendingCloneWorkspaceFileTransfer]] = {
     val query = for {
-      fileTransfer <- cloneWorkspaceFileTransferQuery.filterOpt(workspaceId) { case (table, workspaceId) =>
-        table.destWorkspaceId === workspaceId
-      }
+      fileTransfer <-
+        cloneWorkspaceFileTransferQuery
+          .filter(_.finished.isEmpty)
+          .filterOpt(workspaceId) { case (table, workspaceId) =>
+            table.destWorkspaceId === workspaceId
+          }
       sourceWorkspace <- workspaceQuery if sourceWorkspace.id === fileTransfer.sourceWorkspaceId
       destWorkspace <- workspaceQuery if destWorkspace.id === fileTransfer.destWorkspaceId
     } yield (destWorkspace.id,
              sourceWorkspace.bucketName,
              destWorkspace.bucketName,
              fileTransfer.copyFilesWithPrefix,
-             destWorkspace.googleProjectId
+             destWorkspace.googleProjectId,
+             fileTransfer.created,
+             fileTransfer.finished,
+             fileTransfer.outcome
     )

     query.result.map(results =>
@@ -61,18 +78,34 @@ trait CloneWorkspaceFileTransferComponent {
               sourceWorkspaceBucketName,
               destWorkspaceBucketName,
               copyFilesWithPrefix,
-              destWorkspaceGoogleProjectId
+              destWorkspaceGoogleProjectId,
+              created,
+              finished,
+              outcome
           ) =>
-        PendingCloneWorkspaceFileTransfer(destWorkspaceId,
-                                          sourceWorkspaceBucketName,
-                                          destWorkspaceBucketName,
-                                          copyFilesWithPrefix,
-                                          GoogleProjectId(destWorkspaceGoogleProjectId)
+        PendingCloneWorkspaceFileTransfer(
+          destWorkspaceId,
+          sourceWorkspaceBucketName,
+          destWorkspaceBucketName,
+          copyFilesWithPrefix,
+          GoogleProjectId(destWorkspaceGoogleProjectId),
+          new DateTime(created),
+          finished.map(new DateTime(_)),
+          outcome
        )
      }
    )
  }

+  def update(pendingCloneWorkspaceFileTransfer: PendingCloneWorkspaceFileTransfer): ReadWriteAction[Int] =
+    findByDestWorkspaceId(pendingCloneWorkspaceFileTransfer.destWorkspaceId)
+      .map(ft => (ft.finished, ft.outcome))
+      .update(
+        (pendingCloneWorkspaceFileTransfer.finished.map(f => new Timestamp(f.getMillis)),
+         pendingCloneWorkspaceFileTransfer.outcome
+        )
+      )
+
   def delete(destWorkspaceId: UUID): ReadWriteAction[Int] =
     findByDestWorkspaceId(destWorkspaceId).delete
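A hedged sketch of how a caller such as the clone-workspace monitor might use the new update method to close out a transfer. The DataAccess and ReadWriteAction types and the loaded transfer are assumed from the surrounding Rawls code; "Success" matches the outcome enum added to api-docs.yaml above.

import org.joda.time.DateTime

// Hypothetical call site: record completion time and outcome for a pending transfer.
def markTransferSucceeded(transfer: PendingCloneWorkspaceFileTransfer,
                          dataAccess: DataAccess): ReadWriteAction[Int] =
  dataAccess.cloneWorkspaceFileTransferQuery.update(
    transfer.copy(finished = Some(DateTime.now), outcome = Some("Success"))
  )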
@@ -1,7 +1,7 @@
 package org.broadinstitute.dsde.rawls.dataaccess.slick

 import org.broadinstitute.dsde.rawls.entities.local.LocalEntityExpressionQueries
-import org.broadinstitute.dsde.rawls.monitor.migration.{MultiregionalBucketMigrationHistory}
+import org.broadinstitute.dsde.rawls.monitor.migration.MultiregionalBucketMigrationHistory
 import slick.jdbc.JdbcProfile

 import javax.naming.NameNotFoundException
@@ -38,6 +38,7 @@ import org.broadinstitute.dsde.rawls.model.{
 }
 import org.broadinstitute.dsde.rawls.util.TracingUtils._
 import org.broadinstitute.dsde.rawls.util.{AttributeSupport, CollectionUtils, EntitySupport}
+import slick.jdbc.TransactionIsolation

 import scala.concurrent.{ExecutionContext, Future}
 import scala.language.postfixOps
@@ -75,62 +76,69 @@ class LocalEntityProvider(requestArguments: EntityRequestArguments,
       s.putAttribute("cacheEnabled", OpenCensusAttributeValue.booleanAttributeValue(cacheEnabled))
     }
     // start transaction
-    dataSource.inTransaction { dataAccess =>
-      if (!useCache || !cacheEnabled) {
-        if (!cacheEnabled) {
-          logger.info(
-            s"entity statistics cache: miss (cache disabled at system level) [${workspaceContext.workspaceIdAsUUID}]"
-          )
-        } else if (!useCache) {
-          logger.info(
-            s"entity statistics cache: miss (user request specified cache bypass) [${workspaceContext.workspaceIdAsUUID}]"
-          )
-        }
-        // retrieve metadata, bypassing cache
-        calculateMetadataResponse(dataAccess, countsFromCache = false, attributesFromCache = false, localContext)
-      } else {
-        // system and request both have cache enabled. Check for existence and staleness of cache
-        cacheStaleness(dataAccess, localContext).flatMap {
-          case None =>
-            // cache does not exist - return uncached
-            logger.info(
-              s"entity statistics cache: miss (cache does not exist) [${workspaceContext.workspaceIdAsUUID}]"
-            )
-            calculateMetadataResponse(dataAccess, countsFromCache = false, attributesFromCache = false, localContext)
-          case Some(0) =>
-            // cache is up to date - return cached
-            logger.info(s"entity statistics cache: hit [${workspaceContext.workspaceIdAsUUID}]")
-            calculateMetadataResponse(dataAccess, countsFromCache = true, attributesFromCache = true, localContext)
-          case Some(stalenessSeconds) =>
-            // cache exists, but is out of date - check if this workspace has any always-cache feature flags set
-            cacheFeatureFlags(dataAccess, localContext).flatMap { flags =>
-              if (flags.alwaysCacheTypeCounts || flags.alwaysCacheAttributes) {
-                localContext.tracingSpan.foreach { s =>
-                  s.putAttribute("alwaysCacheTypeCountsFeatureFlag",
-                                 OpenCensusAttributeValue.booleanAttributeValue(flags.alwaysCacheTypeCounts)
-                  )
-                  s.putAttribute("alwaysCacheAttributesFeatureFlag",
-                                 OpenCensusAttributeValue.booleanAttributeValue(flags.alwaysCacheAttributes)
-                  )
-                }
-                logger.info(
-                  s"entity statistics cache: partial hit (alwaysCacheTypeCounts=${flags.alwaysCacheTypeCounts}, alwaysCacheAttributes=${flags.alwaysCacheAttributes}, staleness=$stalenessSeconds) [${workspaceContext.workspaceIdAsUUID}]"
-                )
-              } else {
-                logger.info(
-                  s"entity statistics cache: miss (cache is out of date, staleness=$stalenessSeconds) [${workspaceContext.workspaceIdAsUUID}]"
-                )
-                // and opportunistically save
-              }
-              calculateMetadataResponse(dataAccess,
-                                        countsFromCache = flags.alwaysCacheTypeCounts,
-                                        attributesFromCache = flags.alwaysCacheAttributes,
-                                        localContext
-              )
-            } // end feature-flags lookup
-        } // end staleness lookup
-      } // end if useCache/cacheEnabled check
-    } // end transaction
+    dataSource.inTransaction(
+      dataAccess =>
+        if (!useCache || !cacheEnabled) {
+          if (!cacheEnabled) {
+            logger.info(
+              s"entity statistics cache: miss (cache disabled at system level) [${workspaceContext.workspaceIdAsUUID}]"
+            )
+          } else if (!useCache) {
+            logger.info(
+              s"entity statistics cache: miss (user request specified cache bypass) [${workspaceContext.workspaceIdAsUUID}]"
+            )
+          }
+          // retrieve metadata, bypassing cache
+          calculateMetadataResponse(dataAccess, countsFromCache = false, attributesFromCache = false, localContext)
+        } else {
+          // system and request both have cache enabled. Check for existence and staleness of cache
+          cacheStaleness(dataAccess, localContext).flatMap {
+            case None =>
+              // cache does not exist - return uncached
+              logger.info(
+                s"entity statistics cache: miss (cache does not exist) [${workspaceContext.workspaceIdAsUUID}]"
+              )
+              calculateMetadataResponse(dataAccess,
+                                        countsFromCache = false,
+                                        attributesFromCache = false,
+                                        localContext
+              )
+            case Some(0) =>
+              // cache is up to date - return cached
+              logger.info(s"entity statistics cache: hit [${workspaceContext.workspaceIdAsUUID}]")
+              calculateMetadataResponse(dataAccess, countsFromCache = true, attributesFromCache = true, localContext)
+            case Some(stalenessSeconds) =>
+              // cache exists, but is out of date - check if this workspace has any always-cache feature flags set
+              cacheFeatureFlags(dataAccess, localContext).flatMap { flags =>
+                if (flags.alwaysCacheTypeCounts || flags.alwaysCacheAttributes) {
+                  localContext.tracingSpan.foreach { s =>
+                    s.putAttribute("alwaysCacheTypeCountsFeatureFlag",
+                                   OpenCensusAttributeValue.booleanAttributeValue(flags.alwaysCacheTypeCounts)
+                    )
+                    s.putAttribute("alwaysCacheAttributesFeatureFlag",
+                                   OpenCensusAttributeValue.booleanAttributeValue(flags.alwaysCacheAttributes)
+                    )
+                  }
+                  logger.info(
+                    s"entity statistics cache: partial hit (alwaysCacheTypeCounts=${flags.alwaysCacheTypeCounts}, alwaysCacheAttributes=${flags.alwaysCacheAttributes}, staleness=$stalenessSeconds) [${workspaceContext.workspaceIdAsUUID}]"
+                  )
+                } else {
+                  logger.info(
+                    s"entity statistics cache: miss (cache is out of date, staleness=$stalenessSeconds) [${workspaceContext.workspaceIdAsUUID}]"
+                  )
+                  // and opportunistically save
+                }
+                calculateMetadataResponse(dataAccess,
+                                          countsFromCache = flags.alwaysCacheTypeCounts,
+                                          attributesFromCache = flags.alwaysCacheAttributes,
+                                          localContext
+                )
+              } // end feature-flags lookup
+          } // end staleness lookup
+        } // end if useCache/cacheEnabled check
+      ,
+      TransactionIsolation.ReadCommitted
+    ) // end transaction
   } // end root trace

   override def createEntity(entity: Entity): Future[Entity] =
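Aside from the scalafmt re-indentation, the functional change in this hunk is that the metadata read now runs at READ COMMITTED instead of the connection default. Stripped of the caching logic, the new call shape is roughly:

import slick.jdbc.TransactionIsolation

// Sketch only: inTransaction now receives the transaction body plus an explicit
// isolation level. dataSource, calculateMetadataResponse, and localContext are
// assumed from the surrounding LocalEntityProvider code.
dataSource.inTransaction(
  dataAccess => calculateMetadataResponse(dataAccess,
                                          countsFromCache = false,
                                          attributesFromCache = false,
                                          localContext
  ),
  TransactionIsolation.ReadCommitted
)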