diff --git a/packages/dev/quickstart-adoc.js b/packages/dev/quickstart-adoc.js deleted file mode 100644 index 8a798898..00000000 --- a/packages/dev/quickstart-adoc.js +++ /dev/null @@ -1,283 +0,0 @@ -/* eslint-disable */ - -const yaml = require('yaml'); -const path = require('path'); -const jp = require('jsonpath'); -const fs = require('fs-extra'); -const { JSDOM } = require('jsdom'); -const fetch = require('sync-fetch'); -const asciidoctor = require('asciidoctor')(); -const Ajv = require('ajv').default; - -const { addReactConverter } = require('@patternfly/transform-adoc'); -addReactConverter(asciidoctor); - -const pantheonBaseUrl = process.env.PANTHEON_URL || 'https://pantheon.corp.redhat.com/api'; -const attributesFile = process.env.ATTRIBUTES_FILE || 'quickstart-attributes.yml'; - -const buildQuickStart = (content, filePath, basePath, asciidocOptions) => { - const validateJSON = (instance, schemaPath) => { - const ajv = new Ajv(); - const rawSchema = fs.readFileSync(schemaPath, 'utf8').toString(); - const schema = JSON.parse(rawSchema); - const validate = ajv.compile(schema); - const valid = validate(instance); - if (!valid) { - throw new Error(`${filePath} ${validate.errors.map((error) => error.message).toString()}`); - } - }; - - const snippetCache = {}; - - const pantheonMappingsPath = path.join(basePath, 'pantheon.yml'); - - let pantheonMappings; - - if (fs.existsSync(pantheonMappingsPath)) { - // load the pantheon mappings - pantheonMappings = yaml.parse(fs.readFileSync(pantheonMappingsPath, 'utf8').toString()); - // validate it - validateJSON(pantheonMappings, path.join('../', 'pantheon.schema.json')); - } - - let attributes; - - if (fs.existsSync(attributesFile)) { - const extname = path.extname(attributesFile); - // Support for attributes declared as an asciidoc file, like pantheon does it - console.log(extname); - if (extname === '.asciidoc' || extname === '.adoc') { - const attributesDoc = asciidoctor.loadFile(attributesFile, { - attributes: { - qs: 'true', - }, - }); - attributes = attributesDoc.getAttributes(); - } else if (extname === '.yml' || extname === '.yaml') { - attributes = yaml.parse(fs.readFileSync(attributesFile, 'utf-8').toString()) || {}; - } else { - throw new Error( - `${attributesFile} type is unsupported, must be .yml, .yaml, .adoc or .asciidoc`, - ); - } - } else { - attributes = {}; - } - // Inject auto-set attributes - attributes.qs = 'true'; - - if (!asciidocOptions) { - asciidocOptions = {}; - } - if (asciidocOptions.attributes) { - // Already set, merge them, using the ones passed in as higher precedent - Object.assign(attributes, asciidocOptions.attributes); - } - asciidocOptions.attributes = attributes; - - const loadSnippet = ( - ref, - tag, - type, - asciiDocCallback, - defaultPathExpression, - defaultCssSelector, - ) => { - if (pantheonMappings) { - // Load from pantheon, if mapped - const answer = loadFromPantheon(ref, tag, defaultPathExpression, defaultCssSelector); - if (answer) { - return answer; - } - // otherwise continue to load from asciidoc - } - if (!snippetCache[ref]) { - const parts = ref.split('#'); - if (parts.length !== 2) { - throw Error(`malformed ${tag} ${ref}, must be like !${tag} README.adoc#task-1`); - } - const fileName = parts[0]; - const filePath = path.normalize(path.join(basePath, fileName)); - const adoc = asciidoctor.loadFile(filePath, asciidocOptions); - // create an array with all the blocks in the doc in it - const context = adoc.getAttribute('context', '{context}'); - - const blocks = flattenBlocks(adoc); - - 
blocks - // only blocks with an id can be used - .filter((block) => block.getId()) - // If we are looking for a particular moduleType, we can filter for it - .filter((block) => (type ? getModuleType(block) === type : true)) - .forEach((block) => { - // create versions with, and without, the context - const id = block.getId(); - const contextLessId = block.getId().replace(`_${context}`, ''); - snippetCache[`${fileName}#${id}`] = block; - if (!snippetCache[`${fileName}#${contextLessId}`]) { - snippetCache[`${fileName}#${contextLessId}`] = block; - } - }); - } - if (!snippetCache[ref]) { - throw new Error(`${filePath} unable to locate snippet for ${tag} ${ref}`); - } - // Apply the callback, if passed - if (asciiDocCallback) { - return asciiDocCallback(snippetCache[ref]); - } - return snippetCache[ref]; - }; - - const loadFromPantheon = (ref, tag, defaultPathExpression, defaultCssSelector) => { - const mapping = pantheonMappings[`${tag} ${ref}`]; - if (!mapping) { - return undefined; - } - let uuid, type, cssSelector, pathExpression; - - if (typeof mapping === 'object') { - uuid = mapping.uuid; - type = mapping.type; - cssSelector = mapping.cssSelector || defaultCssSelector; - pathExpression = mapping.jsonPathExpression || defaultPathExpression; - } else if (typeof mapping === 'string') { - if (mapping.startsWith('https')) { - const parts = mapping.match(/https:\/\/.*\/api\/(\w*)\/.*\/([a-z0-9-]*)/); - if (parts.length !== 3) { - throw new Error(`Unable to parse ${mapping} as pantheon URL`); - } - type = parts[1]; - uuid = parts[2]; - cssSelector = defaultCssSelector; - pathExpression = defaultPathExpression; - } - } else { - throw new Error( - '${tag} ${ref} mapping to pantheon API is unsupported, should either be a URL or have keys for uuid and type', - ); - } - - if (!uuid) { - throw new Error(`uuid not set in ${pantheonMappingsPath}`); - } - if (!type) { - throw new Error(`type not set in ${pantheonMappingsPath}`); - } - const data = loadFromPantheonApi(uuid, type, pathExpression); - const result = jp.nodes(data, pathExpression); - return result - .map((node) => { - const path = node.path; - if (cssSelector && path && path[path.length - 1] === 'body') { - const dom = new JSDOM(node.value); - return dom.window.document.querySelector(cssSelector); - } - return node.value; - }) - .reduce((previousValue, currentValue) => `${previousValue} ${currentValue}`, ''); - }; - - const loadFromPantheonApi = (uuid, type) => { - const url = `${pantheonBaseUrl}/${type}/variant.json/${uuid}`; - const res = fetch(url); - if (res.status != 200) { - throw new Error(`error fetching from pantheon ${res.status} ${res.text()}`); - } - return res.json(); - }; - - const flattenBlocks = (block) => { - const flat = []; - - flat.push(block); - if (block.hasBlocks()) { - block.getBlocks().forEach((block) => { - flat.push(...flattenBlocks(block)); - }); - } - return flat; - }; - - const snippetTag = { - identify: (value) => value instanceof asciidoctor.AbstractBlock, - tag: '!snippet', - resolve: (doc, cst) => { - const parts = cst.strValue.split('#'); - if (parts.length !== 2) { - throw Error(`malformed !snippet ${cst.str}, must be like !snippet README.adoc#task-1`); - } - const id = parts[1]; - return loadSnippet( - cst.strValue, - '!snippet', - undefined, - (block) => block.convert(), - '$.*.body', - `#${id}`, - ); - }, - stringify(item) { - return item.convert(); - }, - }; - - const procTag = { - identify: (value) => - value instanceof asciidoctor.AbstractBlock && getModuleType(value) === 'proc', - tag: 
'!snippet/proc', - resolve: (doc, cst) => - loadSnippet(cst.strValue, '!snippet/proc', 'proc', (block) => block.convert(), '$.*.body'), - stringify: () => '', - }; - - const titleTag = { - identify: false, - tag: '!snippet/title', - resolve: (doc, cst) => - loadSnippet( - cst.strValue, - '!snippet/title', - undefined, - (block) => block.getTitle(), - '$.*.title', - ), - stringify: () => '', - }; - - // load the yaml - const qs = yaml.parse(content.toString(), { - customTags: [snippetTag, procTag, titleTag], - }); - - validateJSON(qs, path.join('src/quickstarts-data/asciidoc/', 'quickstart.schema.json')); - - // transform the yaml to json for the browser to load - const json = JSON.stringify(qs); - return json; -}; - -const MODULE_TYPE_ATTRIBUTE = 'module-type'; - -const getModuleType = (node) => { - if (node.getAttributes()[MODULE_TYPE_ATTRIBUTE]) { - return node.getAttributes()[MODULE_TYPE_ATTRIBUTE]; - } - - const id = node.getId(); - - if (id && id.startsWith('con-')) { - return 'con'; - } - - if (id && id.startsWith('proc-')) { - return 'proc'; - } - - if (id && id.startsWith('ref-')) { - return 'ref'; - } - return 'unknown'; // punt, we don't know -}; - -exports.buildQuickStart = buildQuickStart; diff --git a/packages/dev/src/AppContext.tsx b/packages/dev/src/AppContext.tsx index a39f4f32..be7f94bb 100755 --- a/packages/dev/src/AppContext.tsx +++ b/packages/dev/src/AppContext.tsx @@ -2,13 +2,11 @@ import './App.css'; import { Page } from '@patternfly/react-core'; import { LoadingBox, - QuickStart, QuickStartContextProvider, QuickStartContextValues, QuickStartDrawer, useLocalStorage, } from '@patternfly/quickstarts'; -import { loadJSONQuickStarts } from './quickstarts-data/asciidoc/quickstartLoader'; import { allQuickStarts as yamlQuickStarts } from './quickstarts-data/quick-start-test-data'; import React from 'react'; import i18n from './i18n/i18n'; @@ -31,22 +29,12 @@ const App: React.FC = ({ children, showCardFooters }) => { console.log(allQuickStartStates); }, [allQuickStartStates]); - const [allQuickStarts, setAllQuickStarts] = React.useState([]); - React.useEffect(() => { - const load = async () => { - const masGuidesQuickstarts = await loadJSONQuickStarts(''); - setAllQuickStarts(yamlQuickStarts.concat(masGuidesQuickstarts)); - }; - setTimeout(() => { - load(); - }, 500); - }, []); const language = localStorage.getItem('bridge/language') || 'en'; const resourceBundle = i18n.getResourceBundle(language, 'quickstart'); const valuesForQuickstartContext: QuickStartContextValues = { - allQuickStarts, + allQuickStarts: yamlQuickStarts, activeQuickStartID, setActiveQuickStartID, allQuickStartStates, @@ -65,7 +53,7 @@ const App: React.FC = ({ children, showCardFooters }) => { return ( }> - {allQuickStarts && allQuickStarts.length ? ( + {yamlQuickStarts && yamlQuickStarts.length ? 
( diff --git a/packages/dev/src/AppProps.tsx b/packages/dev/src/AppProps.tsx index b9a7ad76..1636c11a 100755 --- a/packages/dev/src/AppProps.tsx +++ b/packages/dev/src/AppProps.tsx @@ -2,7 +2,6 @@ import './App.css'; import { Page, Button } from '@patternfly/react-core'; import { LoadingBox, - QuickStart, QuickStartContainer, QuickStartContainerProps, useLocalStorage, @@ -10,7 +9,6 @@ import { removeQueryArgument, QUICKSTART_ID_FILTER_KEY, } from '@patternfly/quickstarts'; -import { loadJSONQuickStarts } from './quickstarts-data/asciidoc/quickstartLoader'; import { allQuickStarts as yamlQuickStarts } from './quickstarts-data/quick-start-test-data'; import React from 'react'; import i18n from './i18n/i18n'; @@ -35,23 +33,12 @@ const App: React.FC = ({ children, showCardFooters }) => { console.log(allQuickStartStates); }, [allQuickStartStates]); - const [loading, setLoading] = React.useState(true); - const [quickStarts, setQuickStarts] = React.useState([]); - React.useEffect(() => { - const load = async () => { - const masGuidesQuickstarts = await loadJSONQuickStarts(''); - setQuickStarts(yamlQuickStarts.concat(masGuidesQuickstarts)); - setLoading(false); - }; - setTimeout(() => { - load(); - }, 500); - }, []); + const withQueryParams = true; const drawerProps: QuickStartContainerProps = { - quickStarts, + quickStarts: yamlQuickStarts, activeQuickStartID, allQuickStartStates, setActiveQuickStartID, @@ -59,7 +46,6 @@ const App: React.FC = ({ children, showCardFooters }) => { resourceBundle, showCardFooters, language, - loading, useQueryParams: withQueryParams, alwaysShowTaskReview: true, markdown: { diff --git a/packages/dev/src/CustomCatalog.tsx b/packages/dev/src/CustomCatalog.tsx index 8c4a382d..520d2b18 100644 --- a/packages/dev/src/CustomCatalog.tsx +++ b/packages/dev/src/CustomCatalog.tsx @@ -17,6 +17,8 @@ import { getQuickStartStatus, LoadingBox, } from '@patternfly/quickstarts'; +import BookmarkIcon from '@patternfly/react-icons/dist/esm/icons/bookmark-icon'; +import OutlinedBookmarkIcon from '@patternfly/react-icons/dist/esm/icons/outlined-bookmark-icon'; import { Divider, Gallery, @@ -80,9 +82,60 @@ export const CustomCatalog: React.FC = () => { setFilteredQuickStarts(result); }; + const [bookmarked, setBookmarked] = React.useState([]) + const CatalogWithSections = React.useMemo( () => ( <> + + + Bookmarkable + + Bookmarkable examples + + + + {allQuickStarts + .filter((quickStart: QuickStart) => quickStart.metadata.instructional) + .map((quickStart: QuickStart) => ({ + ...quickStart, + metadata: { + ...quickStart.metadata, + id: `${quickStart.metadata.name}-bookmar` + } + })) + .map((quickStart: QuickStart) => { + const { + metadata: { name: id }, + } = quickStart; + + return ( + + { + e.preventDefault(); + e.stopPropagation(); + setBookmarked((prev) => { + if (prev.includes(id)) { + return prev.filter((bookmark) => bookmark !== id) + } + + return [...prev, id]; + }); + }, + icon: bookmarked.includes(id) ? 
BookmarkIcon : OutlinedBookmarkIcon, + 'aria-label': 'bookmark' + }} + quickStart={quickStart} + isActive={id === activeQuickStartID} + status={getQuickStartStatus(allQuickStartStates, id)} + /> + + ); + })} + + Instructional @@ -142,7 +195,7 @@ export const CustomCatalog: React.FC = () => { ), - [activeQuickStartID, allQuickStartStates, allQuickStarts], + [activeQuickStartID, allQuickStartStates, allQuickStarts, bookmarked], ); const clearFilters = React.useCallback(() => { diff --git a/packages/dev/src/quickstarts-data/asciidoc/.build/README.md b/packages/dev/src/quickstarts-data/asciidoc/.build/README.md deleted file mode 100644 index 797a6260..00000000 --- a/packages/dev/src/quickstarts-data/asciidoc/.build/README.md +++ /dev/null @@ -1,8 +0,0 @@ -To build the HTML from the adoc: - -clone pantheon from https://github.com/redhataccess/pantheon -Set the env var `PANTHEON_DIR` to the location of this checkout -install asciidoctor and other supporting gems sudo gem install asciidoctor haml tilt -generate the html - in the `src/quickstarts-data/asciidoc` dir run - -```asciidoctor -o adding_health_checks.quickstart.html -T ${PANTHEON_DIR}/pantheon-bundle/src/main/resources/apps/pantheon/templates/haml/html5 assemblies/assembly_adding_health_checks.adoc -a qs=true``` diff --git a/packages/dev/src/quickstarts-data/asciidoc/.build/adding_health_checks.quickstart.html b/packages/dev/src/quickstarts-data/asciidoc/.build/adding_health_checks.quickstart.html deleted file mode 100644 index 290e0b6d..00000000 --- a/packages/dev/src/quickstarts-data/asciidoc/.build/adding_health_checks.quickstart.html +++ /dev/null @@ -1,198 +0,0 @@ - - - - - - - -[adoc html] Adding health checks to your sample application - - - - - - - - - - - -
-[remainder of this file's 198 deleted lines: the generated HTML body of the "Adding health checks to your sample application" quick start, with markup lost in extraction. Rendered content: the page title; "Duration: 5 minutes"; the description "You just created a sample application. Now, let’s add health checks to it."; the prerequisites "Prereq 1" and "Prereq 2"; an introduction noting that the quick start assumes the sample-app application and nodejs-sample deployment from the "Get started with a sample" quick start; the task "Viewing the details of your sample application" with four procedure steps and a verification question; the task "Verifying that there are no health checks" with one procedure step, a verification question, the review message "This task isn’t verified yet. Try the task again.", and the summaries "You have verified that there are no existing health checks!" and "Try the steps again."; a conclusion pointing to the "Monitor your sample application" quick start; and an "Additional resources" section.]
- - \ No newline at end of file diff --git a/packages/dev/src/quickstarts-data/asciidoc/.build/assemblies/assembly_adding_health_checks.adoc b/packages/dev/src/quickstarts-data/asciidoc/.build/assemblies/assembly_adding_health_checks.adoc deleted file mode 100644 index c9929d57..00000000 --- a/packages/dev/src/quickstarts-data/asciidoc/.build/assemblies/assembly_adding_health_checks.adoc +++ /dev/null @@ -1,94 +0,0 @@ -// UserStory: Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed et ante ut est suscipit suscipit. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Mauris molestie laoreet pharetra. Nulla magna nisl, congue eget augue ut, tempor bibendum leo. Phasellus mollis ex molestie cursus commodo. Curabitur et orci tristique, suscipit velit eget, pretium lectus. Morbi sed eros sed felis facilisis convallis. Pellentesque id euismod massa. Fusce non orci convallis, fermentum eros pulvinar, consequat nulla. Cras at massa massa. Phasellus metus magna, ornare sed purus non, consequat mattis erat. Cras ornare massa molestie, pharetra dui non, euismod tortor. Integer ornare, nunc molestie vestibulum ultrices, velit quam consectetur ex, id tincidunt dui ante quis nulla. Nunc condimentum tempus tellus, sit amet placerat massa euismod tincidunt. Etiam a elementum massa. - -[id="proc-adding-health-checks_{context}"] -= [adoc html] Adding health checks to your sample application - -:context: healthcheck-workflow - -ifdef::qs[] -[.qs-duration] --- -*Duration:* 5 minutes --- - -[.qs-description] --- -You just created a sample application. Now, let’s add health checks to it. --- - -[.qs-prerequisites] --- -.Prerequisites -* Prereq 1 -* Prereq 2 --- - -endif::qs[] - -[.qs-intro._abstract] --- -[discrete] -== This quick start shows you how to add health checks to your sample application -You should have previously created the **sample-app** application and **nodejs-sample** deployment using the **Get started with a sample** quick start. If you haven't, you may be able to follow these tasks with any existing deployment without configured health checks. --- - -[.qs-task] --- - -[.qs-task-description] -==== -[discrete] -To view the details of your sample application: -==== -[discrete] -include::../modules/viewing_the_details_of_your_sample_application.adoc[leveloffset=+1] - --- - -[.qs-task] --- -[.qs-task-description] -==== -[discrete] -=== To verify that there your sample application has no health checks configured: -==== - -[discrete] -include::../modules/proc_verify_no_healthchecks.adoc[leveloffset=+1] - -ifdef::qs[] -[.qs-review.failedTaskHelp] -===== -This task isn’t verified yet. Try the task again. -===== - -[.qs-summary.success] -===== -You have verified that there are no existing health checks! -===== - -[.qs-summary.failed] -===== -Try the steps again. -===== -endif::qs[] --- - -ifdef::qs[] -[.qs-conclusion] --- -[discrete] -== Conclusion -Your sample application now has health checks. To ensure that your application is running correctly, take the *Monitor your sample application* quick start. 
-- - -[.qs-next-quick-start] -- -[] -link:../monitor-sampleapp{refilesuffix}[Start monitor-sampleapp quick start] -- -endif::qs[] - -[role="_additional-resources"] -== Additional resources -* link:https://access.redhat.com/documentation/en-us/red_hat_3scale_api_management/2.9/[Product Documentation for {health-check-name}] diff --git a/packages/dev/src/quickstarts-data/asciidoc/.build/modules/proc_verify_no_healthchecks.adoc b/packages/dev/src/quickstarts-data/asciidoc/.build/modules/proc_verify_no_healthchecks.adoc deleted file mode 100644 index 6d8950cf..00000000 --- a/packages/dev/src/quickstarts-data/asciidoc/.build/modules/proc_verify_no_healthchecks.adoc +++ /dev/null @@ -1,10 +0,0 @@ -[id="verifying_that_there_are_no_health_checks_{context}",role="qs-task-title"] -= Verifying that there are no health checks - -[.qs-task-procedure] -.Procedure -. View the information in the *Resources* tab in the side panel. - -[.qs-task-verification] -.Verification -. Do you see an inline alert stating that *nodejs-sample* does not have health checks? diff --git a/packages/dev/src/quickstarts-data/asciidoc/.build/modules/viewing_the_details_of_your_sample_application.adoc b/packages/dev/src/quickstarts-data/asciidoc/.build/modules/viewing_the_details_of_your_sample_application.adoc deleted file mode 100644 index bfd0225c..00000000 --- a/packages/dev/src/quickstarts-data/asciidoc/.build/modules/viewing_the_details_of_your_sample_application.adoc +++ /dev/null @@ -1,14 +0,0 @@ -[id="viewing_the_details_of_your_sample_application_{context}",role="qs-task-title"] -= Viewing the details of your sample application - -[.qs-task-procedure] -.Procedure -. Go to the project your sample application was created in. -. In the *</> Developer* perspective, go to *Topology*. -. Click on the *nodejs-sample* deployment to view its details. -. A side panel is displayed containing the details of your sample application. - -[.qs-task-verification] -.Verification -. To verify you are viewing the details of your sample application: -.. Is the side panel titled *nodejs-sample*? diff --git a/packages/dev/src/quickstarts-data/asciidoc/.build/parsers/index.ts b/packages/dev/src/quickstarts-data/asciidoc/.build/parsers/index.ts deleted file mode 100644 index 95e87025..00000000 --- a/packages/dev/src/quickstarts-data/asciidoc/.build/parsers/index.ts +++ /dev/null @@ -1,3 +0,0 @@ -export * from './qs-adoc-parser'; -export * from './procedure-adoc-parser'; -export * from './procedure-adoc-html-parser'; diff --git a/packages/dev/src/quickstarts-data/asciidoc/.build/parsers/procedure-adoc-html-parser.ts b/packages/dev/src/quickstarts-data/asciidoc/.build/parsers/procedure-adoc-html-parser.ts deleted file mode 100644 index bacac070..00000000 --- a/packages/dev/src/quickstarts-data/asciidoc/.build/parsers/procedure-adoc-html-parser.ts +++ /dev/null @@ -1,92 +0,0 @@ -import { QuickStart, QuickStartTask } from '@patternfly/quickstarts'; - -export const ProcedureAdocHtmlParser = ( - body: string, - id: string, - environmentVariables?: { [name: string]: string }, -) => { - const replaceEnvironmentVariables = (s: string | undefined) => - s?.replace(/\${(\w+)}/, (substring, name) => { - return environmentVariables[name] ? 
environmentVariables[name] : substring; - }); - - const bodyDOM = document.createElement('body'); - bodyDOM.innerHTML = body; - - const displayName = replaceEnvironmentVariables( - bodyDOM.querySelector('header')?.textContent.trim(), - ); - const introduction = replaceEnvironmentVariables( - bodyDOM.querySelector('.qs-intro')?.innerHTML.trim(), - ); - const prereqs = bodyDOM.querySelectorAll('.qs-prerequisites ul li'); - const procedures = bodyDOM.querySelectorAll('.qs-task'); - const duration = bodyDOM.querySelector('.qs-duration')?.textContent.trim(); - const durationMinutes = parseInt(duration.match(/\d+/)[0], 10); - const icon = bodyDOM.querySelector('.qs-icon .icon')?.innerHTML.trim(); - const description = replaceEnvironmentVariables( - bodyDOM.querySelector('.qs-description')?.innerHTML.trim(), - ); - const conclusion = replaceEnvironmentVariables( - bodyDOM.querySelector('.qs-conclusion')?.innerHTML.trim(), - ); - - const prerequisites: string[] = []; - prereqs.forEach((n) => { - prerequisites.push(n.textContent.trim()); - }); - - const qsTasks: QuickStartTask[] = []; - procedures.forEach((procedure) => { - const verificationBlock = procedure.querySelector('.olist.qs-task-verification ol'); - qsTasks.push({ - title: replaceEnvironmentVariables( - procedure.querySelector('.qs-task-title')?.textContent.trim(), - ), - description: - replaceEnvironmentVariables( - procedure.querySelector('.qs-task-description')?.innerHTML.trim(), - ) + - replaceEnvironmentVariables( - procedure.querySelector('.olist.qs-task-procedure ol')?.outerHTML.trim(), - ), - review: { - instructions: verificationBlock - ? replaceEnvironmentVariables(`
    <ol>${verificationBlock.innerHTML}</ol>
`) - : 'Have you completed these steps?', - failedTaskHelp: - replaceEnvironmentVariables( - procedure.querySelector('.qs-review.failed')?.innerHTML.trim(), - ) || 'This task isn’t verified yet. Try the task again.', - }, - summary: { - success: - replaceEnvironmentVariables( - procedure.querySelector('.qs-summary.success')?.innerHTML.trim(), - ) || 'You have completed this task!', - failed: - replaceEnvironmentVariables( - procedure.querySelector('.qs-summary.failed')?.innerHTML.trim(), - ) || 'Try the steps again.', - }, - }); - }); - - const processedAsciiDoc: QuickStart = { - metadata: { - name: id, - }, - spec: { - displayName, - durationMinutes, - icon, - description, - introduction, - conclusion, - prerequisites, - nextQuickStart: ['todo'], - tasks: qsTasks, - }, - }; - return processedAsciiDoc; -}; diff --git a/packages/dev/src/quickstarts-data/asciidoc/.build/parsers/procedure-adoc-parser.ts b/packages/dev/src/quickstarts-data/asciidoc/.build/parsers/procedure-adoc-parser.ts deleted file mode 100644 index 7f4b0a59..00000000 --- a/packages/dev/src/quickstarts-data/asciidoc/.build/parsers/procedure-adoc-parser.ts +++ /dev/null @@ -1,215 +0,0 @@ -/* eslint-disable */ - -import Processor from 'asciidoctor'; -import { QuickStart, QuickStartTask } from '@patternfly/quickstarts'; - -const processor = Processor(); -const asciiOptions = { - // https://docs.asciidoctor.org/asciidoc/latest/document/doctypes/ - // article, book, manpage, inline - doctype: 'article', - // add header/footer when true - standalone: false, - safe: 'unsafe', - // base_dir: "quickstarts-data/asciidoc/", - sourcemap: true, -}; - -// eslint-disable-next-line @typescript-eslint/no-unused-vars -const getInnerText = (html: string) => { - const span = document.createElement('span'); - span.innerHTML = html; - return span.textContent || span.innerText; -}; -// eslint-disable-next-line @typescript-eslint/no-unused-vars -const getInnerList = (html: string) => { - const span = document.createElement('span'); - span.innerHTML = html; - return span.querySelector('ol') || span.querySelector('ul'); -}; -const getListItems = (html: string) => { - const span = document.createElement('span'); - span.innerHTML = html; - const elements = span.querySelectorAll('li'); - const items: string[] = []; - elements.forEach((el) => { - items.push(el.textContent.trim()); - }); - return items; -}; -const elementWithoutTitle = (html: string) => { - const span = document.createElement('span'); - span.innerHTML = html; - span.querySelector('.title') && span.querySelector('.title').remove(); - return span; -}; -const addClasses = (html: string) => { - const span = document.createElement('span'); - span.innerHTML = html; - span.querySelectorAll('div.title').forEach((el) => el.classList.add('pf-v5-c-title', 'pf-m-md')); - return span; -}; - -const getIntro = (taskBlocks: any[]) => { - let intro = ''; - let lastIndex = -1; - // @ts-ignore-next-line - taskBlocks.some((taskBlock) => { - lastIndex += 1; - let stopHere = false; - // grab until we hit a subtitle, or an olist or ulist with a title of - // Prerequisites || Procedure - if (taskBlock.context === 'floating_title') { - stopHere = true; - } else if (taskBlock.context === 'ulist' || taskBlock.context === 'olist') { - if (taskBlock.title === 'Prerequisites' || taskBlock.title === 'Procedure') { - stopHere = true; - } - } - if (!stopHere) { - intro += taskBlock.convert(); - } else { - return true; - } - }); - return { - intro, - lastIndex, - nextBlock: taskBlocks[lastIndex].title, - }; -}; - -const 
getPrereqs = (taskBlocks: any[], startingIndex: number) => { - const lastIndex = startingIndex; - const initialBlock = taskBlocks[startingIndex]; - if ( - (initialBlock.context !== 'olist' || initialBlock.context !== 'ulist') && - initialBlock.title !== 'Prerequisites' - ) { - return { - prereqs: [], - lastIndex, - }; - } - const list: any[] = []; - (initialBlock.getItems() as any[]).forEach((li) => { - list.push(li.getText()); - }); - return { - prereqs: list, - lastIndex: lastIndex + 1, - }; -}; - -const getProcedures = (taskBlocks: any[], startingIndex: number) => { - const procedures: any[] = []; - let procedure: any; - let lastIndex = startingIndex; - const nextPossibleSections = ['Next steps', 'Verification', 'Additional resources']; - const endOfDoc = (block: any) => { - return block.$$id === taskBlocks.slice(-1)[0].$$id; - }; - // @ts-ignore-next-line - taskBlocks.slice(startingIndex).some((taskBlock) => { - if ((taskBlock.title && typeof taskBlock.title === 'string') || endOfDoc(taskBlock)) { - // new section, first push the previous procedure - procedure && - procedure.hasSeenList && - procedures.push({ - ...procedure, - }); - if (nextPossibleSections.indexOf(taskBlock.title) > -1) { - // another (non-procedure) section - return true; - } - if (!procedure || procedure.hasSeenList) { - // next procedure - procedure = { - title: '', - description: '', - hasSeenList: false, - }; - } - } - lastIndex += 1; - if (taskBlock.context === 'floating_title') { - (procedure as any).title = taskBlock.getTitle(); - } else { - (procedure as any).description += elementWithoutTitle(taskBlock.convert()).outerHTML; - if (taskBlock.context === 'olist' || taskBlock.context === 'ulist') { - (procedure as any).hasSeenList = true; - } - } - }); - return { - procedures, - lastIndex, - }; -}; - -const getRest = (taskBlocks: any[], startingIndex: number) => { - let remainingContent = ''; - taskBlocks.slice(startingIndex).some((taskBlock, index) => { - remainingContent += addClasses(taskBlock.convert()).outerHTML; - }); - return remainingContent; -}; - -export const ProcedureAsciiDocParser = (file: string, options: any = {}) => { - const fullAdoc = processor.load(file, { - ...asciiOptions, - ...options, - }); - const docId = fullAdoc.getId(); - const docTitle = fullAdoc.getDocumentTitle(); - const taskBlocks = fullAdoc.getBlocks(); - const { intro: docIntro, lastIndex: lastIndexIntro, nextBlock } = getIntro(taskBlocks); - const { prereqs, lastIndex: lastIndexPrereqs } = getPrereqs(taskBlocks, lastIndexIntro); - const { procedures, lastIndex: lastIndexProcedures } = getProcedures( - taskBlocks, - lastIndexPrereqs, - ); - const remainingContent = getRest(taskBlocks, lastIndexProcedures); - - const qsTasks: QuickStartTask[] = []; - procedures.forEach((procedure, index) => { - qsTasks.push({ - title: procedure.title || `Procedure ${index + 1}`, - description: procedure.description, - review: { - instructions: 'Have you completed these steps?', - failedTaskHelp: 'This task isn’t verified yet. 
Try the task again.', - }, - summary: { - success: 'You have completed this task!', - failed: 'Try the steps again.', - }, - }); - }); - - const processedAsciiDoc: QuickStart = { - metadata: { - name: fullAdoc.getAttribute('qs-id') || docId, - }, - spec: { - displayName: fullAdoc.getAttribute('qs-display-name') || (docTitle as string), - durationMinutes: fullAdoc.getAttribute('qs-duration-minutes'), - icon: fullAdoc.getAttribute('qs-icon'), - description: fullAdoc.getAttribute('qs-description') || docIntro, - introduction: - (fullAdoc.getAttribute('qs-introduction') && - (processor.convert(fullAdoc.getAttribute('qs-introduction')) as string)) || - docIntro, - conclusion: fullAdoc.getAttribute('qs-conclusion') || remainingContent, - prerequisites: - (fullAdoc.getAttribute('qs-prerequisites') && - getListItems(processor.convert(fullAdoc.getAttribute('qs-prerequisites')) as string)) || - prereqs, - nextQuickStart: fullAdoc.getAttribute('qs-next-quick-start') && [ - fullAdoc.getAttribute('qs-next-quick-start'), - ], - tasks: qsTasks, - }, - }; - return processedAsciiDoc; -}; diff --git a/packages/dev/src/quickstarts-data/asciidoc/.build/parsers/qs-adoc-parser.ts b/packages/dev/src/quickstarts-data/asciidoc/.build/parsers/qs-adoc-parser.ts deleted file mode 100644 index 50bc384e..00000000 --- a/packages/dev/src/quickstarts-data/asciidoc/.build/parsers/qs-adoc-parser.ts +++ /dev/null @@ -1,101 +0,0 @@ -import Processor from 'asciidoctor'; -import { - QuickStart, - QuickStartTask, - QuickStartTaskReview, - QuickStartTaskSummary, -} from '@patternfly/quickstarts'; - -const processor = Processor(); -const asciiOptions = { - // https://docs.asciidoctor.org/asciidoc/latest/document/doctypes/ - // article, book, manpage, inline - doctype: 'article', - // add header/footer when true - standalone: false, - safe: 'unsafe', - // base_dir: "quickstarts-data/asciidoc/", - sourcemap: true, -}; - -const getInnerText = (html: string) => { - const span = document.createElement('span'); - span.innerHTML = html; - return span.textContent || span.innerText; -}; -const getListItems = (html: string) => { - const span = document.createElement('span'); - span.innerHTML = html; - const elements = span.querySelectorAll('li'); - const items: string[] = []; - elements.forEach((el) => { - items.push(el.textContent.trim()); - }); - return items; -}; - -export const QuickstartAsciiDocParser = (file: string, options: any = {}) => { - const fullAdoc = processor.load(file, { - ...asciiOptions, - ...options, - }); - const taskBlocks = fullAdoc.getBlocks(); - const qsTasks: QuickStartTask[] = []; - taskBlocks.forEach((taskBlock) => { - const adocBlocks = taskBlock.getBlocks(); - const qsTask: QuickStartTask = {}; - const qsReview: QuickStartTaskReview = {}; - const qsSummary: QuickStartTaskSummary = {}; - - for (const block in adocBlocks) { - if (Object.prototype.hasOwnProperty.call(adocBlocks, block)) { - const content = adocBlocks[block].getContent(); - switch (adocBlocks[block].getRole()) { - case 'qs-title': - qsTask.title = getInnerText(content); - break; - case 'qs-description': - qsTask.description = content; - break; - case 'qs-review instructions': - qsReview.instructions = content; - break; - case 'qs-review failedTaskHelp': - qsReview.failedTaskHelp = content; - break; - case 'qs-summary success': - qsSummary.success = content; - break; - case 'qs-summary failed': - qsSummary.failed = content; - break; - default: - break; - } - } - } - qsTask.review = qsReview; - qsTask.summary = qsSummary; - - qsTasks.push(qsTask); - 
}); - const processedAsciiDoc: QuickStart = { - metadata: { - name: fullAdoc.getAttribute('qs-id'), - }, - spec: { - displayName: fullAdoc.getAttribute('qs-display-name'), - durationMinutes: fullAdoc.getAttribute('qs-duration-minutes'), - icon: fullAdoc.getAttribute('qs-icon'), - description: fullAdoc.getAttribute('qs-description'), - introduction: processor.convert(fullAdoc.getAttribute('qs-introduction')) as string, - conclusion: fullAdoc.getAttribute('qs-conclusion'), - prerequisites: getListItems( - processor.convert(fullAdoc.getAttribute('qs-prerequisites')) as string, - ), - nextQuickStart: [fullAdoc.getAttribute('qs-next-quick-start')], - tasks: qsTasks, - }, - }; - return processedAsciiDoc; -}; diff --git a/packages/dev/src/quickstarts-data/asciidoc/alert-note-prereq/README.adoc b/packages/dev/src/quickstarts-data/asciidoc/alert-note-prereq/README.adoc deleted file mode 100644 index 9fc160c9..00000000 --- a/packages/dev/src/quickstarts-data/asciidoc/alert-note-prereq/README.adoc +++ /dev/null @@ -1,119 +0,0 @@ -[id="chap-alert-note-prereq"] -= Note/Tip card, 3 alert variants and prerequisites example quickstart - -.Prerequisites -* You want to see some new features condensed into one example quickstart. - -ifdef::qs[] -[#description-alert-note] -Preview newly added features: Alert, note, and prerequisite sections rendered using Patternfly React Components. - -[#introduction] -Welcome to this example quickstart making it easy to see some new features, which will be included in a design rework of the quickstart drawer. -endif::[] - -[id="task-1_{context}",module-type="proc"] -== Prerequisites expanded, note, tip, and 3 alert variants in procedure - -In this Quick Start you will preview some design changes. - -.Prerequisites -* Take a look at the new design changes, including this prerequisites section. -* Also take a look at the note, tip, and 3 types of alerts rendered by PF React. - -.Procedure -. Note -+ -NOTE: I am a note card (triggered by NOTE admonition in asciidoc) rendered by Patternfly React Components. -+ - -. Tip -+ -TIP: I am a tip card (triggered by TIP admonition in asciidoc) rendered by Patternfly React Components. -+ - -. Info -+ -IMPORTANT: I am an info alert (triggered by IMPORTANT admonition in asciidoc) rendered by Patternfly React Components. -+ - -. Warning -+ -CAUTION: I am a warning alert (triggered by CAUTION admonition in asciidoc) rendered by Patternfly React Components. -+ - -. Danger -+ -WARNING: I am a danger alert (triggered by WARNING admonition in asciidoc) rendered by Patternfly React Components. - -.Verification -. Do you see a note and info card? -. Do you see 3 types of alerts and task-level prerequisites? - - -[id="proc-description-with-admonition-blocks_{context}"] -== Note, tip and 3 alert variants in task description - -Note: - -NOTE: If you'd like to use a service account that already has the required role, you can skip this section. - -Tip: - -TIP: If you want to use a service account that already has the required role, you can skip this section. - -Important: - -IMPORTANT: If you want to use a service account that already has the required role, you can skip this section. - -Caution: - -CAUTION: If you want to use a service account that already has the required role, you can skip this section. - -Warning: - -WARNING: If you want to use a service account that already has the required role, you can skip this section. - -.Prerequisites -* Review the description above - -.Procedure -. 
No steps for this task - -.Verification -* Did you see the sections above? - -[id="task-3_{context}",module-type="proc"] -== Long procedure step with multiple paragraphs and admonitions - -.Prerequisites -* None - -.Procedure --- -First paragraph before procedure steps. - -Second paragraph before procedure steps. - -. Procedure step 1 -+ -TIP: I am a tip card (triggerd by TIP admonition in asciidoc) rendered by Patternfly React Components. -+ -. Procedure step 2 -+ -In the web console left menu, go to *Service Accounts*, and click *Create service account* to generate the credentials that you can use to connect to {registry} and Kafka instances. - -. Procedure step 3 - -First paragraph after procedure steps. - -Second paragraph after procedure steps. --- - -.Verification -. Do you see a note section? -. Do you see an alert section and task-level prerequisites?. - -[#conclusion] -Congratulations! You successfully completed the example quick start, and are now aware of the new features. - diff --git a/packages/dev/src/quickstarts-data/asciidoc/alert-note-prereq/quickstart.yml b/packages/dev/src/quickstarts-data/asciidoc/alert-note-prereq/quickstart.yml deleted file mode 100644 index a523afc0..00000000 --- a/packages/dev/src/quickstarts-data/asciidoc/alert-note-prereq/quickstart.yml +++ /dev/null @@ -1,23 +0,0 @@ -metadata: - name: mas-alert-note-prereq - instructional: true -spec: - displayName: !snippet/title README.adoc#chap-alert-note-prereq - durationMinutes: 2 - type: - text: AsciiDoc - color: purple - icon: data:image/svg+xml;base64,PCEtLSBHZW5lcmF0ZWQgYnkgSWNvTW9vbi5pbyAtLT4KPHN2ZyB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgd2lkdGg9IjUxMiIgaGVpZ2h0PSI1MTIiIHZpZXdCb3g9IjAgMCA1MTIgNTEyIj4KPHRpdGxlPjwvdGl0bGU+CjxnIGlkPSJpY29tb29uLWlnbm9yZSI+CjwvZz4KPHBhdGggZD0iTTQ0OCA2NHY0MTZoLTMzNmMtMjYuNTEzIDAtNDgtMjEuNDktNDgtNDhzMjEuNDg3LTQ4IDQ4LTQ4aDMwNHYtMzg0aC0zMjBjLTM1LjE5OSAwLTY0IDI4LjgtNjQgNjR2Mzg0YzAgMzUuMiAyOC44MDEgNjQgNjQgNjRoMzg0di00NDhoLTMyeiI+PC9wYXRoPgo8cGF0aCBkPSJNMTEyLjAyOCA0MTZ2MGMtMC4wMDkgMC4wMDEtMC4wMTkgMC0wLjAyOCAwLTguODM2IDAtMTYgNy4xNjMtMTYgMTZzNy4xNjQgMTYgMTYgMTZjMC4wMDkgMCAwLjAxOS0wLjAwMSAwLjAyOC0wLjAwMXYwLjAwMWgzMDMuOTQ1di0zMmgtMzAzLjk0NXoiPjwvcGF0aD4KPC9zdmc+Cg== - description: !snippet README.adoc#description-alert-note - prerequisites: - - Get ready to see a note in the task 1 steps - - Get ready to see an alert in the task 1 steps - - Get ready to see prerequisites displayed in task 1 - introduction: !snippet README.adoc#introduction - tasks: - - !snippet/proc README.adoc#task-1 - - !snippet/proc README.adoc#proc-description-with-admonition-blocks - - !snippet/proc README.adoc#task-3 - conclusion: !snippet README.adoc#conclusion - nextQuickStart: - - 'kafka-bin-scripts' diff --git a/packages/dev/src/quickstarts-data/asciidoc/getting-started-service-registry/README.adoc b/packages/dev/src/quickstarts-data/asciidoc/getting-started-service-registry/README.adoc deleted file mode 100644 index d2841472..00000000 --- a/packages/dev/src/quickstarts-data/asciidoc/getting-started-service-registry/README.adoc +++ /dev/null @@ -1,255 +0,0 @@ -//// -START GENERATED ATTRIBUTES -WARNING: This content is generated by running npm --prefix .build run generate:attributes -//// - - -:community: -:imagesdir: ./images -:product-version: 1 -:product-long: Application Services -:product: App Services -:registry-product-long: Red Hat OpenShift{nbsp}Service Registry -:registry: Service Registry -// Placeholder URL, when we get a HOST UI for the service we can put it here properly 
-:service-url: https://console.redhat.com/application-services/streams/ -:registry-url: https://console.redhat.com/beta/application-services/service-registry/ -:property-file-name: app-services.properties -:rhoas-version: 0.32.0 - -// Other upstream project names -:samples-git-repo: https://github.com/redhat-developer/app-services-guides - -//URL components for cross refs -:base-url: https://github.com/redhat-developer/app-services-guides/blob/main/ -:base-url-cli: https://github.com/redhat-developer/app-services-cli/tree/main/docs/ -:getting-started-url: getting-started/README.adoc -:getting-started-service-registry-url: getting-started-service-registry/README.adoc -:kafka-bin-scripts-url: kafka-bin-scripts/README.adoc -:kafkacat-url: kafkacat/README.adoc -:quarkus-url: quarkus/README.adoc -:quarkus-service-registry-url: quarkus-service-registry/README.adoc -:rhoas-cli-url: rhoas-cli/README.adoc -:rhoas-cli-kafka-url: rhoas-cli-kafka/README.adoc -:rhoas-cli-service-registry-url: rhoas-cli-service-registry/README.adoc -:rhoas-cli-ref-url: commands -:topic-config-url: topic-configuration/README.adoc -:consumer-config-url: consumer-configuration/README.adoc -:service-binding-url: service-discovery/README.adoc -:access-mgmt-url: access-mgmt/README.adoc - -//// -END GENERATED ATTRIBUTES -//// - -[id="chap-getting-started-service-registry"] -= Getting Started with {registry-product-long} -ifdef::context[:parent-context: {context}] -:context: getting-started-sr - -[IMPORTANT] -==== -{registry-product-long} is currently available for Development Preview. Development Preview releases provide early access to a limited set of features that might not be fully tested and that might change in the final GA version. Users should not use Development Preview software in production or for business-critical workloads. Limited documentation is available for Development Preview releases and is typically focused on fundamental user goals. -==== - -// Purpose statement for the assembly -[role="_abstract"] -As a developer of applications and services, you can use {registry-product-long} to create and set up {registry} instances and connect your applications and services to these instances. {registry} is a managed cloud service that enables you to manage schema and API definitions in your applications without having to install, configure, run, and maintain your own {registry} clusters. - -For more overview information about {registry}, see https://access.redhat.com/documentation/en-us/red_hat_openshift_service_registry/[]. - -ifndef::community[] -.Prerequisites -* You have a Red Hat account. -* You have a subscription to {product-long}. -//For more information about signing up, see *<@SME: Where to link?>*. -endif::[] - -// Condition out QS-only content so that it doesn't appear in docs. -// All QS anchor IDs must be in this alternate anchor ID format `[#anchor-id]` because the ascii splitter relies on the other format `[id="anchor-id"]` to generate module files. -ifdef::qs[] -[#description] -Learn how to create and set up your first {registry} instance in {registry-product-long}. - -[#introduction] -Welcome to the quick{nbsp}start for {registry-product-long}. In this quick start, you'll learn how to create and view a {registry} instance, create a schema in this instance, and create a service account to connect an application or service to this instance. 
-endif::[] - -[id="proc-creating-service-registry-instance_{context}"] -== Creating a {registry} instance - -[role="_abstract"] -Use the {product-long} web console to create and configure a {registry} instance for your applications or services. A {registry} instance is an isolated virtual tenant in a multi-tenanted deployment with its own unique instance URL and configuration to connect to producer and consumer applications. - -*Note:* If you want to use a service account that already has the required role, you can skip this section. - -Note: - -NOTE: If NOTE you want to use a service account that already has the required role, you can skip this section. - -Tip: - -TIP: If TIP you want to use a service account that already has the required role, you can skip this section. - -Important: - -IMPORTANT: If IMPORTANT you want to use a service account that already has the required role, you can skip this section. - -Caution: - -CAUTION: If CAUTION you want to use a service account that already has the required role, you can skip this section. - -Warning: - -WARNING: If WARNING you want to use a service account that already has the required role, you can skip this section. - -ifndef::qs[] -.Prerequisites -* You're logged in to the {registry} web console at {registry-url}[^]. -endif::[] - -.Procedure -. In the {product-long} web console, go to *{registry}* > *{registry} Instances*, and click *Create {registry} instance*. -. Enter a unique *Instance name*, such as `my-service-registry-instance`. Optionally, you can add a longer text description for this instance. -+ -[.screencapture] -.Create {registry} instance details -image::configure-service-registry-instance.png[Image of instance configuration details in Create {registry} instance window] -. Click *Create* to start the creation process for your {registry} instance. The new {registry} instance is listed in the instances table. - -. When the *Status* is *Ready*, you can start using this {registry} instance. You can use the options icon (three vertical dots) to connect to or delete the instance as needed. - -[.screencapture] -.{registry} instance options menu -image::service-registry-instance-options.png[Image of {registry} instance options menu] - -.Verification -ifdef::qs[] -* Is the new {registry} instance listed in the instances table? -* Is the state of the new {registry} instance shown as *Ready*? -endif::[] -ifndef::qs[] -. Verify that the new {registry} instance is listed in the instances table. -. Verify that the state of the new {registry} instance is shown as *Ready*. -endif::[] - - -[id="proc-uploading-registry-schema_{context}"] -== Uploading a schema to {registry} - -[role="_abstract"] -After you create a {registry} instance, you can upload schema or API content to the instance. The following example shows uploading an Apache Avro schema for serializing and deserializing Kafka messages in client applications. - -.Prerequisites -* You've created a {registry} instance and the instance is in *Ready* state. - -.Procedure -. In the *{registry}* instances page of the web console, select the {registry} instance that you want to upload a schema to. -. Click *Upload artifact* and complete the form to define the schema details: -+ -[.screencapture] -.Guided steps to define artifact details -image::upload-schema.png[Image of form to upload a schema] -+ -* *Group*: Enter an optional unique group name such as `my-org` to organize the artifact in a named collection. 
Each group contains a logically related set of schemas or API designs, typically managed by a single entity, belonging to a particular application or organization. -+ -NOTE: Specifying a group is optional when using the web console, and a `default` group is automatically created. When using the REST API or Maven plug-in, you can specify the `default` group in the API path if you do not want to create a unique group. -+ -* *ID*: Enter an optional unique ID for this artifact such as `my-ID`. If you do not specify a unique artifact ID, {registry} generates one automatically as a UUID. -* *Type*: Use the default *Auto-Detect* setting to automatically detect the artifact type, or select the artifact type from the drop-down, for example, Avro Schema or OpenAPI. -* *Artifact*: Drag and drop or click *Browse* to upload a file. For this example, copy and paste the following simple Avro schema: -+ -[source,json,subs="+quotes,attributes"] ----- -{ -"type": "record", -"namespace": "com.example", -"name": "FullName", -"fields": [ -{ "name": "first", "type": "string" }, -{ "name": "last", "type": "string" } -]} ----- - -. Click *Upload* to complete the operation and display the new artifact details: - -* *Info*: Displays the artifact name, group, description, lifecycle status, when created, and last modified. Click the *Edit Artifact Metadata* pencil icon to edit the artifact name and description or add labels, or click *Download* to download the artifact file locally. -* *Content*: Displays a read-only view of the full artifact content. -* *Documentation*: (OpenAPI only): Displays automatically-generated REST API documentation. -* *Content Rules*: Displays artifact content rules that you can enable and configure. You can configure a *Validity Rule* or *Compatibility Rule* by selecting the appropriate rule configuration from the drop-down. For details on supported rules, see the https://access.redhat.com/documentation/en-us/red_hat_openshift_service_registry/1[{registry-product-long} user documentation]. -+ -You can now use this schema to serialize and deserialize messages from Kafka client applications. - -. On the right of the screen, you can click *Upload new version* to add a new artifact version. - -. You can click *Delete* to delete an artifact as needed. -+ -IMPORTANT: Deleting an artifact deletes the artifact and all of its versions, and cannot be undone. Artifact versions are immutable and cannot be deleted individually. - -.Verification -ifdef::qs[] -* Is the new schema in the Artifacts table? -endif::[] -ifndef::qs[] -* Verify that the new schema is listed in the Artifacts table. -endif::[] - -[id="proc-connecting-registry-clients_{context}"] -== Connecting client applications to {registry} - -To connect your applications or services to a {registry} instance in the web console, you must copy and save the {registry} instance URL, create a service account, and copy and save the generated credentials. You'll use these details later when you configure your application for {registry}. - -.Prerequisites -* You've created a {registry} instance and the instance is in *Ready* state. - -.Procedure -. In the *{registry}* instances page of the web console, for the instance that you want to connect to, select the options icon (three vertical dots), and click *Connection*. -. 
Depending on the client libraries that you want to use, choose the API for your needs: -+ - * *Core Registry API* is the most powerful and works with Apicurio client libraries - * *Schema Registry compatibility API* provides compatibility with the Confluent Schema Registry API - * *CNCF Schema Registry API* provides compatibility with the CNCF specification - -. In the *Connection* page, copy the *Core Registry API* URL, or one of the other API URLs if you are using a different client, to a secure location. This is the server endpoint that you'll need to connect to this {registry} instance. - -. In the web console left menu, go to *Service Accounts*, and click *Create service account* to generate the credentials that you can use to connect to {registry} and Kafka instances. - -. Copy the generated *Client ID* and *Client Secret* to a secure location. -+ -IMPORTANT: The generated credentials are displayed only one time. Ensure that you've successfully and securely saved the copied credentials before closing the credentials window. - -. After you save the generated credentials to a secure location, select the confirmation check box in the credentials window and close the window. -+ -You’ll use the details that you saved to configure your applications to connect to your {registry} instances later when you're ready. -+ -For example, if you plan to use https://github.com/edenhill/kafkacat[Kafkacat^] to interact with your Kafka instance and deserialize Avro messages using {registry}, you'll use this information to set your {registry} URL in the client environment variables. -//// -. For the *Authentication method* for SASL/OAUTHBEARER, use `https://identity.api.openshift.com/auth/realms/rhoas/protocol/openid-connect/token` as the *Token endpoint URL* and store it to a secure location. This is the endpoint that you'll use with your service account credentials to authenticate the connection to this {registry} instance. - -+ -NOTE: SASL/PLAIN authentication is also available for tools and libraries that don't support SASL/OAUTHBEARER, but SASL/OAUTHBEARER is recommended whenever possible. With SASL/PLAIN authentication, you use only the service account credentials to authenticate the connection to the Kafka instance. -//// - - -.Verification -ifdef::qs[] -* Did you save the {registry} instance URL to a secure location? -* Did you save the client credentials to a secure location? -endif::[] -ifndef::qs[] -. Verify that the {registry} instance URL is saved to a secure location. -. Verify that the client credentials are saved to a secure location. -endif::[] - -[role="_additional-resources"] -== Additional resources -* https://access.redhat.com/documentation/en-us/red_hat_openshift_service_registry/1[{registry-product-long} user documentation] -* https://access.redhat.com/documentation/en-us/red_hat_openshift_streams_for_apache_kafka/1[OpenShift Streams for Apache Kafka user documentation] - -ifdef::qs[] -[#conclusion] -Congratulations! You successfully completed the {registry} Getting Started quick start, and are now ready to use the service. 
-endif::[] - -ifdef::parent-context[:context: {parent-context}] -ifndef::parent-context[:!context:] diff --git a/packages/dev/src/quickstarts-data/asciidoc/getting-started-service-registry/images/configure-service-registry-instance.png b/packages/dev/src/quickstarts-data/asciidoc/getting-started-service-registry/images/configure-service-registry-instance.png deleted file mode 100644 index 73d69d27..00000000 Binary files a/packages/dev/src/quickstarts-data/asciidoc/getting-started-service-registry/images/configure-service-registry-instance.png and /dev/null differ diff --git a/packages/dev/src/quickstarts-data/asciidoc/getting-started-service-registry/images/service-registry-instance-options.png b/packages/dev/src/quickstarts-data/asciidoc/getting-started-service-registry/images/service-registry-instance-options.png deleted file mode 100644 index 913da0c4..00000000 Binary files a/packages/dev/src/quickstarts-data/asciidoc/getting-started-service-registry/images/service-registry-instance-options.png and /dev/null differ diff --git a/packages/dev/src/quickstarts-data/asciidoc/getting-started-service-registry/images/upload-schema.png b/packages/dev/src/quickstarts-data/asciidoc/getting-started-service-registry/images/upload-schema.png deleted file mode 100644 index 4dedcd29..00000000 Binary files a/packages/dev/src/quickstarts-data/asciidoc/getting-started-service-registry/images/upload-schema.png and /dev/null differ diff --git a/packages/dev/src/quickstarts-data/asciidoc/getting-started-service-registry/quickstart.yml b/packages/dev/src/quickstarts-data/asciidoc/getting-started-service-registry/quickstart.yml deleted file mode 100644 index b04f4014..00000000 --- a/packages/dev/src/quickstarts-data/asciidoc/getting-started-service-registry/quickstart.yml +++ /dev/null @@ -1,22 +0,0 @@ -metadata: - name: getting-started-service-registry -spec: - type: - text: AsciiDoc - color: purple - displayName: !snippet/title README.adoc#chap-getting-started-service-registry - durationMinutes: 10 - icon: >- - 
data:image/svg+xml;base64,PHN2ZyBpZD0iZTA1M2ZiYWQtY2I0Ny00OGNiLThiNDMtZDczY2RmMzQyNGJjIiBkYXRhLW5hbWU9IkxheWVyIDEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgdmlld0JveD0iMCAwIDM3IDM3Ij4KICA8ZGVmcz4KICAgIDxzdHlsZT4KICAgICAgLmE0NjBjMWU3LWMyNjAtNDc5MS1iZjc0LTI0ZmE4OGU4MTcwZSB7CiAgICAgICAgZmlsbDogI2UwMDsKICAgICAgfQoKICAgICAgLmU4YTQ2MmQ1LTgzYWUtNGRjMi04OTVlLWE3NDU3ZjY2MDNjYSB7CiAgICAgICAgZmlsbDogI2ZmZjsKICAgICAgfQogICAgPC9zdHlsZT4KICA8L2RlZnM+CiAgPHBhdGggZD0iTTI3LjUuNUg5LjVhOSw5LDAsMCwwLTksOXYxOGE5LDksMCwwLDAsOSw5aDE4YTksOSwwLDAsMCw5LTlWOS41YTksOSwwLDAsMC05LTlaIi8+CiAgPHBhdGggY2xhc3M9ImE0NjBjMWU3LWMyNjAtNDc5MS1iZjc0LTI0ZmE4OGU4MTcwZSIgZD0iTTI2LjUsMjcuMTI1aC02YS42MjUuNjI1LDAsMCwxLS42MjUtLjYyNXYtNmEuNjI1LjYyNSwwLDAsMSwuNjI1LS42MjVoNmEuNjI1LjYyNSwwLDAsMSwuNjI1LjYyNXY2QS42MjUuNjI1LDAsMCwxLDI2LjUsMjcuMTI1Wm0tNS4zNzUtMS4yNWg0Ljc1di00Ljc1aC00Ljc1WiIvPgogIDxwYXRoIGNsYXNzPSJhNDYwYzFlNy1jMjYwLTQ3OTEtYmY3NC0yNGZhODhlODE3MGUiIGQ9Ik0yNi41LDE3LjEyNWgtNmEuNjI1LjYyNSwwLDAsMS0uNjI1LS42MjV2LTZhLjYyNS42MjUsMCwwLDEsLjYyNS0uNjI1aDZhLjYyNS42MjUsMCwwLDEsLjYyNS42MjV2NkEuNjI1LjYyNSwwLDAsMSwyNi41LDE3LjEyNVptLTUuMzc1LTEuMjVoNC43NXYtNC43NWgtNC43NVoiLz4KICA8cGF0aCBjbGFzcz0iYTQ2MGMxZTctYzI2MC00NzkxLWJmNzQtMjRmYTg4ZTgxNzBlIiBkPSJNMTYuNSwxNy4xMjVoLTZhLjYyNS42MjUsMCwwLDEtLjYyNS0uNjI1di02YS42MjUuNjI1LDAsMCwxLC42MjUtLjYyNWg2YS42MjUuNjI1LDAsMCwxLC42MjUuNjI1djZBLjYyNS42MjUsMCwwLDEsMTYuNSwxNy4xMjVabS01LjM3NS0xLjI1aDQuNzV2LTQuNzVoLTQuNzVaIi8+CiAgPHBhdGggY2xhc3M9ImU4YTQ2MmQ1LTgzYWUtNGRjMi04OTVlLWE3NDU3ZjY2MDNjYSIgZD0iTTE2LjUsMTkuODc1aC01YS42MjUuNjI1LDAsMCwwLDAsMS4yNWgzLjQ5MWwtNC45MzMsNC45MzNhLjYyNS42MjUsMCwwLDAsLjg4NC44ODRsNC45MzMtNC45MzNWMjUuNWEuNjI1LjYyNSwwLDAsMCwxLjI1LDB2LTVBLjYyNS42MjUsMCwwLDAsMTYuNSwxOS44NzVaIi8+Cjwvc3ZnPgo= - description: !snippet README.adoc#description - prerequisites: - - A Red Hat identity - - Access to the Red Hat OpenShift Service Registry environment at https://console.redhat.com - introduction: !snippet README.adoc#introduction - tasks: - - !snippet/proc README.adoc#proc-creating-service-registry-instance - - !snippet/proc README.adoc#proc-uploading-registry-schema - - !snippet/proc README.adoc#proc-connecting-registry-clients - conclusion: !snippet README.adoc#conclusion - nextQuickStart: - - 'service-registry-quarkus-dev' diff --git a/packages/dev/src/quickstarts-data/asciidoc/getting-started/README.adoc b/packages/dev/src/quickstarts-data/asciidoc/getting-started/README.adoc deleted file mode 100644 index dbc407f0..00000000 --- a/packages/dev/src/quickstarts-data/asciidoc/getting-started/README.adoc +++ /dev/null @@ -1,210 +0,0 @@ -[id="chap-getting-started"] -= Getting Started with {product-long} -ifdef::context[:parent-context: {context}] -:context: getting-started - -//// -START GENERATED ATTRIBUTES -WARNING: This content is generated by running npm --prefix .build run generate:attributes -//// - - -:community: -:imagesdir: ./images -:product-long: bf2fc6cc711aee1a0c2a -:product: bf2 -// Placeholder URL, when we get a HOST UI for the service we can put it here properly -:service_url: https://localhost:1234/ - -//// -END GENERATED ATTRIBUTES -//// - -// Purpose statement for the assembly -[role="_abstract"] -As a developer of applications and services, you can use the {product-long} cloud service to create and set up Kafka instances and connect your applications and services to these instances. {product} is a cloud service that enables you to implement Kafka data-streaming functionality in your applications without having to install, configure, run, and maintain your own Kafka clusters. 
- -.Prerequisites -ifndef::community[] -* You have a Red Hat account. -endif::[] -//* You have a subscription to {product-long}. For more information about signing up, see *<@SME: Where to link?>*. -* You have access to an application or service that you want to connect to a Kafka instance in {product-long}. - -// Condition out QS-only content so that it doesn't appear in docs. -// All QS anchor IDs must be in this alternate anchor ID format `[#anchor-id]` because the ascii splitter relies on the other format `[id="anchor-id"]` to generate module files. -ifdef::qs[] -[#description-getting-started] -Learn how to create and set up your first Apache Kafka instance in {product-long}. - -[#introduction] -Welcome to the {product-long} Getting Started quick start. In this quick start, you'll learn how to create and inspect a Kafka instance, create a service account to connect an application or service to the instance, and create a topic in the instance. -endif::[] - -[id="con-product-overview_{context}"] -== {product-long} - -{product-long} is a fully managed cloud service running on https://www.openshift.com/products/dedicated/[OpenShift Dedicated]. {product} enables you to implement Kafka data-streaming functionality in your applications without having to install, configure, run, and maintain your own Kafka clusters. With {product}, you can share data between microservices and other applications with high throughput and low latency. - -{product} also has the following key attributes: - -* *Scales with your application needs*: As your applications and services grow and require more Kafka instances, topics, and data, {product} can scale to meet those needs. -* *Provides flexible user control*: You can use the {product} web console UI or the relevant {product} API to configure the service as you desire. -ifndef::community[] -* *Offers a Service Level Agreement (SLA)*: You can rely on the service functionality as defined in the SLA. -endif::[] - -[id="proc-creating-kafka-instance_{context}"] -== Creating a Kafka instance in {product} - -A Kafka instance in {product} includes a Kafka cluster, bootstrap server, and other required configurations for connecting to Kafka producer and consumer services. You can use the {product} web console to create and configure a Kafka instance for your applications or services. - -. To highlight items from a quick start, first the target item needs to have a data attribute: *data-quickstart-id="something"* -. Then in asciidoc, the trigger element needs to have the *`+data-highlight__something+`* class/role, where the part after *`+data-highlight__+`* matches the data-quickstart-id of the target -Here are some examples: -* `+link:[Click me to highlight the logo, role="data-highlight__logo"]+` -** link:[Click me to highlight the logo, role="data-highlight__logo"] -* `+link:[Click me to highlight the Home nav item, role="data-highlight__home"]+` -** link:[Click me to highlight the Home nav item, role="data-highlight__home"] -* `+link:[Click here to highlight the Quick starts nav item, role="data-highlight__quickstarts"]+` -** link:[Click here to highlight the Quick starts nav item, role="data-highlight__quickstarts"] -* `+link:[Click here to highlight the Custom catalog nav item, role="data-highlight__custom-catalog"]+` -** link:[Click here to highlight the Custom catalog nav item, role="data-highlight__custom-catalog"] - -ifndef::qs[] -.Prerequisites -* You are logged in to the {product} web console at {service-url}. -endif::[] - -.Procedure -. 
In the *Streams for Apache Kafka* page of the web console, click *Create Kafka instance* and define the following instance details. Some values currently have only one option. -* *Instance name*: Enter a unique hyphenated name for the instance, such as `my-first-kafka-instance` in this example. -* *Cloud provider*: Select `Amazon Web Services`. -* *Cloud region*: Select `US East, N. Virginia`. -* *Availability zones*: Select `Multi`. -. Click *Create instance* to start the creation process for your Kafka instance. -+ --- -[.screencapture] -.Kafka instance configuration details -image::sak-configure-kafka-instance.png[Image of instance configuration details in Create Kafka instance window] - -The new Kafka instance is listed in the instances table. When the instance *Status* becomes *Ready*, you can start using the Kafka instance. - -[.screencapture] -.New Kafka instance in Ready state -image::sak-kafka-instance-ready.png[Image of new Kafka instance in Ready state] --- -. In the instances table, on the right side of the Kafka instance, use the options icon (three vertical dots) to view the instance details, connect to the instance, or delete the instance as needed. -+ -[.screencapture] -.Kafka instance options menu -image::sak-kafka-instance-options.png[Image of Kafka instance options menu] - -.Verification -ifdef::qs[] -* Is the new Kafka instance listed in the instances table? -* Is the state of the new Kafka instance shown as *Ready*? -endif::[] -ifndef::qs[] -. Verify that the new Kafka instance is listed in the instances table. -. Verify that the state of the new Kafka instance is shown as *Ready*. -endif::[] - - -//// -// Commenting out the following for now, which belongs in an onboarding tour (Stetson, 4 March 2021) - -When you're in the {Product_short} environment, you will see a left menu panel. This panel provides access to all resources related to the service, including the `Quick Starts` and `Documentation`. - -In the lower left of the screen you'll see a lightbulb icon. This icon gives access to the `Resource Center`. Here you can find the latest information about the service, like product updates, upcoming events, etc. - -image::sak-crc-resource-center.png[Image of Resource Center in web console] - -The center of the page shows you the list of Kafka instances that are currently running within your organisation. If this is your, or your organisations, first interaction with {Product_short}, this list will be empty. - -image::sak-kafka-overview.png[Image of initial empty instances table] -//// - -[id="proc-creating-service-account_{context}"] -== Creating a service account to connect to a Kafka instance in {product} - -To connect your applications or services to a Kafka instance in the {product} web console, you must create a service account, copy and save the generated credentials, and copy and save the bootstrap server location. {product-long} uses the SASL/OAUTHBEARER authentication mechanism over TLS to provide secure connections between your applications and Kafka instances. SASL/PLAIN authentication is also available for tools and libraries that do not support SASL/OAUTHBEARER, but SASL/OAUTHBEARER is recommended whenever possible. When you generate credentials for a Kafka instance, {product} creates a service account that contains the generated user name and password associated with the instance. - -.Prerequisites -* You have created a Kafka instance in the {product} web console and the instance is in *Ready* state. - -.Procedure -. 
In the *Streams for Apache Kafka* page of the web console, on the right side of the relevant Kafka instance, select the options icon (three vertical dots) and click *Connect to instance*.
-. In the *Connection* page, copy the *External server* endpoint to a secure location. This is the bootstrap server endpoint that your application requires to connect to the Kafka instance.
-. Click *Generate service account* and copy the *Client ID* and *Client secret* to a secure location. Your application requires these credentials to authenticate the connection to the Kafka instance.
-+
-IMPORTANT: The generated credentials are displayed only one time, so ensure that you have successfully and securely saved the copied credentials before closing the credentials window.
-
-. After you save the generated credentials to a secure location, select the confirmation check box in the credentials window and close the window.
-+
-You'll use the server and client information that you saved to configure your application to connect to your Kafka instances when you're ready. For example, if you plan to use https://github.com/edenhill/kafkacat[Kafkacat] to interact with your Kafka instance, you'll use this information to set your bootstrap server and client environment variables, as in the sketch below.
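-+
-A rough sketch of what that can look like (placeholder names and values; Kafkacat passes `-X` options straight to librdkafka, and SASL/PLAIN is shown here only because it is the simplest to express on the command line):
-+
-[source,bash]
-----
-# Placeholder values; substitute the endpoint and credentials you copied earlier
-export BOOTSTRAP_SERVER=<bootstrap_server_host:port>
-export CLIENT_ID=<client_id>
-export CLIENT_SECRET=<client_secret>
-
-# Consume from a topic to confirm that the connection and credentials work
-kafkacat -b "$BOOTSTRAP_SERVER" -t my-first-kafka-topic -C \
-  -X security.protocol=SASL_SSL \
-  -X sasl.mechanisms=PLAIN \
-  -X sasl.username="$CLIENT_ID" \
-  -X sasl.password="$CLIENT_SECRET"
-----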
-
-.Verification
-ifdef::qs[]
-* Did you save the bootstrap server endpoint to a secure location?
-* Did you save the client credentials to a secure location?
-endif::[]
-ifndef::qs[]
-. Verify that the bootstrap server endpoint is saved to a secure location.
-. Verify that the client credentials are saved to a secure location.
-endif::[]
-
-[id="proc-creating-kafka-topic_{context}"]
-== Creating a Kafka topic in {product}
-
-After you create a Kafka instance, you can create Kafka topics to start producing and consuming messages in your services.
-
-.Prerequisites
-* You have created a Kafka instance in the {product} web console and the instance is in *Ready* state.
-
-.Procedure
-. In the *Streams for Apache Kafka* page of the web console, select the name of the Kafka instance that you want to add a topic to.
-. Click *Create topic* and follow the guided steps to define the topic details. Click *Next* to complete each step and click *Finish* to complete the setup.
-+
---
-[.screencapture]
-.Guided steps to define topic details
-image::sak-create-topic.png[Image of wizard to create a topic]
-
-* *Topic name*: Enter a unique hyphenated topic name, such as `my-first-kafka-topic` in this example.
-* *Partitions*: Set the number of partitions for this topic. This example sets the number of partitions to `1`. Partitions are distinct lists of messages within a topic and enable parts of a topic to be distributed over multiple brokers in the cluster. A topic can contain one or more partitions, enabling producer and consumer loads to be scaled.
-+
-NOTE: You can increase the number of partitions later, but you cannot decrease them.
-+
-
-* *Message retention*: Set the message retention time to the relevant value and increment. This example sets the retention to `7 days`. Message retention time is the amount of time that messages are retained in a topic before they are deleted or compacted, depending on the cleanup policy.
-* *Replicas*: Set the number of partition replicas for the topic and the minimum number of follower replicas that must be in sync with a partition leader. This example sets the replica factor and in-sync replicas to `1`. Replicas are copies of partitions in a topic. Partition replicas are distributed over multiple brokers in the cluster to ensure topic availability if a broker fails. When a follower replica is in sync with a partition leader, the follower replica can become the new partition leader if needed.
-
-After you complete the topic setup, the new Kafka topic is listed in the topics table. You can now start producing and consuming messages to and from this topic using services that you connect to this instance.
---
-. In the topics table, on the right side of the Kafka topic, use the options icon (three vertical dots) to edit or delete the topic as needed.
-
-[.screencapture]
-.Edit or delete Kafka topic
-image::sak-edit-topic.png[Image of topic options to edit or delete]
-
-.Verification
-ifdef::qs[]
-* Is the new Kafka topic listed in the topics table?
-endif::[]
-ifndef::qs[]
-* Verify that the new Kafka topic is listed in the topics table.
-endif::[]
-
-[role="_additional-resources"]
-== Additional resources
-* https://kafka.apache.org/081/documentation.html#configuration[Configuration] in Kafka
-
-ifdef::qs[]
-[#conclusion]
-Congratulations! You successfully completed the {product} Getting Started quick start, and are now ready to use the service.
-endif::[]
-
-ifdef::parent-context[:context: {parent-context}]
-ifndef::parent-context[:!context:]
\ No newline at end of file
diff --git a/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-configure-kafka-instance.png b/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-configure-kafka-instance.png
deleted file mode 100644
index d18338d1..00000000
Binary files a/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-configure-kafka-instance.png and /dev/null differ
diff --git a/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-crc-resource-center.png b/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-crc-resource-center.png
deleted file mode 100644
index 501294af..00000000
Binary files a/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-crc-resource-center.png and /dev/null differ
diff --git a/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-create-kafka-form.png b/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-create-kafka-form.png
deleted file mode 100644
index ef49baae..00000000
Binary files a/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-create-kafka-form.png and /dev/null differ
diff --git a/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-create-topic.png b/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-create-topic.png
deleted file mode 100644
index 85e36ceb..00000000
Binary files a/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-create-topic.png and /dev/null differ
diff --git a/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-creation-in-progress.png b/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-creation-in-progress.png
deleted file mode 100644
index fe8e3e9a..00000000
Binary files a/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-creation-in-progress.png and /dev/null differ
diff --git a/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-edit-topic.png b/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-edit-topic.png
deleted file mode 100644
index 8bc1cbfd..00000000
Binary files
a/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-edit-topic.png and /dev/null differ diff --git a/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-kafka-instance-options.png b/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-kafka-instance-options.png deleted file mode 100644 index 8c20aa36..00000000 Binary files a/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-kafka-instance-options.png and /dev/null differ diff --git a/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-kafka-instance-ready.png b/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-kafka-instance-ready.png deleted file mode 100644 index 60699e31..00000000 Binary files a/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-kafka-instance-ready.png and /dev/null differ diff --git a/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-kafka-overview.png b/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-kafka-overview.png deleted file mode 100644 index 590f454a..00000000 Binary files a/packages/dev/src/quickstarts-data/asciidoc/getting-started/images/sak-kafka-overview.png and /dev/null differ diff --git a/packages/dev/src/quickstarts-data/asciidoc/getting-started/quickstart.yml b/packages/dev/src/quickstarts-data/asciidoc/getting-started/quickstart.yml deleted file mode 100644 index 705217f1..00000000 --- a/packages/dev/src/quickstarts-data/asciidoc/getting-started/quickstart.yml +++ /dev/null @@ -1,23 +0,0 @@ -metadata: - name: mas-getting-started -spec: - displayName: !snippet/title README.adoc#chap-getting-started - durationMinutes: 10 - type: - text: AsciiDoc - color: purple - icon: '' - description: !snippet README.adoc#description-getting-started - prerequisites: - - A Red Hat identity - - Access to the Red Hat OpenShift Streams for Apache Kafka environment at https://cloud.redhat.com - introduction: !snippet README.adoc#introduction - tasks: - - !snippet/proc README.adoc#proc-creating-kafka-instance - - !snippet/proc README.adoc#proc-creating-service-account - - !snippet/proc README.adoc#proc-creating-kafka-topic - conclusion: !snippet README.adoc#conclusion - nextQuickStart: - - 'kafkacat' - - 'kafka-bin-scripts' - - 'quarkus-kafka' \ No newline at end of file diff --git a/packages/dev/src/quickstarts-data/asciidoc/kafka-bin-scripts/README.adoc b/packages/dev/src/quickstarts-data/asciidoc/kafka-bin-scripts/README.adoc deleted file mode 100644 index 7306d288..00000000 --- a/packages/dev/src/quickstarts-data/asciidoc/kafka-bin-scripts/README.adoc +++ /dev/null @@ -1,208 +0,0 @@ -:parent-context: {context} - -// ATTRIBUTES -// We always have to provide default attributes in every file, this enables rendering e.g. in GitHub -:Product: bf2 -:Product_short: bf2 -:Propertyfile_name: bf2.properties - -[id="chap-kafka-bin-scripts"] -= Using Kafka Bin Scripts with {Product} - -ifdef::qs[] -[#description-kafka] -Learn how to use Kafka Bin Scripts to interact with a {Product} Kafka instance. - -[#introduction] -Welcome to the {Product} Kafka Bin Scripts Guide. - -In this guide we will walk you through the basics you need to know to use the *Kafka Bin Scripts* to interact with your Kafka cloud service. -endif::[] - -[id="task-1_{context}",module-type="proc"] -== Download the Kafka Bin Scripts - -The _Kafka Bin Scripts_ are the binary scripts that are provided in the https://kafka.apache.org/downloads[Apache Kafka distribution/download]. 
When you extract the Apache Kafka distribution, the `bin` directory of that distribution contains a set of tools that allow you to interact with your Kafka instance. There are scripts to produce messages, consume messages, and execute various operations against the Kafka APIs to administer topics, consumer groups, etc.
-
-In this Quick Start you will use the Kafka Bin Scripts to produce and consume messages.
-
-NOTE: The Kafka Bin Scripts are part of the open-source community version of Apache Kafka. They are not part of {Product} and are therefore not supported by Red Hat.
-
-If you already have the Kafka Bin Scripts installed on your system, you can continue to the next task of this tutorial.
-
-.Prerequisites
-* tbd
-
-.Procedure
-. [.data-highlight_quickstarts]#Click here# to highlight the Quick starts nav item
-. [.data-highlight_custom-catalog]#Click here# to highlight the Custom catalog nav item
-. Download the latest Kafka distribution from https://kafka.apache.org/downloads[here].
-. Extract the downloaded archive with your tool of choice.
-. In a terminal, navigate into the Apache Kafka folder. From this folder, navigate into the `bin` folder.
-. The `bin` folder contains the Kafka Bin Scripts. Take a look at the scripts inside this folder and verify that you have the `kafka-console-producer` and `kafka-console-consumer` scripts.
-. From the `bin` directory, run `kafka-console-producer.sh --version` to verify that the scripts can start and run without any problems.
-+
-[source,bash]
-----
-$ ./kafka-console-producer.sh --version
-2.7.0 (Commit:448719dc99a19793)
-----
-
-.Verification
-. Execute `kafka-console-producer.sh --version` in your terminal. This should print the Kafka version information.
-
-[id="task-2_{context}",module-type="proc"]
-== Create the Configuration File
-
-You will need to configure the Kafka Bin Scripts to connect with, and authenticate to, your {Product_short} Kafka instance.
-The easiest way to do this is to create a configuration file in the `config` directory of your Apache Kafka folder.
-
-.Prerequisites
-* Kafka Bin Scripts available on your system.
-* A running {Product_short} Kafka instance.
-* The bootstrap server location of your Kafka instance.
-* The credentials of the Service Account to authenticate against the Kafka instance.
-
-.Procedure
-. Get the credentials (_Client ID_ and _Client Secret_) of your Kafka instance. You created the Service Account in the _Getting Started Guide_. If you haven't created a Service Account yet, or if you lost the credentials, generate a new set of credentials as follows:
-.. Navigate to the {Product} environment.
-.. In the Kafka instance table, click the kebab/three-dot menu icon of your Kafka instance and select `Connect to instance`.
-.. Click the _Generate Credential_ button.
-.. Copy the _Client ID_ and _Client Secret_ and store them in a safe place.
-. Navigate to the `config` folder of your Apache Kafka folder.
-. Create a new file. Call it `{Propertyfile_name}`.
-. Add the following content to your configuration file, using the _Client ID_ as the `<client_id>` and the _Client Secret_ as the `<client_secret>`:
-+
-[source,properties]
-----
-sasl.mechanism=PLAIN
-security.protocol=SASL_SSL
-
-sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
-    username="<client_id>" \
-    password="<client_secret>";
-----
-+
-NOTE: {Product} supports both the SASL/OAUTHBEARER and SASL/PLAIN mechanisms for authentication. SASL/OAUTHBEARER is the recommended mechanism and superior to SASL/PLAIN; we highly recommend always using SASL/OAUTHBEARER if your Kafka client or library of choice supports it. A sketch of such a configuration follows at the end of this task.
-+
-. Save the file. We will use it in the next task to connect to your Kafka instance and produce messages.
-
-.Verification
-. You have a new file named `{Propertyfile_name}` in your Apache Kafka's `config` directory.
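-
-If your client supports it, an OAUTHBEARER variant of the configuration file looks roughly as follows. This is a sketch only: the property names below come from the OAuth client support added in newer Apache Kafka clients (3.2 and later), not from the 2.7.0 client shown earlier, and the token endpoint URL is a placeholder that you take from your service's connection details.
-
-[source,properties]
-----
-sasl.mechanism=OAUTHBEARER
-security.protocol=SASL_SSL
-
-# Token endpoint of the OAuth server that issues tokens for your service account
-sasl.oauthbearer.token.endpoint.url=<token_endpoint_url>
-sasl.login.callback.handler.class=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler
-
-sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required \
-    clientId="<client_id>" \
-    clientSecret="<client_secret>";
-----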
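-
-Optionally, you can sanity-check the new file by listing the topics in your instance with another of the bin scripts (a sketch; run it from the `bin` directory and replace _BOOTSTRAP_SERVER_ with your instance's bootstrap server, which the next task shows you how to find):
-
-[source,bash]
-----
-# Prints the topics in your Kafka instance if the configuration file is correct
-./kafka-topics.sh --list --bootstrap-server "$BOOTSTRAP_SERVER" --command-config ../config/{Propertyfile_name}
-----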
-
-[id="task-3_{context}",module-type="proc"]
-== Produce messages
-The Kafka Bin Scripts contain a script to easily produce messages to Apache Kafka from the console/command line/terminal.
-To produce messages from the console/terminal, you simply need to run the `kafka-console-producer` script.
-
-In the previous task, you created a configuration file with the credentials to connect to your Kafka instance. We will use this configuration file to connect the `kafka-console-producer` to Kafka.
-
-.Prerequisites
-* Kafka Bin Scripts available on your system.
-* A running {Product_short} Kafka instance.
-* The bootstrap server location of your Kafka instance.
-* A configuration file containing the authentication information to connect to your Kafka instance.
-
-.Procedure
-. Get the Bootstrap Server host and port of your {Product_short} Kafka instance. The _Getting Started Guide_ explains where you can find this information. To quickly recap:
-.. Navigate to the {Product} environment.
-.. In the Kafka instance table, click the kebab/three-dot menu icon of your Kafka instance and select `Connect to instance`.
-.. Copy the _Bootstrap Server_ hostname and port to your clipboard.
-. Start the `kafka-console-producer` using the following command, replacing _BOOTSTRAP_SERVER_ with the value for your Kafka instance.
-+
-[source,bash]
-----
-./kafka-console-producer.sh --topic my-other-topic --bootstrap-server "$BOOTSTRAP_SERVER" --producer.config ../config/{Propertyfile_name}
-----
-+
-. If the producer starts correctly, you will see the following prompt.
-+
-[source,bash]
-----
->
-----
-+
-. With `kafka-console-producer` running, you can produce messages by simply typing the message values in your terminal. The following will produce 3 messages to your topic.
-+
-[source,bash]
-----
->First message
->Second message
->Third message
-----
-+
-. Keep the producer running. We will use this producer again in one of the following tasks.
-
-.Verification
-. You have a `kafka-console-producer` running without any errors printed to your console.
-. No errors were printed to the console when you sent the 3 messages.
-
-[id="task-4_{context}",module-type="proc"]
-== Consume messages
-Apart from producing messages, the Kafka Bin Scripts can also be used to consume messages.
-To consume messages from the console/terminal, you simply need to run the `kafka-console-consumer` script, referencing the same configuration file we created earlier.
-
-.Prerequisites
-* The Kafka Bin Scripts installed on your system.
-* A running {Product_short} Kafka instance.
-* The bootstrap server location of your Kafka instance.
-* The credentials of the Service Account to authenticate against the Kafka instance.
-* Messages produced to the `my-other-topic` topic as described in the previous task.
-
-.Procedure
-. Using the information from the previous tasks, like the _Bootstrap Server_ of your {Product_short} Kafka instance and the configuration file, start the `kafka-console-consumer`. You will consume messages from the same topic, `my-other-topic`, that you produced messages to in the previous task. Execute the following command, replacing _BOOTSTRAP_SERVER_ with the value for your Kafka instance.
You should see the 3 messages you produced in the previous task.
-+
-[source,bash]
-----
-$ ./kafka-console-consumer.sh --topic my-other-topic --bootstrap-server "$BOOTSTRAP_SERVER" --from-beginning --consumer.config ../config/{Propertyfile_name}
-First message
-Second message
-Third message
-----
-+
-. The `kafka-console-consumer` has consumed the 3 messages you sent to the topic earlier.
-. Keep the consumer running, as you will use it in the next task.
-
-.Verification
-. You have a `kafka-console-consumer` running without any errors printed to your console.
-. The `kafka-console-consumer` consumed the 3 messages you sent to the `my-other-topic` topic in the previous task.
-
-
-[id="task-5_{context}",module-type="proc"]
-== Produce and Consume messages
-Now that you've produced and consumed some messages, and have your `kafka-console-producer` and `kafka-console-consumer` still running, we can produce and consume some more messages.
-
-.Prerequisites
-* A `kafka-console-producer` running and connected to your {Product_short} Kafka instance, producing to your `my-other-topic` topic.
-* A `kafka-console-consumer` running and connected to your {Product_short} Kafka instance, consuming from your `my-other-topic` topic.
-
-.Procedure
-. In your terminal with the running `kafka-console-producer`, send the following message.
-+
-[source,bash]
-----
-My Kafka Bin Scripts message
-----
-+
-. Switch to the terminal in which you have your `kafka-console-consumer` running. You should see your message being consumed.
-+
-[source,bash]
-----
-First message
-Second message
-Third message
-My Kafka Bin Scripts message
-----
-+
-. Produce some more messages to your {Product_short} Kafka instance and see how they are being consumed by your `kafka-console-consumer`.
-
-.Verification
-. You've produced the _My Kafka Bin Scripts message_ to your `my-other-topic` Kafka topic without errors.
-. The _My Kafka Bin Scripts message_ was successfully consumed by your `kafka-console-consumer`.
-
-ifdef::qs[]
-[#conclusion]
-Congratulations! You've successfully completed the {Product} Kafka Bin Scripts Guide, and are now ready to produce messages to, and consume messages from, the service.
-endif::[] - -:context: {parent-context} \ No newline at end of file diff --git a/packages/dev/src/quickstarts-data/asciidoc/kafka-bin-scripts/quickstart.yml b/packages/dev/src/quickstarts-data/asciidoc/kafka-bin-scripts/quickstart.yml deleted file mode 100644 index ff374b5d..00000000 --- a/packages/dev/src/quickstarts-data/asciidoc/kafka-bin-scripts/quickstart.yml +++ /dev/null @@ -1,23 +0,0 @@ -metadata: - name: mas-quarkus-kafka -spec: - displayName: !snippet/title README.adoc#chap-kafka-bin-scripts - durationMinutes: 10 - type: - text: AsciiDoc - color: purple - icon: '' - description: !snippet README.adoc#description-kafka - prerequisites: - - A running Kafka instance (see the Getting Started Guide) - - A command line/terminal - introduction: !snippet README.adoc#introduction - tasks: - - !snippet/proc README.adoc#task-1 - - !snippet/proc README.adoc#task-2 - - !snippet/proc README.adoc#task-3 - - !snippet/proc README.adoc#task-4 - - !snippet/proc README.adoc#task-5 - conclusion: !snippet README.adoc#conclusion - nextQuickStart: - - 'todo' \ No newline at end of file diff --git a/packages/dev/src/quickstarts-data/asciidoc/procedure-parser.ts b/packages/dev/src/quickstarts-data/asciidoc/procedure-parser.ts deleted file mode 100644 index 7eab7acb..00000000 --- a/packages/dev/src/quickstarts-data/asciidoc/procedure-parser.ts +++ /dev/null @@ -1,3 +0,0 @@ -/* eslint-disable */ - -export { ProcQuickStartParser } from '@patternfly/quickstarts'; diff --git a/packages/dev/src/quickstarts-data/asciidoc/quickstart.schema.json b/packages/dev/src/quickstarts-data/asciidoc/quickstart.schema.json deleted file mode 100644 index fce621b2..00000000 --- a/packages/dev/src/quickstarts-data/asciidoc/quickstart.schema.json +++ /dev/null @@ -1,500 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema", - "$id": "https://github.com/bf2fc6cc711aee1a0c2a/guides/quickstart.schema.json", - "type": "object", - "title": "A Quick Start", - "examples": [ - { - "apiVersion": "console.openshift.io/v1", - "kind": "QuickStarts", - "metadata": { - "name": "quarkus-kafka" - }, - "spec": { - "version": 0.1, - "displayName": "!snippet/title README.adoc#using-with", - "durationMinutes": 15, - "icon": "", - "description": "!snippet README.adoc#description", - "type": { - "text": "Instructional", - "color": "green" - }, - "prerequisites": [ - "Requirement 1", - "Requirement 2", - "Requirement 2" - ], - "introduction": "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", - "tasks": [ - "!snippet/proc README.adoc#task-1", - { - "proc": "!snippet/proc README.adoc#task-2", - "title": "foo" - } - ], - "conclusion": "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", - "nextQuickStart": [ - "" - ] - } - }, - { - "apiVersion": "console.openshift.io/v1", - "kind": "QuickStarts", - "metadata": { - "name": "explore-pipelines" - }, - "spec": { - "version": 4.7, - "displayName": "Installing the Pipelines Operator", - "durationMinutes": 10, - "icon": "", - "description": "Install the OpenShift® Pipelines Operator to build Pipelines using Tekton.", - "prerequisites": [ - "" - ], - "introduction": "OpenShift® Pipelines is a cloud-native, continuous integration and continuous delivery (CI/CD) solution based on Kubernetes resources. 
It uses Tekton building blocks to automate deployments across multiple Kubernetes distributions by abstracting away the underlying implementation details.\n* OpenShift Pipelines is a serverless CI/CD system that runs pipelines with all the required dependencies in isolated containers.\n* They are designed for decentralized teams that work on a microservice-based architecture.\n* They are defined using standard Custom Resource Definitions making them extensible and easy to integrate with the existing Kubernetes tools. This enables you to scale on-demand.\n* You can use OpenShift Pipelines to build images with Kubernetes tools such as Source-to-Image (S2I), Buildah, Buildpacks, and Kaniko that are portable across any Kubernetes platform.\n* You can use the Developer perspective to create and manage pipelines and view logs in your namespaces.\n\nTo start using Pipelines, install the OpenShift® Pipelines Operator on your cluster.", - "tasks": [ - { - "title": "Installing the OpenShift Pipelines Operator", - "description": "### To install the OpenShift Pipelines Operator:\n\n1. From the **Administrator** perspective in the console navigation panel, click **Operators > OperatorHub**.\n2. In the **Filter by keyword** field, type `OpenShift Pipelines Operator`.\n3. If the tile has an Installed label, the Operator is already installed. Proceed to the next quick start to create a Pipeline.\n4. Click the **tile** to open the Operator details.\n5. At the top of the OpenShift Pipelines Operator panel that opens, click **Install**.\n6. Fill out the Operator subscription form by selecting the channel that matches your OpenShift cluster, and then click **Install**.\n7. On the **Installed Operators** page, wait for the OpenShift Pipelines Operator's status to change from **Installing** to **Succeeded**. ", - "review": { - "instructions": "#### To verify that the OpenShift Pipelines Operator is installed:\n1. From the **Operators** section of the navigation, go to the **Installed Operators** page.\n2. Verify that the **OpenShift Pipelines Operator** appears in the list of Operators.\n\nIn the status column, is the status of the OpenShift Pipelines Operator **Succeeded**?", - "failedTaskHelp": "This task isn’t verified yet. Try the task again, or [read more](https://docs.openshift.com/container-platform/4.6/pipelines/installing-pipelines.html#op-installing-pipelines-operator-in-web-console_installing-pipelines) about this topic." - }, - "summary": { - "success": "You have installed the Pipelines Operator!", - "failed": "Try the steps again." - } - } - ], - "conclusion": "You successfully installed the OpenShift Pipelines Operator! If you want to learn how to deploy an application and associate a Pipeline with it, take the Creating a Pipeline quick start.", - "nextQuickStart": [ - "install-app-and-associate-pipeline" - ] - } - } - ], - "required": [ - "metadata", - "spec" - ], - "properties": { - "apiVersion": { - "$id": "#/properties/apiVersion", - "type": "string", - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "examples": [ - "console.openshift.io/v1" - ] - }, - "kind": { - "$id": "#/properties/kind", - "type": "string", - "description": "Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "examples": [ - "QuickStarts" - ] - }, - "metadata": { - "$id": "#/properties/metadata", - "type": "object", - "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "examples": [ - { - "name": "explore-pipelines" - } - ], - "required": [ - "name" - ], - "properties": { - "name": { - "$id": "#/properties/metadata/properties/name", - "type": "string", - "description": "Name must be unique. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "examples": [ - "explore-pipelines" - ] - } - }, - "additionalProperties": true - }, - "spec": { - "$id": "#/properties/spec", - "type": "object", - "description": "Specification of the Quick Start", - "examples": [ - { - "version": 4.7, - "displayName": "Installing the Pipelines Operator", - "durationMinutes": 10, - "icon": "", - "description": "Install the OpenShift® Pipelines Operator to build Pipelines using Tekton.", - "prerequisites": [ - "" - ], - "introduction": "OpenShift® Pipelines is a cloud-native, continuous integration and continuous delivery (CI/CD) solution based on Kubernetes resources. It uses Tekton building blocks to automate deployments across multiple Kubernetes distributions by abstracting away the underlying implementation details.\n* OpenShift Pipelines is a serverless CI/CD system that runs pipelines with all the required dependencies in isolated containers.\n* They are designed for decentralized teams that work on a microservice-based architecture.\n* They are defined using standard Custom Resource Definitions making them extensible and easy to integrate with the existing Kubernetes tools. This enables you to scale on-demand.\n* You can use OpenShift Pipelines to build images with Kubernetes tools such as Source-to-Image (S2I), Buildah, Buildpacks, and Kaniko that are portable across any Kubernetes platform.\n* You can use the Developer perspective to create and manage pipelines and view logs in your namespaces.\n\nTo start using Pipelines, install the OpenShift® Pipelines Operator on your cluster.", - "tasks": [ - { - "title": "Installing the OpenShift Pipelines Operator", - "description": "### To install the OpenShift Pipelines Operator:\n\n1. From the **Administrator** perspective in the console navigation panel, click **Operators > OperatorHub**.\n2. In the **Filter by keyword** field, type `OpenShift Pipelines Operator`.\n3. If the tile has an Installed label, the Operator is already installed. Proceed to the next quick start to create a Pipeline.\n4. Click the **tile** to open the Operator details.\n5. At the top of the OpenShift Pipelines Operator panel that opens, click **Install**.\n6. Fill out the Operator subscription form by selecting the channel that matches your OpenShift cluster, and then click **Install**.\n7. On the **Installed Operators** page, wait for the OpenShift Pipelines Operator's status to change from **Installing** to **Succeeded**. ", - "review": { - "instructions": "#### To verify that the OpenShift Pipelines Operator is installed:\n1. From the **Operators** section of the navigation, go to the **Installed Operators** page.\n2. 
Verify that the **OpenShift Pipelines Operator** appears in the list of Operators.\n\nIn the status column, is the status of the OpenShift Pipelines Operator **Succeeded**?", - "failedTaskHelp": "This task isn’t verified yet. Try the task again, or [read more](https://docs.openshift.com/container-platform/4.6/pipelines/installing-pipelines.html#op-installing-pipelines-operator-in-web-console_installing-pipelines) about this topic." - }, - "summary": { - "success": "You have installed the Pipelines Operator!", - "failed": "Try the steps again." - } - } - ], - "conclusion": "You successfully installed the OpenShift Pipelines Operator! If you want to learn how to deploy an application and associate a Pipeline with it, take the Creating a Pipeline quick start.", - "nextQuickStart": [ - "install-app-and-associate-pipeline" - ] - }, { - "version": 0.1, - "displayName": "!snippet/title README.adoc#using-with", - "durationMinutes": 15, - "icon": "", - "description": "!snippet README.adoc#description", - "prerequisites": [ - "Requirement 1", - "Requirement 2", - "Requirement 2" - ], - "introduction": "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", - "tasks": [ - "!snippet/proc README.adoc#task-1", - { - "proc": "!snippet/proc README.adoc#task-2", - "title": "foo" - } - ], - "conclusion": "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", - "nextQuickStart": [ - "" - ] - } - ], - "required": [ - "displayName", - "durationMinutes", - "icon", - "description", - "prerequisites", - "introduction", - "tasks", - "conclusion", - "nextQuickStart" - ], - "properties": { - "version": { - "$id": "#/properties/spec/properties/version", - "type": "number", - "description": "The version of the Quick Start", - "default": 0.0, - "examples": [ - 4.7 - ] - }, - "displayName": { - "$id": "#/properties/spec/properties/displayName", - "type": "string", - "description": "Used both in the catalog and as the heading for the quick start drawer", - "examples": [ - "Installing the Pipelines Operator", - "!snippet/title README.adoc#using-with-quarkus" - ] - }, - "durationMinutes": { - "$id": "#/properties/spec/properties/durationMinutes", - "type": "integer", - "description": "How long the quick start should take to complete", - "default": 0, - "examples": [ - 10 - ] - }, - "type": { - "$id": "#/properties/spec/properties/type", - "type": "object", - "description": "Used to distinguish between quick start types, an additional tile is shown on the quick start card", - "examples": [ - { - "text": "Documentation", - "color": "blue" - } - ], - "required": [ - "text" - ], - "properties": { - "text": { - "$id": "#/properties/spec/properties/type/properties/text", - "type": "string", - "description": "Text to display in the tile on the quick start card", - "examples": [ - "Documentation", - "Quick start" - ] - }, - "color": { - "$id": "#/properties/spec/properties/type/properties/color", - "type": "string", - "description": "Color of the tile on the quick start card", - "examples": [ - "green", - "blue" - ] - } - }, - "additionalProperties": false - }, - "icon": { - "$id": "#/properties/spec/properties/icon", - "type": "string", - "title": "The icon schema", - "description": "The URL of an icon to use", - "examples": [ - "" - ] - }, - "description": { - "$id": "#/properties/spec/properties/description", - "type": "string", - "description": "Rendered in the quick start catalog below 
the display name", - "examples": [ - "Install the OpenShift® Pipelines Operator to build Pipelines using Tekton.", - "!snippet README.adoc#description" - ] - }, - "prerequisites": { - "$id": "#/properties/spec/properties/prerequisites", - "type": "array", - "description": "Rendered in the quick start catalog", - "default": [], - "examples": [ - [ - "Requirement 1", - "Requirement 2" - ] - ], - "items": { - "$id": "#/properties/spec/properties/prerequisites/items", - "anyOf": [ - { - "$id": "#/properties/spec/properties/prerequisites/items/anyOf/0", - "type": "string", - "description": "Rendered in the quick start catalog", - "examples": [ - "Requirement 1", - "Requirement 2" - ] - } - ] - } - }, - "introduction": { - "$id": "#/properties/spec/properties/introduction", - "type": "string", - "description": "Used as the content for the first page of the quick start", - "examples": [ - "OpenShift® Pipelines is a cloud-native, continuous integration and continuous delivery (CI/CD) solution based on Kubernetes resources. It uses Tekton building blocks to automate deployments across multiple Kubernetes distributions by abstracting away the underlying implementation details.\n* OpenShift Pipelines is a serverless CI/CD system that runs pipelines with all the required dependencies in isolated containers.\n* They are designed for decentralized teams that work on a microservice-based architecture.\n* They are defined using standard Custom Resource Definitions making them extensible and easy to integrate with the existing Kubernetes tools. This enables you to scale on-demand.\n* You can use OpenShift Pipelines to build images with Kubernetes tools such as Source-to-Image (S2I), Buildah, Buildpacks, and Kaniko that are portable across any Kubernetes platform.\n* You can use the Developer perspective to create and manage pipelines and view logs in your namespaces.\n\nTo start using Pipelines, install the OpenShift® Pipelines Operator on your cluster." - ] - }, - "tasks": { - "$id": "#/properties/spec/properties/tasks", - "type": "array", - "title": "The tasks schema", - "description": "The stages of the QuickStart", - "default": [], - "examples": [ - [ - "!snippet/proc README.adoc#task-1", - { - "proc": "!snippet/proc README.adoc#task-2", - "title": "foo" - } - ], - [ - - { - "title": "Installing the OpenShift Pipelines Operator", - "description": "### To install the OpenShift Pipelines Operator:\n\n1. From the **Administrator** perspective in the console navigation panel, click **Operators > OperatorHub**.\n2. In the **Filter by keyword** field, type `OpenShift Pipelines Operator`.\n3. If the tile has an Installed label, the Operator is already installed. Proceed to the next quick start to create a Pipeline.\n4. Click the **tile** to open the Operator details.\n5. At the top of the OpenShift Pipelines Operator panel that opens, click **Install**.\n6. Fill out the Operator subscription form by selecting the channel that matches your OpenShift cluster, and then click **Install**.\n7. On the **Installed Operators** page, wait for the OpenShift Pipelines Operator's status to change from **Installing** to **Succeeded**. ", - "review": { - "instructions": "#### To verify that the OpenShift Pipelines Operator is installed:\n1. From the **Operators** section of the navigation, go to the **Installed Operators** page.\n2. 
Verify that the **OpenShift Pipelines Operator** appears in the list of Operators.\n\nIn the status column, is the status of the OpenShift Pipelines Operator **Succeeded**?", - "failedTaskHelp": "This task isn’t verified yet. Try the task again, or [read more](https://docs.openshift.com/container-platform/4.6/pipelines/installing-pipelines.html#op-installing-pipelines-operator-in-web-console_installing-pipelines) about this topic." - }, - "summary": { - "success": "You have installed the Pipelines Operator!", - "failed": "Try the steps again." - } - } - ] - ], - "items": { - "$id": "#/properties/spec/properties/tasks/items", - "anyOf": [ - { - "$id": "#/properties/spec/properties/tasks/items/anyOf/0", - "type": "string", - "description": "The custom !snippet/proc # tag", - "examples": [ - "!snippet/proc README.adoc#task-1" - ] - }, - { - "$id": "#/properties/spec/properties/tasks/items/anyOf/1", - "type": "object", - "description": "A full description of the Quick Start", - "examples": [ - { - "title": "Installing the OpenShift Pipelines Operator", - "description": "### To install the OpenShift Pipelines Operator:\n\n1. From the **Administrator** perspective in the console navigation panel, click **Operators > OperatorHub**.\n2. In the **Filter by keyword** field, type `OpenShift Pipelines Operator`.\n3. If the tile has an Installed label, the Operator is already installed. Proceed to the next quick start to create a Pipeline.\n4. Click the **tile** to open the Operator details.\n5. At the top of the OpenShift Pipelines Operator panel that opens, click **Install**.\n6. Fill out the Operator subscription form by selecting the channel that matches your OpenShift cluster, and then click **Install**.\n7. On the **Installed Operators** page, wait for the OpenShift Pipelines Operator's status to change from **Installing** to **Succeeded**. ", - "review": { - "instructions": "#### To verify that the OpenShift Pipelines Operator is installed:\n1. From the **Operators** section of the navigation, go to the **Installed Operators** page.\n2. Verify that the **OpenShift Pipelines Operator** appears in the list of Operators.\n\nIn the status column, is the status of the OpenShift Pipelines Operator **Succeeded**?", - "failedTaskHelp": "This task isn’t verified yet. Try the task again, or [read more](https://docs.openshift.com/container-platform/4.6/pipelines/installing-pipelines.html#op-installing-pipelines-operator-in-web-console_installing-pipelines) about this topic." - }, - "summary": { - "success": "You have installed the Pipelines Operator!", - "failed": "Try the steps again." - } - } - ], - "properties": { - "proc": { - "$id": "#/properties/spec/properties/tasks/items/anyOf/2", - "type": "string", - "description": "The custom !snippet/proc # tag", - "examples": [ - "!snippet/proc README.adoc#task-1" - ] - }, - "title": { - "$id": "#/properties/spec/properties/tasks/items/anyOf/2/properties/title", - "type": "string", - "description": "The title of the Quick Start", - "examples": [ - "Installing the OpenShift Pipelines Operator" - ] - }, - "description": { - "$id": "#/properties/spec/properties/tasks/items/anyOf/2/properties/description", - "type": "string", - "description": "The body of the Quick Start", - "default": "", - "examples": [ - "### To install the OpenShift Pipelines Operator:\n\n1. From the **Administrator** perspective in the console navigation panel, click **Operators > OperatorHub**.\n2. In the **Filter by keyword** field, type `OpenShift Pipelines Operator`.\n3. 
If the tile has an Installed label, the Operator is already installed. Proceed to the next quick start to create a Pipeline.\n4. Click the **tile** to open the Operator details.\n5. At the top of the OpenShift Pipelines Operator panel that opens, click **Install**.\n6. Fill out the Operator subscription form by selecting the channel that matches your OpenShift cluster, and then click **Install**.\n7. On the **Installed Operators** page, wait for the OpenShift Pipelines Operator's status to change from **Installing** to **Succeeded**. " - ] - }, - "review": { - "$id": "#/properties/spec/properties/tasks/items/anyOf/2/properties/review", - "type": "object", - "description": "The messages used review the Quick Start", - "default": {}, - "examples": [ - { - "instructions": "#### To verify that the OpenShift Pipelines Operator is installed:\n1. From the **Operators** section of the navigation, go to the **Installed Operators** page.\n2. Verify that the **OpenShift Pipelines Operator** appears in the list of Operators.\n\nIn the status column, is the status of the OpenShift Pipelines Operator **Succeeded**?", - "failedTaskHelp": "This task isn’t verified yet. Try the task again, or [read more](https://docs.openshift.com/container-platform/4.6/pipelines/installing-pipelines.html#op-installing-pipelines-operator-in-web-console_installing-pipelines) about this topic." - } - ], - "properties": { - "instructions": { - "$id": "#/properties/spec/properties/tasks/items/anyOf/2/properties/review/properties/instructions", - "type": "string", - "description": "The verification steps", - "examples": [ - "#### To verify that the OpenShift Pipelines Operator is installed:\n1. From the **Operators** section of the navigation, go to the **Installed Operators** page.\n2. Verify that the **OpenShift Pipelines Operator** appears in the list of Operators.\n\nIn the status column, is the status of the OpenShift Pipelines Operator **Succeeded**?" - ] - }, - "failedTaskHelp": { - "$id": "#/properties/spec/properties/tasks/items/anyOf/2/properties/review/properties/failedTaskHelp", - "type": "string", - "description": "The message to show if verification fails", - "examples": [ - "This task isn’t verified yet. Try the task again, or [read more](https://docs.openshift.com/container-platform/4.6/pipelines/installing-pipelines.html#op-installing-pipelines-operator-in-web-console_installing-pipelines) about this topic." - ] - } - }, - "additionalProperties": false - }, - "summary": { - "$id": "#/properties/spec/properties/tasks/items/anyOf/3/properties/summary", - "type": "object", - "description": "The summary screen for the Quick Start task", - "default": {}, - "examples": [ - { - "success": "You have installed the Pipelines Operator!", - "failed": "Try the steps again." - } - ], - "properties": { - "success": { - "$id": "#/properties/spec/properties/tasks/items/anyOf/3/properties/summary/properties/success", - "type": "string", - "description": "The success message for the summary screen", - "examples": [ - "You have installed the Pipelines Operator!" - ] - }, - "failed": { - "$id": "#/properties/spec/properties/tasks/items/anyOf/3/properties/summary/properties/failed", - "type": "string", - "description": "The failure message for the summary screen", - "default": "", - "examples": [ - "Try the steps again." 
- ] - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - ] - } - }, - "conclusion": { - "$id": "#/properties/spec/properties/conclusion", - "type": "string", - "description": "An explanation about the purpose of this instance.", - "default": "Used for the content of the final page of the quick start.", - "examples": [ - "You successfully installed the OpenShift Pipelines Operator! If you want to learn how to deploy an application and associate a Pipeline with it, take the Creating a Pipeline quick start." - ] - }, - "nextQuickStart": { - "$id": "#/properties/spec/properties/nextQuickStart", - "type": "array", - "description": "Rendered at the end of the quick start to provide the user with next steps. The value of each list member should be the _identifier_ of another quick start in this repository.", - "default": [], - "examples": [ - [ - "install-app-and-associate-pipeline" - ] - ], - "items": { - "$id": "#/properties/spec/properties/nextQuickStart/items", - "anyOf": [ - { - "$id": "#/properties/spec/properties/nextQuickStart/items/anyOf/0", - "type": "string", - "description": "Rendered at the end of the quick start to provide the user with next steps. The value of each list member should be the _identifier_ of another quick start in this repository.", - "examples": [ - "install-app-and-associate-pipeline" - ] - } - ] - } - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false -} diff --git a/packages/dev/src/quickstarts-data/asciidoc/quickstartLoader.tsx b/packages/dev/src/quickstarts-data/asciidoc/quickstartLoader.tsx deleted file mode 100644 index 733dd9bc..00000000 --- a/packages/dev/src/quickstarts-data/asciidoc/quickstartLoader.tsx +++ /dev/null @@ -1,20 +0,0 @@ -import { ProcQuickStartParser } from './procedure-parser'; - -const loadJSONQuickStartsFilesFromAssets = async (basePath: string): Promise => { - const data = await fetch(`${basePath}/webpack-assets.json`).then((response) => response.json()); - const files = Array.isArray(data[''].json) ? data[''].json : [data[''].json]; - return files - .filter((url: string) => url.endsWith('.quickstart.json')) - .map((f: string) => (!f.startsWith('http') ? 
`${basePath}/${f}` : f)); -}; - -export const loadJSONQuickStarts = async (basePath: string) => { - const files = await loadJSONQuickStartsFilesFromAssets(basePath); - const result = [] as any[]; - - for (const file of files) { - await fetch(file).then((response) => response.json().then((data) => result.push(data))); - } - - return result.map((content) => ProcQuickStartParser(content)); -}; diff --git a/packages/dev/webpack.config.js b/packages/dev/webpack.config.js index 417534ae..a7157758 100644 --- a/packages/dev/webpack.config.js +++ b/packages/dev/webpack.config.js @@ -4,7 +4,6 @@ const MiniCssExtractPlugin = require('mini-css-extract-plugin'); const HtmlWebpackPlugin = require('html-webpack-plugin'); const CopyPlugin = require('copy-webpack-plugin'); const TsconfigPathsPlugin = require('tsconfig-paths-webpack-plugin'); -const { buildQuickStart } = require('./quickstart-adoc'); const AssetsPlugin = require('assets-webpack-plugin'); const staticDir = path.join(process.cwd(), 'static/'); @@ -110,26 +109,6 @@ module.exports = (_env, argv) => { keepInMemory: false, removeFullPathAutoPrefix: true, }), - new CopyPlugin({ - patterns: [ - { - from: 'src/quickstarts-data/asciidoc/**/quickstart.yml', - to: ({ absoluteFilename }) => { - // The dirname of quickstart is used as the output key - const dirName = path.basename(path.dirname(absoluteFilename)); - if (_env === 'development') { - return `${dirName}.quickstart.json`; - } - return `${dirName}.[contenthash].quickstart.json`; - }, - transform: (content, absoluteFilename) => { - const basePath = path.dirname(absoluteFilename); - return buildQuickStart(content, absoluteFilename, basePath, {}); - }, - noErrorOnMissing: true, - }, - ], - }), ], }; }; diff --git a/packages/module/src/catalog/QuickStartTile.tsx b/packages/module/src/catalog/QuickStartTile.tsx index 4af8c42f..1f43d843 100644 --- a/packages/module/src/catalog/QuickStartTile.tsx +++ b/packages/module/src/catalog/QuickStartTile.tsx @@ -8,7 +8,7 @@ import { camelize } from '../utils/quick-start-utils'; import QuickStartTileDescription from './QuickStartTileDescription'; import QuickStartTileFooter from './QuickStartTileFooter'; import QuickStartTileFooterExternal from './QuickStartTileFooterExternal'; -import QuickStartTileHeader from './QuickStartTileHeader'; +import QuickStartTileHeader, { QuickstartAction } from './QuickStartTileHeader'; import './QuickStartTile.scss'; @@ -17,6 +17,8 @@ interface QuickStartTileProps { status: QuickStartStatus; isActive: boolean; onClick?: () => void; + /** Action config for button rendered next to title */ + action?: QuickstartAction; } const QuickStartTile: React.FC = ({ @@ -24,12 +26,14 @@ const QuickStartTile: React.FC = ({ status, isActive, onClick = () => {}, + action, }) => { const { - metadata: { name: id }, + metadata: { name, id: metaId }, spec: { icon, tasks, displayName, description, durationMinutes, prerequisites, link, type }, } = quickStart; + const id = metaId || name; const { setActiveQuickStart, footer } = React.useContext(QuickStartContext); @@ -68,7 +72,7 @@ const QuickStartTile: React.FC = ({ if (link) { window.open(link.href); } else { - setActiveQuickStart(id, tasks?.length); + setActiveQuickStart(name, tasks?.length); } onClick(); } @@ -92,6 +96,7 @@ const QuickStartTile: React.FC = ({ duration={durationMinutes} type={type} quickStartId={id} + action={action} /> } onClick={handleClick} diff --git a/packages/module/src/catalog/QuickStartTileHeader.tsx b/packages/module/src/catalog/QuickStartTileHeader.tsx index 
3e4db458..faf4e58b 100644 --- a/packages/module/src/catalog/QuickStartTileHeader.tsx +++ b/packages/module/src/catalog/QuickStartTileHeader.tsx @@ -1,18 +1,31 @@ import './QuickStartTileHeader.scss'; import * as React from 'react'; -import { Label, Title } from '@patternfly/react-core'; +import { Button, ButtonProps, Flex, Label, Title } from '@patternfly/react-core'; import OutlinedClockIcon from '@patternfly/react-icons/dist/js/icons/outlined-clock-icon'; +import OutlinedBookmarkIcon from '@patternfly/react-icons/dist/js/icons/outlined-bookmark-icon'; import { StatusIcon } from '@console/shared'; import { QuickStartContext, QuickStartContextValues } from '../utils/quick-start-context'; import { QuickStartStatus, QuickStartType } from '../utils/quick-start-types'; import QuickStartMarkdownView from '../QuickStartMarkdownView'; +export interface QuickstartAction { + /** Screen reader aria label. */ + 'aria-label': string; + /** Icon to be rendered as a plain button, by default Bookmark outlined will be used. */ + icon?: React.ComponentType; + /** Callback with synthetic event parameter. */ + onClick?: (e: React.SyntheticEvent) => void; + /** Additional button props to be rendered as extra props. */ + buttonProps?: ButtonProps; +} + interface QuickStartTileHeaderProps { status: string; duration: number; name: string; type?: QuickStartType; quickStartId?: string; + action?: QuickstartAction; } const statusColorMap = { @@ -27,6 +40,7 @@ const QuickStartTileHeader: React.FC = ({ name, type, quickStartId, + action, }) => { const { getResource } = React.useContext(QuickStartContext); @@ -36,11 +50,22 @@ const QuickStartTileHeader: React.FC = ({ [QuickStartStatus.NOT_STARTED]: getResource('Not started'), }; + const ActionIcon = action?.icon || OutlinedBookmarkIcon; + return (
- - <QuickStartMarkdownView content={name} /> - + + + <QuickStartMarkdownView content={name} /> + + {action &&