From da46b72dc51b2b3d3b3a376a74ee3fc8353158ea Mon Sep 17 00:00:00 2001 From: vsoch Date: Wed, 4 Jan 2023 22:28:45 -0700 Subject: [PATCH] adding vendor blogs for redhat, nvidia, linux clusters I could not find feeds for vmware, arm, or aws - these can be added if there are feeds. Signed-off-by: vsoch --- _data/authors.yml | 28 ++ _posts/linuxcluster/2022-11-29-p=8858.md | 73 ++++ _posts/linuxcluster/2022-11-30-p=8878.md | 79 ++++ _posts/linuxcluster/2022-12-1-p=8885.md | 94 +++++ _posts/linuxcluster/2022-12-11-p=8899.md | 29 ++ _posts/linuxcluster/2022-12-11-p=8906.md | 29 ++ _posts/linuxcluster/2022-12-14-p=8915.md | 48 +++ _posts/linuxcluster/2022-12-19-p=8923.md | 93 ++++ _posts/linuxcluster/2022-12-22-p=8932.md | 36 ++ _posts/linuxcluster/2022-12-28-p=8947.md | 41 ++ _posts/linuxcluster/2023-1-3-p=8954.md | 82 ++++ _posts/nvidia/2022-12-27-p=61372.md | 40 ++ _posts/nvidia/2022-12-27-p=61526.md | 100 +++++ _posts/nvidia/2022-12-28-p=61570.md | 33 ++ _posts/nvidia/2022-12-29-p=61453.md | 56 +++ _posts/nvidia/2022-12-29-p=61600.md | 27 ++ _posts/nvidia/2022-12-30-p=61617.md | 59 +++ _posts/nvidia/2023-1-2-p=61502.md | 36 ++ _posts/nvidia/2023-1-3-p=61497.md | 68 +++ _posts/nvidia/2023-1-3-p=61498.md | 60 +++ _posts/nvidia/2023-1-3-p=61565.md | 66 +++ _posts/nvidia/2023-1-3-p=61578.md | 57 +++ _posts/nvidia/2023-1-3-p=61579.md | 49 +++ _posts/nvidia/2023-1-3-p=61580.md | 62 +++ _posts/nvidia/2023-1-3-p=61582.md | 58 +++ _posts/nvidia/2023-1-3-p=61644.md | 93 ++++ _posts/nvidia/2023-1-3-p=61704.md | 109 +++++ _posts/nvidia/2023-1-4-p=61452.md | 77 ++++ _posts/nvidia/2023-1-4-p=61631.md | 51 +++ ...-openshifts-machine-config-operator-mco.md | 22 + ...-19-interview-with-fords-satish-puranam.md | 29 ++ ...openshift-container-platform-ocp-part-2.md | 22 + ...ernal-secrets-operator-eso-as-a-service.md | 24 ++ ...s-and-artifacts-for-openshift-operators.md | 399 ++++++++++++++++++ ...or-observability-using-grafana-and-apis.md | 22 + 
...sider-interviews-katie-gamanji-of-apple.md | 29 ++ ...e-insider-interviews-ronen-dar-of-runai.md | 29 ++ ...essive-application-delivery-with-gitops.md | 29 ++ ...-12-28-whats-it-like-to-work-at-red-hat.md | 29 ++ _posts/redhat/2023-1-2-what-is-clair.md | 21 + ...at-advanced-cluster-management-policies.md | 24 ++ scripts/generate_posts.py | 2 +- 42 files changed, 2413 insertions(+), 1 deletion(-) create mode 100644 _posts/linuxcluster/2022-11-29-p=8858.md create mode 100644 _posts/linuxcluster/2022-11-30-p=8878.md create mode 100644 _posts/linuxcluster/2022-12-1-p=8885.md create mode 100644 _posts/linuxcluster/2022-12-11-p=8899.md create mode 100644 _posts/linuxcluster/2022-12-11-p=8906.md create mode 100644 _posts/linuxcluster/2022-12-14-p=8915.md create mode 100644 _posts/linuxcluster/2022-12-19-p=8923.md create mode 100644 _posts/linuxcluster/2022-12-22-p=8932.md create mode 100644 _posts/linuxcluster/2022-12-28-p=8947.md create mode 100644 _posts/linuxcluster/2023-1-3-p=8954.md create mode 100644 _posts/nvidia/2022-12-27-p=61372.md create mode 100644 _posts/nvidia/2022-12-27-p=61526.md create mode 100644 _posts/nvidia/2022-12-28-p=61570.md create mode 100644 _posts/nvidia/2022-12-29-p=61453.md create mode 100644 _posts/nvidia/2022-12-29-p=61600.md create mode 100644 _posts/nvidia/2022-12-30-p=61617.md create mode 100644 _posts/nvidia/2023-1-2-p=61502.md create mode 100644 _posts/nvidia/2023-1-3-p=61497.md create mode 100644 _posts/nvidia/2023-1-3-p=61498.md create mode 100644 _posts/nvidia/2023-1-3-p=61565.md create mode 100644 _posts/nvidia/2023-1-3-p=61578.md create mode 100644 _posts/nvidia/2023-1-3-p=61579.md create mode 100644 _posts/nvidia/2023-1-3-p=61580.md create mode 100644 _posts/nvidia/2023-1-3-p=61582.md create mode 100644 _posts/nvidia/2023-1-3-p=61644.md create mode 100644 _posts/nvidia/2023-1-3-p=61704.md create mode 100644 _posts/nvidia/2023-1-4-p=61452.md create mode 100644 _posts/nvidia/2023-1-4-p=61631.md create mode 100644 
_posts/redhat/2022-12-16-the-consequences-of-pausing-machineconfig-pools-in-openshifts-machine-config-operator-mco.md create mode 100644 _posts/redhat/2022-12-19-interview-with-fords-satish-puranam.md create mode 100644 _posts/redhat/2022-12-19-multiple-ways-of-authentication-on-openshift-container-platform-ocp-part-2.md create mode 100644 _posts/redhat/2022-12-20-how-to-setup-external-secrets-operator-eso-as-a-service.md create mode 100644 _posts/redhat/2022-12-20-how-to-version-bound-images-and-artifacts-for-openshift-operators.md create mode 100644 _posts/redhat/2022-12-21-custom-queries-for-observability-using-grafana-and-apis.md create mode 100644 _posts/redhat/2022-12-21-kbe-insider-interviews-katie-gamanji-of-apple.md create mode 100644 _posts/redhat/2022-12-23-kbe-insider-interviews-ronen-dar-of-runai.md create mode 100644 _posts/redhat/2022-12-26-learn-about-progressive-application-delivery-with-gitops.md create mode 100644 _posts/redhat/2022-12-28-whats-it-like-to-work-at-red-hat.md create mode 100644 _posts/redhat/2023-1-2-what-is-clair.md create mode 100644 _posts/redhat/2023-1-4-deploying-triliovault-for-kubernetes-with-openshift-red-hat-advanced-cluster-management-policies.md diff --git a/_data/authors.yml b/_data/authors.yml index 34e40b3..840df10 100644 --- a/_data/authors.yml +++ b/_data/authors.yml @@ -4,3 +4,31 @@ - https://feeds.feedburner.com/mathworks/moler url: https://blogs.mathworks.com/matlab/ image: https://www.mathworks.com/images/responsive/global/pic-header-mathworks-logo.svg +- name: "RedHat Hybrid cloud blog" + tag: "redhat" + feed: https://cloud.redhat.com/blog/rss.xml + url: https://cloud.redhat.com/blog/tag/hpc + image: https://static.redhat.com/libs/redhat/brand-assets/latest/products/red-hat-hybrid-cloud--stacked--on-dark.svg +- name: "The Linux Cluster" + tag: "linuxcluster" + feed: https://thelinuxcluster.com/feed.xml + url: https://thelinuxcluster.com + image: 
https://linuxcluster.files.wordpress.com/2020/05/cropped-wooden_pigeon.jpg +- name: "NVIDIA High Performance Computing" + tag: "nvidia" + feed: https://feeds.feedburner.com/nvidiablog + url: https://blogs.nvidia.com/blog/tag/high-performance-computing/ +# Could not find feed +# - name: "The ARM High Performance Computing Blog" +# tag: "arm" +# feed: +# url: https://community.arm.com/arm-community-blogs/b/high-performance-computing-blog +# - name: "AWS HPC Blog" +# tag: "aws" +# feed: +# url: https://aws.amazon.com/blogs/hpc/ +# - name: "vmware High Performance Computing" +# tag: "vmware" +# feed: +# url: https://octo.vmware.com/tag/hpc/ + diff --git a/_posts/linuxcluster/2022-11-29-p=8858.md b/_posts/linuxcluster/2022-11-29-p=8858.md new file mode 100644 index 0000000..c804e34 --- /dev/null +++ b/_posts/linuxcluster/2022-11-29-p=8858.md @@ -0,0 +1,73 @@ +--- +author: The Linux Cluster +author_tag: linuxcluster +blog_subtitle: Linux Cluster Blog is a collection of how-to and tutorials for Linux + Cluster and Enterprise Linux +blog_title: The Linux Cluster +blog_url: https://thelinuxcluster.com +category: linuxcluster +date: '2022-11-29 05:37:07' +layout: post +original_url: https://thelinuxcluster.com/2022/11/29/understanding-the-difference-between-qsfp-qsfp-qsfp28/ +slug: understanding-the-difference-between-qsfp-qsfp-qsfp28 +title: Understanding the Difference between QSFP, QSFP+, QSFP28 +--- + +
+ + + +

Sometimes I use these terms loosely. Here an article that explain the 3 fiber optic transceivers QSFP, QSFP+ and QSFP28

+ + + + +

Taken from the article “Difference between QSFP, QSFP+, QSFP28

+ + + + +

Here are some main points

+ + + + +
    +
  1. The QSFP specification supports Ethernet, Fibre Channel, InfiniBand and SONET/SDH standards with different data rate options.
  2. + + + +
  3. QSFP transceivers support the network link over singlemode or multimode fiber patch cable.
  4. + + + +
  5. Common ones are 4x10G QSFP+, 4x28G QSFP28
  6. + + + +
  7. QSFP+ are designed to support 40G Ethernet, Serial Attached SCSI, QDR (40G) and FDR (56G) Infiniband, and other communication standards
  8. + + + +
  9. QSFP+ modules integrate 4 transmit and 4 receive channels plus sideband signals. Then QSFP+ modules can break out into 4x10G lanes. 
  10. + + + +
  11. QSFP28 is a hot-pluggable transceiver module designed for 100G data rate.
  12. + + + +
  13. QSFP28 integrates 4 transmit and 4 receiver channels. “28” means each lane carries up to 28G data rate.
  14. + + + +
  15. QSFP28 can do 4x25G breakout connection, 2x50G breakout, or 1x100G depending on the transceiver used.
  16. + + + +
  17. Usually QSFP28 modules can’t break out into 10G links. But it’s another case to insert a QSFP28 module into a QSFP+ port if switches support.
  18. + + + +
  19. QSFP+ and QSFP28 modules can support both short and long-haul transmission.
  20. +
\ No newline at end of file diff --git a/_posts/linuxcluster/2022-11-30-p=8878.md b/_posts/linuxcluster/2022-11-30-p=8878.md new file mode 100644 index 0000000..0f6ac94 --- /dev/null +++ b/_posts/linuxcluster/2022-11-30-p=8878.md @@ -0,0 +1,79 @@ +--- +author: The Linux Cluster +author_tag: linuxcluster +blog_subtitle: Linux Cluster Blog is a collection of how-to and tutorials for Linux + Cluster and Enterprise Linux +blog_title: The Linux Cluster +blog_url: https://thelinuxcluster.com +category: linuxcluster +date: '2022-11-30 02:05:41' +layout: post +original_url: https://thelinuxcluster.com/2022/11/30/using-sslscan-to-determine-supported-cipers/ +slug: using-sslscan-to-determine-supported-cipers +title: Using SSLScan to determine supported cipers +--- + +

SSLScan queries SSL services to determine the ciphers that are supported. This is a very useful tool if you wish to

+ + + + +

SSLScan is designed to be easy, lean and fast. The output includes preferred ciphers of the SSL service, and the certificate and is in text and XML formats.

+ + + + +

The Project Site and Installation can be found at https://github.com/rbsec/sslscan

+ + + + +

I was checking my Windows Server,

+ + + + +
$ sslscan --rdp x.x.x.x
+Version: 2.0.15-static
+OpenSSL 1.1.1t-dev  xx XXX xxxx
+
+Connected to x.x.x.x
+
+Testing SSL server x.x.x.x on port 3389 using SNI name x.x.x.x
+
+SSL/TLS Protocols:
+SSLv2     disabled
+SSLv3     disabled
+TLSv1.0   disabled
+TLSv1.1   disabled
+TLSv1.2   enabled
+TLSv1.3   disabled
+
+  TLS Fallback SCSV:
+Server supports TLS Fallback SCSV
+
+  TLS renegotiation:
+Session renegotiation not supported
+
+  TLS Compression:
+Compression disabled
+
+  Heartbleed:
+TLSv1.2 not vulnerable to heartbleed
+
+  Supported Server Cipher(s):
+Preferred TLSv1.2  256 bits  ECDHE-RSA-AES256-GCM-SHA384   Curve 25519 DHE 253
+Accepted  TLSv1.2  128 bits  ECDHE-RSA-AES128-GCM-SHA256   Curve 25519 DHE 253
+.....
+.....
+
+
+ + + +

You may want to scan by port level

+ + + + +
$ sslscan x.x.x.x:8444
\ No newline at end of file diff --git a/_posts/linuxcluster/2022-12-1-p=8885.md b/_posts/linuxcluster/2022-12-1-p=8885.md new file mode 100644 index 0000000..7652dfb --- /dev/null +++ b/_posts/linuxcluster/2022-12-1-p=8885.md @@ -0,0 +1,94 @@ +--- +author: The Linux Cluster +author_tag: linuxcluster +blog_subtitle: Linux Cluster Blog is a collection of how-to and tutorials for Linux + Cluster and Enterprise Linux +blog_title: The Linux Cluster +blog_url: https://thelinuxcluster.com +category: linuxcluster +date: '2022-12-01 01:12:20' +layout: post +original_url: https://thelinuxcluster.com/2022/12/01/how-to-disable-cbc-mode-ciphers-in-rhel-8-or-rocky-linux-8/ +slug: how-to-disable-cbc-mode-ciphers-in-rhel-8-or-rocky-linux-8 +title: How to disable CBC Mode Ciphers in RHEL 8 or Rocky Linux 8 +--- + +

This writeup is reference from The Geek Diary

+ + + + + + + + +

Edit /etc/sysconfig/sshd and uncomment CRYPTO_POLICY line:

+ + + + +
CRYPTO_POLICY=
+ + + +

Edit /etc/ssh/sshd_config file. Add Ciphers, MACs and KexAlgorithms have been added

+ + + + +
KexAlgorithms curve25519-sha256@libssh.org,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,umac-128@openssh.com
+ + + +

After making changes to the configuration file, you may want to do a sanity check on the configuration file

+ + + + +
# sshd -t
+ + + +

Restart sshd services

+ + + + +
# systemctl restart sshd
+ + + +

To test if weak CBC Ciphers are enabled

+ + + + +
$ ssh -vv -oCiphers=3des-cbc,aes128-cbc,aes192-cbc,aes256-cbc [youruserid@IP of your Server]
+
+ + + +

References:

+ + + + + \ No newline at end of file diff --git a/_posts/linuxcluster/2022-12-11-p=8899.md b/_posts/linuxcluster/2022-12-11-p=8899.md new file mode 100644 index 0000000..7d2fd6f --- /dev/null +++ b/_posts/linuxcluster/2022-12-11-p=8899.md @@ -0,0 +1,29 @@ +--- +author: The Linux Cluster +author_tag: linuxcluster +blog_subtitle: Linux Cluster Blog is a collection of how-to and tutorials for Linux + Cluster and Enterprise Linux +blog_title: The Linux Cluster +blog_url: https://thelinuxcluster.com +category: linuxcluster +date: '2022-12-11 17:42:48' +layout: post +original_url: https://thelinuxcluster.com/2022/12/12/application-i-o-profiling-on-hpc-clusters-with-altair-mistral-and-altair-pbs-professional/ +slug: application-i-o-profiling-on-hpc-clusters-with-altair-mistral-and-altair-pbs-professional +title: Application I/O Profiling on HPC Clusters with Altair Mistral and Altair PBS + Professional +--- + +
+ + + +

A Paper has been published by Altair and myself on the “Application I/O Profiling on HPC Clusters with Altair Mistral and Altair PBS Professional”. For more information, do take a look at

+ + + + +
+

The High Performance Computing Centre (HPCC) at Nanyang Technological University (NTU) Singapore employs the latest techniques to ensure good system utilization and a high-performance user experience. The university has a large HPC cluster with the Altair® PBS Professional® workload manager, and the HPCC team installed Altair Mistral™ to monitor application I/O and storage performance. In this paper, we describe how they used Mistral to analyze an HPC application. After getting some insights into the application, they profiled it against HPCC’s three storage tiers and gained detailed insights into application I/O patterns and storage performance.

+ +Application I/O Profiling on HPC Clusters with Altair Mistral and Altair PBS Professional
\ No newline at end of file diff --git a/_posts/linuxcluster/2022-12-11-p=8906.md b/_posts/linuxcluster/2022-12-11-p=8906.md new file mode 100644 index 0000000..1f1000b --- /dev/null +++ b/_posts/linuxcluster/2022-12-11-p=8906.md @@ -0,0 +1,29 @@ +--- +author: The Linux Cluster +author_tag: linuxcluster +blog_subtitle: Linux Cluster Blog is a collection of how-to and tutorials for Linux + Cluster and Enterprise Linux +blog_title: The Linux Cluster +blog_url: https://thelinuxcluster.com +category: linuxcluster +date: '2022-12-11 17:56:27' +layout: post +original_url: https://thelinuxcluster.com/2022/12/12/tuning-compute-performance-nanyang-technological-university-targets-i-o-bottlenecks-to-speed-up-research/ +slug: tuning-compute-performance-nanyang-technological-university-targets-i-o-bottlenecks-to-speed-up-research +title: Tuning Compute Performance – Nanyang Technological University Targets I/O Bottlenecks + to Speed Up Research +--- + +
+ + + +

A customer case study writeup on how the HPC Team at Nanyang Technological University used Altair Mistral to tune Compute Performance.

+ + + + +
+

The High Performance Computing Centre (HPCC) at Nanyang Technological University Singapore supports the university’s large-scale and data-intensive computing needs, and resource requirements continue to grow. HPCC churned out nearly 19 million core CPU-hours and nearly 300,000 GPU-hours in 2021 to enable more than 160 NTU researchers. HPCC’s small, four-engineer team turned to Altair for cutting-edge tools to help support their growing user community and evaluate scaling up to a hybrid cloud environment. They needed job-level insights to understand runtime issues; metrics on I/O, CPU, and memory to identify bottlenecks; and the ability to detect problematic applications and rogue jobs with bad I/O patterns that could overload shared storage. The HPCC team deployed Altair Mistral™ to profile application I/O and determine the most efficient options to optimize HPC at NTU.

+ +Tuning Compute Performance – Nanyang Technological University Targets I/O Bottlenecks to Speed Up Research
\ No newline at end of file diff --git a/_posts/linuxcluster/2022-12-14-p=8915.md b/_posts/linuxcluster/2022-12-14-p=8915.md new file mode 100644 index 0000000..8d1c1b3 --- /dev/null +++ b/_posts/linuxcluster/2022-12-14-p=8915.md @@ -0,0 +1,48 @@ +--- +author: The Linux Cluster +author_tag: linuxcluster +blog_subtitle: Linux Cluster Blog is a collection of how-to and tutorials for Linux + Cluster and Enterprise Linux +blog_title: The Linux Cluster +blog_url: https://thelinuxcluster.com +category: linuxcluster +date: '2022-12-14 04:39:14' +layout: post +original_url: https://thelinuxcluster.com/2022/12/14/harnessing-the-advantages-of-edge-ai/ +slug: harnessing-the-advantages-of-edge-ai +title: Harnessing the Advantages of Edge AI +--- + +
+ + + +

Article taken from HPCWire “Harnessing the Advantages of Edge AI”

+ + + + +

You can enjoy a number of advantages when you deploy edge AI applications. It’s about empowering your users in the field to convert data to value in real-time.

+ + + + + + + + +

Do go to the article for full read. Harnessing the Advantages of Edge AI

\ No newline at end of file diff --git a/_posts/linuxcluster/2022-12-19-p=8923.md b/_posts/linuxcluster/2022-12-19-p=8923.md new file mode 100644 index 0000000..04c2f3b --- /dev/null +++ b/_posts/linuxcluster/2022-12-19-p=8923.md @@ -0,0 +1,93 @@ +--- +author: The Linux Cluster +author_tag: linuxcluster +blog_subtitle: Linux Cluster Blog is a collection of how-to and tutorials for Linux + Cluster and Enterprise Linux +blog_title: The Linux Cluster +blog_url: https://thelinuxcluster.com +category: linuxcluster +date: '2022-12-19 16:54:07' +layout: post +original_url: https://thelinuxcluster.com/2022/12/20/abrt-cli-status-timed-out-is-always-shown-when-logging-on-or-changing-users/ +slug: abrt-cli-status-timed-out-is-always-shown-when-logging-on-or-changing-users +title: abrt-cli status’ timed out is always shown when logging on or changing users +--- + +

When change or login to specific user, ‘abrt-cli status’ timed out is always shown

+ + + + +
Last login: Mon Dec 19 23:32:58 +08 2022 on pts/21 
'abrt-cli status' timed out
+ + + +

To resolve the issue, you may want to check the status of the ‘abrtd’ service, the output will indicate a locked file

+ + + + +
# systemctl status abrtd
+● abrtd.service - ABRT Automated Bug Reporting Tool
+   Loaded: loaded (/usr/lib/systemd/system/abrtd.service; disabled; vendor preset: enabled)
+   Active: active (running) since Mon 2022-12-19 23:34:58 +08; 2s ago
+ Main PID: 273413 (abrtd)
+   CGroup: /system.slice/abrtd.service
+           └─273413 /usr/sbin/abrtd -d -s
+
+Dec 19 23:34:58 node1 systemd[1]: Started ABRT Automated Bug Reporting Tool.
+Dec 19 23:34:58 node1 systemd[1]: Starting ABRT Automated Bug Reporting Tool...
+Dec 19 23:34:58 node1 abrtd[273413]: Lock file '.lock' is locked by process 191242
+Dec 19 23:34:59 node1 abrtd[273413]: Lock file '.lock' is locked by process 191242
+Dec 19 23:34:59 node1 abrtd[273413]: Lock file '.lock' is locked by process 191242
+Dec 19 23:35:00 node1 abrtd[273413]: Lock file '.lock' is locked by process 191242
+Dec 19 23:35:00 node1 abrtd[273413]: Lock file '.lock' is locked by process 191242
+
+
+ + + +

Stop the abrt Service first.

+ + + + +
# systemctl stop abrtd
+ + + +

Kill the Process holding the Lock File

+ + + + +
# pkill -9 systemctl stop abrtd
+ + + +

Start the Service again

+ + + + +
# systemctl start abrtd
+ + + +

The Lock File should go away.

+ + + + +
# systemctl status abrtd
+● abrtd.service - ABRT Automated Bug Reporting Tool
+   Loaded: loaded (/usr/lib/systemd/system/abrtd.service; disabled; vendor preset: enabled)
+   Active: active (running) since Mon 2022-12-19 23:48:02 +08; 4s ago
+ Main PID: 334010 (abrtd)
+   CGroup: /system.slice/abrtd.service
+           └─334010 /usr/sbin/abrtd -d -s
+
+Dec 19 23:48:02 hpc-gekko1 systemd[1]: Started ABRT Automated Bug Reporting Tool.
+Dec 19 23:48:02 hpc-gekko1 systemd[1]: Starting ABRT Automated Bug Reporting Tool...
+Dec 19 23:48:02 hpc-gekko1 abrtd[334010]: Init complete, entering main loop
+
\ No newline at end of file diff --git a/_posts/linuxcluster/2022-12-22-p=8932.md b/_posts/linuxcluster/2022-12-22-p=8932.md new file mode 100644 index 0000000..7f5ccfe --- /dev/null +++ b/_posts/linuxcluster/2022-12-22-p=8932.md @@ -0,0 +1,36 @@ +--- +author: The Linux Cluster +author_tag: linuxcluster +blog_subtitle: Linux Cluster Blog is a collection of how-to and tutorials for Linux + Cluster and Enterprise Linux +blog_title: The Linux Cluster +blog_url: https://thelinuxcluster.com +category: linuxcluster +date: '2022-12-22 04:52:58' +layout: post +original_url: https://thelinuxcluster.com/2022/12/22/end-of-life-and-support-information/ +slug: end-of-life-and-support-information +title: End-Of-Life and Support Information +--- + +

Searching End-Of-Life Information can be a hassle if you are looking for information for OS, Applications, Databases etc. It will be very helpful if everything can be found on a website.

+ + + + +

Hooray! It exists! https://endoflife.date/

+ + + + +

endoflife.date documents End-of-life dates, and support lifecycles of various products. This project collates this data and presents it in an easily accessible format, with URLs that are easy to guess and remember.

+ + + + +

For example, I click on Red Hat Enterprise Linux and the table layout is clear and intuitive.

+ + + + +
ReleaseReleasedActive SupportSecurity SupportLatest
9 (Upcoming ELS)7 months ago(17 May 2022)Ends in 4 years and 5 months(31 May 2027)Ends in 9 years(31 May 2032)9.1(15 Nov 2022)
8 (Upcoming ELS)3 years and 7 months ago(07 May 2019)Ends in 1 year and 5 months(31 May 2024)Ends in 6 years(31 May 2029)8.7(09 Nov 2022)
7 (Upcoming ELS)9 years ago(11 Dec 2013)Ended 2 years and 11 months ago(31 Dec 2019)Ends in 1 year and 6 months(30 Jun 2024)7.9(29 Sep 2020)
6 (ELS)12 years ago(09 Nov 2010)Ended 6 years ago(10 May 2016)Ended 2 years ago(30 Nov 2020)6.10
5 (ELS)15 years ago(15 Mar 2007)Ended 9 years ago(08 Jan 2013)Ended 5 years and 8 months ago(31 Mar 2017)5.11
417 years ago(15 Feb 2005)Ended 13 years ago(31 Mar 2009)Ended 10 years ago(29 Feb 2012)4.9
Taken from https://endoflife.date/
\ No newline at end of file diff --git a/_posts/linuxcluster/2022-12-28-p=8947.md b/_posts/linuxcluster/2022-12-28-p=8947.md new file mode 100644 index 0000000..328fdd7 --- /dev/null +++ b/_posts/linuxcluster/2022-12-28-p=8947.md @@ -0,0 +1,41 @@ +--- +author: The Linux Cluster +author_tag: linuxcluster +blog_subtitle: Linux Cluster Blog is a collection of how-to and tutorials for Linux + Cluster and Enterprise Linux +blog_title: The Linux Cluster +blog_url: https://thelinuxcluster.com +category: linuxcluster +date: '2022-12-28 01:10:17' +layout: post +original_url: https://thelinuxcluster.com/2022/12/28/protecting-red-hat-openshift-containerized-environment-with-ibm-spectrum-protect-plus/ +slug: protecting-red-hat-openshift-containerized-environment-with-ibm-spectrum-protect-plus +title: Protecting Red Hat OpenShift Containerized Environment with IBM Spectrum Protect + Plus +--- + +
+ + + +

Introduction

+ + + + +

This IBM® Redpaper publication describes support for Red Hat OpenShift Container Platform application data protection with IBM Spectrum® Protect Plus. It explains backup and restore operations for persistent volume data by using the Container Storage Interface (CSI) plug-in.

+ + + + +

Table of Contents

+ + + + +

Chapter 1. Introducing containers
Chapter 2. IBM Spectrum Protect Plus architecture
Chapter 3. Installing IBM Spectrum Protect Plus as a containerized application
Chapter 4. Container Backup Support
Chapter 5. Implementing Container Backup Support
Chapter 6. Using Container Backup Support
Chapter 7. Red Hat OpenShift cluster disaster recovery solution

+ + + + +

More Information at IBM Spectrum Protect Plus: Protecting Red Hat OpenShift Containerized Environments

\ No newline at end of file diff --git a/_posts/linuxcluster/2023-1-3-p=8954.md b/_posts/linuxcluster/2023-1-3-p=8954.md new file mode 100644 index 0000000..f5084fc --- /dev/null +++ b/_posts/linuxcluster/2023-1-3-p=8954.md @@ -0,0 +1,82 @@ +--- +author: The Linux Cluster +author_tag: linuxcluster +blog_subtitle: Linux Cluster Blog is a collection of how-to and tutorials for Linux + Cluster and Enterprise Linux +blog_title: The Linux Cluster +blog_url: https://thelinuxcluster.com +category: linuxcluster +date: '2023-01-03 08:03:24' +layout: post +original_url: https://thelinuxcluster.com/2023/01/03/installing-7-zip-on-centos-7-and-rocky-linux-8/ +slug: installing-7-zip-on-centos-7-and-rocky-linux-8 +title: Installing 7-zip on CentOS-7 and Rocky Linux 8 +--- + +

7-zip is free software with open source. It has a high compression ratio in 7z format with LZMA and LZMA2 compression. Supported formats:

+ + + + + + + + +

+ + + + +

Get 7-zip in Linux

+ + + + +
$ wget https://sourceforge.net/projects/sevenzip/files/7-Zip/22.01/7z2201-linux-x64.tar.xz --no-check-certificate
+ + + +

+ + + + +

Unpack 7-zip in Linux

+ + + + +
$ tar xf 7z2201-linux-x64.tar.xz
+
+
+ + + +

Running Issues

+ + + + +

If you are encountering issues like (especially on CentOS-7)

+ + + + +
[user1@node1 7-zip]$ ./7zz
+./7zz: /lib64/libstdc++.so.6: version `CXXABI_1.3.8' not found (required by ./7zz) 
+./7zz: /lib64/libstdc++.so.6: version `CXXABI_1.3.9' not found (required by ./7zz)
+ + + +

You need a more recent GNU Compilers rather than the default one used in CentOS-7 which is very old, you may want to compile more recent GNU. Remember to complete the $LD_LIBRARY_PATH and $PATH something like this

+ + + + +
export PATH=$PATH:/usr/local/gcc-6.5.0/bin
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/gcc-6.5.0/lib64
\ No newline at end of file diff --git a/_posts/nvidia/2022-12-27-p=61372.md b/_posts/nvidia/2022-12-27-p=61372.md new file mode 100644 index 0000000..a0189a5 --- /dev/null +++ b/_posts/nvidia/2022-12-27-p=61372.md @@ -0,0 +1,40 @@ +--- +author: NVIDIA High Performance Computing +author_tag: nvidia +blog_subtitle: '' +blog_title: NVIDIA Blog +blog_url: https://blogs.nvidia.com/ +category: nvidia +date: '2022-12-27 14:00:22' +layout: post +original_url: https://blogs.nvidia.com/blog/2022/12/27/11-explainers-ai-2023/ +slug: 11-essential-explainers-to-keep-you-in-the-know-in-2023 +title: 11 Essential Explainers to Keep You in the Know in 2023 +--- + +

The NVIDIA corporate blog has long been a go-to source for information on the latest developments in AI and accelerated computing.

+ +

The blog’s series of “explainers” are among our most-read posts, offering a quick way to catch up on the newest technologies.

+ +

In this post, we’ve rounded up 11 of the most popular explainers from the blog, providing a beginner’s guide to understanding the concepts and applications of these cutting-edge technologies.

+ +

From AI models to quantum computing, these explainers are a must-read for anyone looking to stay informed on the latest tech developments in 2023.

+ +
    +
  1. What Is a Pretrained AI Model?” – This post covers the basics of pretrained AI models, including how they work and why they’re useful.
  2. +
  3. What Is Denoising?” – This piece explains denoising and its use in image and signal processing.
  4. +
  5. What Are Graph Neural Networks?” – This article introduces graph neural networks, including how they work and are used in various applications.
  6. +
  7. What Is Green Computing?” – This post explains the basics of green computing, including why it’s important and how it can be achieved.
  8. +
  9. What is Direct and Indirect Lighting?” – This piece covers the differences between direct and indirect lighting in computer graphics, and how they’re used in different applications.
  10. +
  11. What Is a QPU?” – This blog introduces the quantum processing unit, including what it is and how they’re used in quantum computing.
  12. +
  13. What Is an Exaflop?” – This article explains what an exaflop is and why it’s an important measure of computational power.
  14. +
  15. What Is Zero Trust?” – This post covers the basics of zero trust, including what it is and how it can improve network security.
  16. +
  17. What Is Extended Reality?” – This piece provides an overview of extended reality — the umbrella term for virtual, augmented and mixed reality — including what it is and how it’s used in different applications.
  18. +
  19. What Is a Transformer Model?” – This blog explains what transformer models are and how they’re used in AI.
  20. +
  21. What Is Path Tracing?” – This article covers the basics of path tracing, including how it works and why it’s important for creating realistic computer graphics. It provides examples of its applications in different fields.
  22. +
+

Let us know in the comments section below which AI and accelerated computing concepts you’d like explained next on our blog. We’re always looking for suggestions and feedback. 

+ +

 

+ +
\ No newline at end of file diff --git a/_posts/nvidia/2022-12-27-p=61526.md b/_posts/nvidia/2022-12-27-p=61526.md new file mode 100644 index 0000000..b1982fa --- /dev/null +++ b/_posts/nvidia/2022-12-27-p=61526.md @@ -0,0 +1,100 @@ +--- +author: NVIDIA High Performance Computing +author_tag: nvidia +blog_subtitle: '' +blog_title: NVIDIA Blog +blog_url: https://blogs.nvidia.com/ +category: nvidia +date: '2022-12-27 14:00:40' +layout: post +original_url: https://blogs.nvidia.com/blog/2022/12/27/in-the-nvidia-studio-december-27/ +slug: 3d-artist-zhelong-xu-revives-chinese-relics-this-week-in-the-nvidia-studio- +title: 3D Artist Zhelong Xu Revives Chinese Relics This Week ‘In the NVIDIA Studio’ +--- + +

Editor’s note: This post is part of our weekly In the NVIDIA Studio series, which celebrates featured artists, offers creative tips and tricks, and demonstrates how NVIDIA Studio technology improves creative workflows. We’re also deep diving on new GeForce RTX 40 Series GPU features, technologies and resources, and how they dramatically accelerate content creation.

+ +

Artist Zhelong Xu, aka Uncle Light, brought to life Blood Moon — a 3D masterpiece combining imagination, craftsmanship and art styles from the Chinese Bronze Age — along with Kirin, a symbol of hope and good fortune, using NVIDIA technologies.

+ +

+ +

Also this week In the NVIDIA Studio, the #WinterArtChallenge is coming to a close. Enter by sharing winter-themed art on Instagram, Twitter or Facebook for a chance to be featured on NVIDIA Studio’s social media channels. Be sure to tag #WinterArtChallenge to join.

+ + +

+ +

 

+ +

Ring in the season and check out the NVIDIA RTX Winter World in Minecraft — now available in the NVIDIA Omniverse Launcher. Download today to use it in your #WinterArtChallenge scenes.

+ +

Tune in to NVIDIA’s special address at CES on Tuesday, Jan. 3, at 8 a.m. PT, when we’ll share the latest innovations made possible by accelerated computing and AI.

+ +

Dare to Dragon

+

Xu is a veteran digital artist who has worked at top game studio Tencent, made key contributions to the third season of Netflix’s Love, Death & Robots, and won ZBrush 2018 Sculpt of the Year award. He carries massive influence in the 3D community in China, and the country’s traditional culture is an inexhaustible treasure of inspiration for the artist.

+ +

“Ancient Chinese artisans have created countless unique, aesthetic systems over time that are completely different from Western art,” said Xu. “My dream is to use modern means to reinterpret Chinese culture and aesthetics as I understand them.”

+ +

Blood Moon is a tribute to the lost Shu civilization, which existed from 2,800 B.C. to 1,100 B.C. The work demonstrates the creative power of ancient China. During a trip to the Sanxingdui Museum in the Sichuan province, where many relics from this era are housed, Xu became inspired by the mysterious, ancient Shu civilization.

+ +

The artist spent around 10 minutes sketching in the Procreate app, looking to capture the general direction and soul of the piece. This conceptual stage is important so that the heart of the artwork doesn’t get lost once 3D is applied, Xu said.

+ +
Sketching in Procreate.
+

He then began sculpting in Maxon’s ZBrush, which is his preferred tool as he says it contains the most convenient sculpting features.

+ +
Advanced sculpting in ZBrush.
+

Next, Xu used Adobe Substance 3D Painter to apply colors and textures directly to 3D models. NVIDIA RTX-accelerated light- and ambient-occlusion features baked and optimized scene assets in mere seconds, giving Xu the option to experiment with visual aesthetics quickly and easily.

+ +
Layers baked in Adobe Substance 3D Painter.
+

NVIDIA Iray technology in the viewport enabled Xu to edit interactively and use ray-traced baking for faster rendering speeds — all accelerated by his GeForce RTX 4090 GPU.

+ +

“The RTX 4090 GPU always gives me reliable performance and smooth interaction; plus, the Iray renderer delivers unbiased rendering,” Xu said.

+ +
Textures and materials applied in Adobe Substance 3D Painter.
+

Xu used the Universal Scene Description file framework to export the scene from Blender into the Omniverse Create app, where he used the advanced RTX Renderer, with path tracing, global illumination, reflections and refractions, to create incredibly realistic visuals.

+ +
Xu used the Blender USD branch to export the scene into Omniverse Create.
+

NVIDIA Omniverse — a platform for creating and operating metaverse applications — was incredibly useful for scene modifications, Xu said, as it enabled him to test lighting scenarios with his scene rendering in real time. This provided Xu with the most accurate iteration of final renders, allowing for more meaningful edits in the moment, he said.

+ + + +

 

+ +

Further edits included adding fog and volume effects, easily applied in Omniverse Create.

+ +
Fog and volume effects applied in Omniverse Create.
+

Omniverse gives 3D artists their choice of renderer within the viewport, with support for Pixar HD Storm, Chaos V-Ray, Maxon’s Redshift, OTOY Octane, Blender Cycles and more. Xu deployed the unbiased NVIDIA Iray renderer to complete the project.

+ +
Xu selected the RTX Iray renderer for final renders.
+

“Omniverse is already an indispensable part of my work,” Xu added.

+ +

The artist demonstrated this in another history-inspired piece, Kirin, built in Omniverse Create.

+ +
‘Kirin’ by Zhelong Xu.
+

“Kirin, or Qilin, is always a symbol of hope and good fortune in China, but there are few realistic works in the traditional culture,” said Xu.

+ +

He wanted to create a Kirin, a legendary hooved creature in Chinese mythology, with a body structure in line with Western fine art and anatomy, as well as with a sense of peace and the wisdom of silence based on Chinese culture.

+ +

“It is not scary,” said Xu. “Instead, it is a creature of great power and majesty.”

+ +

Kirin is decorated with jade-like cloud patterns, symbolizing the intersection of tradition and modernity, something the artist wanted to express and explore. Clouds and fogs are difficult to depict in solid sculpture, though they are often carved in classical Chinese sculpture. These were easily brought to life in Xu’s 3D artwork.

+ +
‘Kirin’ resembles a cross between a dragon and a horse, with the body of a deer and the tail of an ox.
+

Check out Zhelong Xu’s website for more inspirational artwork.

+ +
3D artist Zhelong Xu.
+

For the latest creative app updates, download the monthly NVIDIA Studio Driver.

+ +

Access tutorials on the Studio YouTube channel and get updates directly in your inbox by subscribing to the Studio newsletter.

+ +
\ No newline at end of file diff --git a/_posts/nvidia/2022-12-28-p=61570.md b/_posts/nvidia/2022-12-28-p=61570.md new file mode 100644 index 0000000..15976e7 --- /dev/null +++ b/_posts/nvidia/2022-12-28-p=61570.md @@ -0,0 +1,33 @@ +--- +author: NVIDIA High Performance Computing +author_tag: nvidia +blog_subtitle: '' +blog_title: NVIDIA Blog +blog_url: https://blogs.nvidia.com/ +category: nvidia +date: '2022-12-28 16:00:08' +layout: post +original_url: https://blogs.nvidia.com/blog/2022/12/28/jetson-innovation-awards-ces/ +slug: these-6-nvidia-jetson-users-win-big-at-ces-in-las-vegas +title: These 6 NVIDIA Jetson Users Win Big at CES in Las Vegas +--- + +

Six companies with innovative products built using the NVIDIA Jetson edge AI platform will leave CES, one of the world’s largest consumer technology trade shows, as big winners next week.

+ +

The CES Innovation Awards each year honor outstanding design and engineering in more than two dozen categories of consumer technology products. The companies to be awarded for their Jetson-enabled products at the conference, which runs Jan. 5-8 in Las Vegas, include:

+ +
    +
  • John Deere: Best of Innovation awardee in the robotics category and honoree in the vehicle tech and advanced mobility category for its fully autonomous tractor. The tractor is capable of using GPS guidance, cameras, sensors and AI to perform essential tasks on the farm without an operator inside the cab.
  • +
  • AGRIST: Honoree for its robot that automatically harvests bell peppers. The smart agriculture company will be at CES booth 62201.
  • +
  • Skydio: Honoree for its Scout drone, which an operator can fly at a set distance and height using the Skydio Enterprise Controller or the Skydio Beacon while on the move, and without having to manually operate the drone. Skydio, at booth 18541 in Central Hall, is a member of NVIDIA Inception, a free, global program for cutting-edge startups.
  • +
  • GlüxKind: Honoree for GlüxKind Ella, an AI-powered intelligent baby stroller that offers advanced safety and convenience for busy parents. The NVIDIA Inception member will be at CES booth 61710.
  • +
  • Neubility: Honoree for its self-driving delivery robot, Neubie, a cost-effective and sustainable alternative for delivery needs that can help alleviate traffic congestion in urban areas. The NVIDIA Inception member will be at Samsung Electronics C-LAB’s booth 61032 in Venetian Hall.
  • +
  • Seoul Robotics: Honoree for its Level 5 Control Tower, which can turn standard vehicles into self-driving cars through a mesh network of sensors and computers installed on infrastructure. The NVIDIA Inception member will be at CES booth 5408.
  • +
+

Also, NVIDIA Inception members and Jetson ecosystem partners, including DriveU, Ecotron, Infineon, Leopard Imaging, Orbecc, Quest Global, Slamcore, Telit, VVDN, Zvision and others, will be at CES, with many announcing systems and demonstrating applications based on the Jetson Orin platform.

+ +

Deepu Talla, vice president of embedded and edge computing at NVIDIA, will join a panel discussion, “The Journey to Autonomous Operations,” on Friday, Jan. 6, at 12:30 p.m. PT, at the Accenture Innovation Hub in ballroom F of the Venetian Expo.

+ +

And tune in to NVIDIA’s virtual special address at CES on Tuesday, Jan. 3, at 8 a.m. PT, to hear the latest in accelerated computing. NVIDIA executives will unveil products, partnerships and offerings in autonomous machines, robotics, design, simulation and more.

+ +
\ No newline at end of file diff --git a/_posts/nvidia/2022-12-29-p=61453.md b/_posts/nvidia/2022-12-29-p=61453.md new file mode 100644 index 0000000..6f73a94 --- /dev/null +++ b/_posts/nvidia/2022-12-29-p=61453.md @@ -0,0 +1,56 @@ +--- +author: NVIDIA High Performance Computing +author_tag: nvidia +blog_subtitle: '' +blog_title: NVIDIA Blog +blog_url: https://blogs.nvidia.com/ +category: nvidia +date: '2022-12-29 15:45:13' +layout: post +original_url: https://blogs.nvidia.com/blog/2022/12/29/ai-podcast-top-five-episodes-2/ +slug: now-hear-this-top-five-ai-podcasts-of-2022 +title: Now Hear This- Top Five AI Podcasts of 2022 +--- + +

One of tech’s top talk shows, the NVIDIA AI Podcast has attracted more than 3.6 million listens to date from folks who want to hear the latest in machine learning.

+ +

Its 180+ installments so far have included interviews with luminaries like Kai-Fu Lee and explored how AI is advancing everything from monitoring endangered rhinos to analyzing images from the James Webb Space Telescope.

+ +

Here’s a sampler of the most-played episodes in 2022:

+ +

Waabi CEO Raquel Urtasun on Using AI, Simulation to Teach Autonomous Vehicles to Drive

+ +

+ +

A renowned expert in machine learning, Urtasun discusses her current work at Waabi using simulation technology to teach trucks how to drive. Urtasun is a professor of computer science at the University of Toronto and the former chief scientist and head of R&D for Uber’s advanced technology group.

+ +

What Is Conversational AI? ZeroShot Bot CEO Jason Mars Explains

+ +

+ +

Automated chatbots ain’t what they used to be — they’re getting a whole lot better, thanks to advances in conversational AI. Entrepreneur, educator and author Jason Mars breaks down the latest techniques giving AI a voice.

+ +

Exaggeration Detector Could Lead to More Accurate Health Science Journalism

+ +

Dustin Wright, a researcher at the University of Copenhagen, used NVIDIA GPUs to create an “exaggeration detection system.” He pointed it at hyperbole in health science news and explained to the AI Podcast how it works.

+ +

+ +

Fusing Art and Tech: MORF Gallery CEO Scott Birnbaum on Digital Paintings, NFTs and More

+ +

Silicon Valley startup MORF Gallery showcases artists who create with AI, robots and visual effects. Its CEO provides a virtual tour of what’s happening in digital art — including a plug-in device that can turn any TV into an art gallery.

+ +

+ +

‘AI Dungeon’ Creator Nick Walton Uses AI to Generate Infinite Gaming Storylines

+ +

What started as Nick Walton’s college hackathon project grew into “AI Dungeon,” a game with more than 1.5 million users. Now he’s co-founder and CEO of Latitude, a startup using AI to spawn storylines for games.

+ +

+ +

Subscribe to the AI Podcast

+

Get the AI Podcast through iTunes, Google Podcasts, Google Play, Castbox, DoggCatcher, Overcast, PlayerFM, Pocket Casts, Podbay, PodBean, PodCruncher, PodKicker, Soundcloud, Spotify, Stitcher and TuneIn. If your favorite isn’t listed here, drop us a note.

+ +

+ +
\ No newline at end of file diff --git a/_posts/nvidia/2022-12-29-p=61600.md b/_posts/nvidia/2022-12-29-p=61600.md new file mode 100644 index 0000000..cf3958e --- /dev/null +++ b/_posts/nvidia/2022-12-29-p=61600.md @@ -0,0 +1,27 @@ +--- +author: NVIDIA High Performance Computing +author_tag: nvidia +blog_subtitle: '' +blog_title: NVIDIA Blog +blog_url: https://blogs.nvidia.com/ +category: nvidia +date: '2022-12-29 16:00:49' +layout: post +original_url: https://blogs.nvidia.com/blog/2022/12/29/ces/ +slug: nvidia-to-reveal-consumer-creative-auto-robotics-innovations-at-ces +title: NVIDIA to Reveal Consumer, Creative, Auto, Robotics Innovations at CES +--- + +

NVIDIA executives will share some of the company’s latest innovations Tuesday, Jan. 3, at 8 a.m. Pacific time ahead of this year’s CES trade show in Las Vegas.

+ +

Jeff Fisher, senior vice president for gaming products, will be joined by Deepu Talla, vice president of embedded and edge computing, Stephanie Johnson, vice president of consumer marketing, and Ali Kani, vice president of automotive, for a special address that you won’t want to miss.

+ +

During the event, which will be streamed on nvidia.com, the NVIDIA YouTube and Twitch channels, as well as on the GeForce YouTube channel, the executives will reveal exciting gaming, creative, automotive and robotics announcements.

+ +

The broadcast is a unique opportunity to get a sneak peek at the future of technology and see what NVIDIA has in store for the coming year.

+ +

Don’t miss out on this special address from some of the top executives in the industry.

+ +

Tune in on Jan. 3 to get a first look at what’s in store for the future of technology.

+ +
\ No newline at end of file diff --git a/_posts/nvidia/2022-12-30-p=61617.md b/_posts/nvidia/2022-12-30-p=61617.md new file mode 100644 index 0000000..a16067f --- /dev/null +++ b/_posts/nvidia/2022-12-30-p=61617.md @@ -0,0 +1,59 @@ +--- +author: NVIDIA High Performance Computing +author_tag: nvidia +blog_subtitle: '' +blog_title: NVIDIA Blog +blog_url: https://blogs.nvidia.com/ +category: nvidia +date: '2022-12-30 16:00:46' +layout: post +original_url: https://blogs.nvidia.com/blog/2022/12/30/stephen-tong-omniverse-creator/ +slug: meet-the-omnivore-music-producer-remixes-the-holidays-with-newfound-passion-for-3d-content-creation +title: Meet the Omnivore- Music Producer Remixes the Holidays With Newfound Passion + for 3D Content Creation +--- + +

Editor’s note: This post is a part of our Meet the Omnivore series, which features individual creators and developers who use NVIDIA Omniverse to accelerate their 3D workflows and create virtual worlds.

+ +
Stephen Tong
+

Stephen Tong, aka Funky Boy, has always loved music and photography. He’s now transferring the skills developed over the years as a music producer — shooting time lapses, creating audio tracks and more — to a new passion of his: 3D content creation.

+ +

Tong began creating 3D renders and animations earlier this year, using the NVIDIA Omniverse platform for building and connecting custom 3D pipelines.

+ +

Within just a couple months of learning to use Omniverse, Tong created a music video with the platform. The video received honorable mention in the inaugural #MadeInMachinima contest last March, which invited participants to remix popular characters from games like Squad, Mount & Blade II: Bannerlord and MechWarrior Mercenaries 5 using the Omniverse Machinima app.

+ +

+ +

In September, Tong participated in the first-ever Omniverse developer contest, which he considered the perfect way to learn about extending the platform and coding with the popular Python programming language. He submitted three Omniverse extensions — core building blocks that let anyone create and extend functions of Omniverse apps — aimed at easing creative workflows like his own.

+ +

+ +

Ringing in the Season the Omniverse Way

+

The artist also took part in the #WinterArtChallenge this month from NVIDIA Studio, a creative community and platform of NVIDIA RTX and AI-accelerated creator apps. Creatives from around the world shared winter-themed art on social media using the hashtag.

+ +

Tong said his scene was inspired by cozy settings he often associates with the holidays.

+ +

+ +

First, the artist used AI to generate a mood board. Once satisfied with the warm, cozy mood, he modeled a winter chalet — complete with a snowman, wreaths and sleigh — using the Marbles RTX assets, free to use in the Omniverse Launcher, as well as some models from Sketchfab.

+ +

Tong collected the assets in Unreal Engine before rendering the 3D scene using the Omniverse Create and Blender apps. The Universal Scene Description (USD) framework allowed him to bring the work from these various applications together.

+ +

“USD enables large scenes to be loaded fast and with ease,” he said. “The system of layers makes Omniverse a powerful tool for collaboration and iterations.”

+ +

With his festive creativity on a roll, Tong also orchestrated an animated quartet lip-syncing to “Carol of the Bells” using Omniverse Audio2Face, an AI app that quickly and easily generates expressive facial animations from just an audio source, as well as the DaVinci Resolve application for video editing.

+ +

Watch to keep up the holiday spirit:

+ +

+ +

Join In on the Creation

+

Creators and developers across the world can download NVIDIA Omniverse for free, and enterprise teams can use the platform for their 3D projects.

+ +

To hear the latest made possible by accelerated computing, AI and Omniverse, watch NVIDIA’s special address at CES on Tuesday, Jan. 3, at 8 a.m. PT.

+ +

Check out more artwork from Tong and other “Omnivores” in the gallery. Connect your workflows to Omniverse with software from Adobe, Autodesk, Epic Games, Maxon, Reallusion and more.

+ +

Follow NVIDIA Omniverse on Instagram, Medium, Twitter and YouTube for additional resources and inspiration. Check out the Omniverse forums, and join our Discord server and Twitch channel to chat with the community.

+ +
\ No newline at end of file diff --git a/_posts/nvidia/2023-1-2-p=61502.md b/_posts/nvidia/2023-1-2-p=61502.md new file mode 100644 index 0000000..51f5181 --- /dev/null +++ b/_posts/nvidia/2023-1-2-p=61502.md @@ -0,0 +1,36 @@ +--- +author: NVIDIA High Performance Computing +author_tag: nvidia +blog_subtitle: '' +blog_title: NVIDIA Blog +blog_url: https://blogs.nvidia.com/ +category: nvidia +date: '2023-01-02 14:00:21' +layout: post +original_url: https://blogs.nvidia.com/blog/2023/01/02/5-paths-ai-career/ +slug: new-year-new-career-5-leaders-share-tips-for-building-a-career-in-ai +title: New Year, New Career- 5 Leaders Share Tips for Building a Career in AI +--- + +

Those looking to join the ranks of AI trailblazers or chart a new course in their careers need look no further.

+ +

At NVIDIA’s latest GTC conference, industry leaders in a panel called “5 Paths to a Career in AI” shared tips and insights on how to make a mark in this rapidly evolving field.

+ +

Representing diverse sectors such as healthcare, automotive, augmented and virtual reality, climate and energy, and manufacturing, these experts offered valuable advice for all seeking to build a career in AI.

+ +

Here are five key takeaways from the discussion:

+ +
    +
  1. Be curious and constantly learn: “I think in order to break into this field, you’ve got to be curious. It’s so important to always be learning [and] always be asking questions,” emphasized Chelsea Sumner, healthcare AI startups lead for North and Latin America at NVIDIA. “If we’re not asking questions, and we’re not learning, we’re not growing.”
  2. +
  3. Tell your story effectively to different audiences: “Your ability to tell your story to a variety of different audiences is essential,” noted Justin Taylor, vice president of AI at Lockheed Martin. “So for them to understand what you’re doing [with AI], how you’re doing it, why you’re doing it is essential.”
  4. +
  5. Embrace challenges and be resilient: “When you have all of these different experiences, you understand that it’s not always going to be perfect,” advised Laura Leal-Taixé, professor at the Technical University of Munich and principal scientist at Argo AI. “And when things aren’t always perfect, you’re able to have competence because [you know that you] did that really hard thing and was able to get through it.”
  6. +
  7. Understand the purpose behind your work: “Understand the baseline, how do you collect the data baseline — understand the physical, the bottom line. What’s the purpose, what do you want to do?” advised Jay Lee, Ohio eminent scholar of the University of Cincinnati and board member of Foxconn.
  8. +
  9. Collaborate and seek support from others: “It’s so important for resiliency to find people across different domains and really tap into that,” said Carrie Gotch, creator and content strategy for 3D/AR at Adobe. “No one does it alone, right? You’re always part of a system, part of a team of people.”
  10. +
+

The panelists stressed the importance of staying up to date and curious, gaining practical experience, collaborating with others and taking risks when building a career in AI.

+ +

Start your journey to an AI career by signing up for NVIDIA GTC, running in March, where you can network, get trained on the latest tools and hear from thought leaders about the impact of AI in various industries.

+ +

It could be the first step toward a rewarding AI career that takes you into 2023 and beyond.

+ +
\ No newline at end of file diff --git a/_posts/nvidia/2023-1-3-p=61497.md b/_posts/nvidia/2023-1-3-p=61497.md new file mode 100644 index 0000000..299b19e --- /dev/null +++ b/_posts/nvidia/2023-1-3-p=61497.md @@ -0,0 +1,68 @@ +--- +author: NVIDIA High Performance Computing +author_tag: nvidia +blog_subtitle: '' +blog_title: NVIDIA Blog +blog_url: https://blogs.nvidia.com/ +category: nvidia +date: '2023-01-03 16:30:29' +layout: post +original_url: https://blogs.nvidia.com/blog/2023/01/03/omniverse-ace-early-access/ +slug: creating-faces-of-the-future-build-ai-avatars-with-nvidia-omniverse-ace +title: Creating Faces of the Future- Build AI Avatars With NVIDIA Omniverse ACE +--- + +

Developers and teams building avatars and virtual assistants can now register to join the early-access program for NVIDIA Omniverse Avatar Cloud Engine (ACE), a suite of cloud-native AI microservices that make it easier to build and deploy intelligent virtual assistants and digital humans at scale.

+ +

Omniverse ACE eases avatar development, delivering the AI building blocks necessary to add intelligence and animation to any avatar, built on virtually any engine and deployed on any cloud. These AI assistants can be designed for organizations across industries, enabling organizations to enhance existing workflows and unlock new business opportunities.

+ +

ACE is one of several generative AI applications that will help creators accelerate the development of 3D worlds and the metaverse. Members who join the program will receive access to the prerelease versions of NVIDIA’s AI microservices, as well as the tooling and documentation needed to develop cloud-native AI workflows for interactive avatar applications.

+ +

+ +

Bring Interactive AI Avatars to Life With Omniverse ACE

+

Methods for developing avatars often require expertise, specialized equipment and manually intensive workflows. To ease avatar creation, Omniverse ACE enables seamless integration of NVIDIA’s AI technologies — including pre-built models, toolsets and domain-specific reference applications — into avatar applications built on most engines and deployed on public or private clouds.

+ +

Since it was unveiled in September, Omniverse ACE has been shared with select partners to capture early feedback. Now, NVIDIA is looking for partners who will provide feedback on the microservices, collaborate to improve the product, and push the limits of what’s possible with lifelike, interactive digital humans.

+ +

The early-access program includes access to the prerelease versions of ACE animation AI and conversational AI microservices, including:

+ +
    +
  • 3D animation AI microservice for third-party avatars, which uses Omniverse Audio2Face generative AI to bring to life characters in Unreal Engine and other rendering tools by creating realistic facial animation from just an audio file.
  • +
  • 2D animation AI microservice, called Live Portrait, enables easy animation of 2D portraits or stylized human faces using live video feeds.
  • +
  • Text-to-speech microservice uses NVIDIA Riva TTS to synthesize natural-sounding speech from raw transcripts without any additional information, such as patterns or rhythms of speech.
  • +
+

Program members will also get access to tooling, sample reference applications and supporting resources to help get started.

+ +

Avatars Make Their Mark Across Industries

+

Omniverse ACE can help teams build interactive, digital humans that elevate experiences across industries, providing:

+ +
    +
  • Easy animation of characters, so users can bring them to life with minimal expertise.
  • +
  • The ability to deploy on cloud, which means avatars will be usable virtually anywhere, such as a quick-service restaurant kiosk, a tablet or a virtual-reality headset.
  • +
  • A plug-and-play suite, built on NVIDIA Unified Compute Framework (UCF), which enables interoperability between NVIDIA AI and other solutions, ensuring state-of-the-art AI that fits each use case.
  • +
+

Partners such as Ready Player Me and Epic Games have experienced how Omniverse ACE can enhance workflows for AI avatars.

+ +

The Omniverse ACE animation AI microservice supports 3D characters from Ready Player Me, a platform for building cross-game avatars.

+ +

“Digital avatars are becoming a significant part of our daily lives. People are using avatars in games, virtual events and social apps, and even as a way to enter the metaverse,” said Timmu Tõke, CEO and co-founder of Ready Player Me. “We spent seven years building the perfect avatar system, making it easy for developers to integrate in their apps and games and for users to create one avatar to explore various worlds — with NVIDIA Omniverse ACE, teams can now more easily bring these characters to life.”

+ +

+ +

Epic Games’ advanced MetaHuman technology transformed the creation of realistic, high-fidelity digital humans. Omniverse ACE, combined with the MetaHuman framework, will make it even easier for users to design and deploy engaging 3D avatars.

+ +

Digital humans don’t just have to be conversational. They can be singers, as well — just like the AI avatar Toy Jensen. NVIDIA’s creative team quickly created a holiday performance by TJ, using Omniverse ACE to extract the voice of a singer and turn it into TJ’s voice. This enabled the avatar to sing at the same pitch and with the same rhythm as the original artist.

+ +

+ +

Many creators are venturing into VTubing, a new way of livestreaming. Users embody a 2D avatar and interact with viewers. With Omniverse ACE, creators can move their avatars into 3D from 2D animation, including photos and stylistic faces. Users can render the avatars from the cloud and animate the characters from anywhere.

+ +

Additionally, the NVIDIA Tokkio reference application is expanding, with early partners building cloud-native customer service avatars for industries such as telco, banking and more.

+ +

Join the Early-Access Program

+

Early access to Omniverse ACE is available to developers and teams building avatars and virtual assistants.

+ +

Watch the NVIDIA special address at CES on demand. Learn more about NVIDIA Omniverse ACE and register to join the early-access program.

+ +
\ No newline at end of file diff --git a/_posts/nvidia/2023-1-3-p=61498.md b/_posts/nvidia/2023-1-3-p=61498.md new file mode 100644 index 0000000..691ab58 --- /dev/null +++ b/_posts/nvidia/2023-1-3-p=61498.md @@ -0,0 +1,60 @@ +--- +author: NVIDIA High Performance Computing +author_tag: nvidia +blog_subtitle: '' +blog_title: NVIDIA Blog +blog_url: https://blogs.nvidia.com/ +category: nvidia +date: '2023-01-03 16:45:13' +layout: post +original_url: https://blogs.nvidia.com/blog/2023/01/03/omniverse-enterprise-release/ +slug: nvidia-releases-major-update-to-omniverse-enterprise +title: NVIDIA Releases Major Update to Omniverse Enterprise +--- + +

The latest release of NVIDIA Omniverse Enterprise, available now, brings increased performance, generational leaps in real-time RTX ray and path tracing, and streamlined workflows to help teams build connected 3D pipelines, and develop and operate large-scale, physically accurate, virtual 3D worlds like never before.

+ +

Artists, designers, engineers and developers can benefit from various enhancements across common Omniverse use cases, including breaking down 3D data silos through aggregation, building custom 3D pipeline tools and generating synthetic 3D data.

+ +

The release includes support for the breakthrough innovations within the new NVIDIA Ada Lovelace architecture, including third-generation RTX technology and DLSS 3, delivering up to 3x performance gains when powered by the latest GPU technology, such as NVIDIA RTX 6000 Ada Generation, NVIDIA L40 and OVX systems.

+ +

The update also delivers features and capabilities such as new Omniverse Connectors, layer-based live workflows, improved user experience and customization options, including multi-view ports, editable hotkeys, lighting presets and more.

+ +

Enhancing 3D Creation

+

The Omniverse ecosystem is expanding, with more capabilities and tools that allow teams to elevate 3D workflows and reach new levels of real-time, physically accurate simulations. The latest additions include:

+ +
    +
  • New Connectors: Omniverse Connectors enable more seamless connected workflows between disparate 3D applications. New Adobe Substance 3D Painter, Autodesk Alias, PTC Creo, Kitware’s Paraview Omniverse Connectors are now supported on Omniverse Enterprise. Users can also easily export data created in NX Software from Siemens Digital Industries Software.
  • +
  • Omniverse DeepSearch: Now generally available, this AI-powered service lets teams intuitively search through extremely large, untagged 3D databases with natural language or using 2D reference images. This unlocks major value in previously unwieldy digital backlots and asset collections that lacked the ability to search.
  • +
  • Omniverse Farm: A completely renewed user interface provides improved usability and performance, plus Kubernetes support.
  • +
  • Omniverse Cloud: New cloud containers for Enterprise Nucleus Server, Replicator, Farm and Isaac Sim for AWS provide enterprises more flexibility in connecting and managing distributed teams all over the world. AWS’ security, identity and access-management controls allow teams to maintain complete control over their data. Containers are now available on NVIDIA NGC.
  • +
+

Strengthening Core Components for Building 3D Worlds

+

Omniverse Enterprise is designed for maximum flexibility and scalability. This means creators, designers, researchers and engineers can quickly connect tools, assets and projects to collaborate in a shared virtual space.

+ +

Omniverse Enterprise brings updates to the core components of the platform, including:

+ +
    +
  • Omniverse Kit SDK, the powerful toolkit for building extensions, apps, microservices or plug-ins, now makes it easier than ever to build advanced tools and Omniverse applications with new templates and developer workflows.
  • +
  • Omniverse Create, a reference app for composing large-scale, USD-based worlds, now includes NVIDIA DLSS 3 and multi-viewport support, making it easier for Omniverse Enterprise users to fluidly interact with extremely large and complex scenes.
  • +
  • Omniverse View, a reference app for reviewing 3D scenes, has been streamlined to focus purely on the review and approval experience. New collaborative, real-time, interactive capabilities — including markup, annotation, measure and simple navigation — make stakeholder presentations easier and more interactive than ever.
  • +
  • Omniverse Nucleus, the database and collaboration engine, now includes improved IT management tools, such as expanded version control to handle atomic checkpoints on the server. Updated Large File Transfer service enables users to move files between servers, on premises or in the cloud to benefit hybrid workflows. And new self-service deployment instructions for Enterprise Nucleus Server on AWS are now available, letting customers deploy and manage Nucleus in the cloud.
  • +
+

Customers Dive Into Omniverse Enterprise 

+

Many customers around the world have experienced enhanced 3D workflows with Omniverse Enterprise.

+ +

Dentsu International, one of the largest global marketing and advertising agency networks, always looks for solutions that enable collaborative and seamless work, with a central repository for completed projects.

+ +

In addition to enhancing current pipelines with Omniverse Enterprise, Dentsu is looking to incorporate NVIDIA generative AI into its 3D design pipeline with software development kits like Omniverse ACE and Audio2Face.

+ +

Mercedes-Benz, the German premium vehicle manufacturer, is using Omniverse Enterprise at its sites worldwide to design, plan and optimize its manufacturing and assembly facilities. By developing full-fidelity digital twins of their production environments, globally dispersed teams will open up new abilities to collaborate in real time, accelerate decision-making and identify opportunities to reduce waste, decrease energy consumption and continuously enhance quality.

+ +

Zaha Hadid Architects (ZHA) is a renowned architectural design firm that has created some of the world’s most singular building designs. ZHA focuses on creating transformative cultural, corporate and residential spaces through cutting-edge technologies. With Omniverse Enterprise, the team can accelerate and automate its workflows, as well as develop custom tools within the platform.

+ +

“We are working with NVIDIA to incorporate Omniverse as the connective infrastructure of our tech stack. Our goal is to retain design intent across the various project stages and improve productivity,” said Shajay Bhooshan, associate director at Zaha Hadid Architects. “We expect NVIDIA Omniverse to play a critical, supportive role to our efforts to create a platform that’s agnostic, version-controlled and a single source of truth for design data, as it evolves from idea to delivery.”

+ +

NVIDIA Omniverse Enterprise is available by subscription from BOXX Technologies, Dell Technologies, Z by HP and Lenovo, and channel partners including Arrow, ASK, PNY and Leadtek. The platform is optimized to run on NVIDIA-Certified, RTX-enabled desktop and mobile workstations, as well as servers, with new support for NVIDIA RTX Ada generation systems.

+ +

Watch the NVIDIA special address at CES on demand. Learn more about NVIDIA Omniverse Enterprise and try Omniverse for free.

+ +
\ No newline at end of file diff --git a/_posts/nvidia/2023-1-3-p=61565.md b/_posts/nvidia/2023-1-3-p=61565.md new file mode 100644 index 0000000..e58b07a --- /dev/null +++ b/_posts/nvidia/2023-1-3-p=61565.md @@ -0,0 +1,66 @@ +--- +author: NVIDIA High Performance Computing +author_tag: nvidia +blog_subtitle: '' +blog_title: NVIDIA Blog +blog_url: https://blogs.nvidia.com/ +category: nvidia +date: '2023-01-03 16:30:44' +layout: post +original_url: https://blogs.nvidia.com/blog/2023/01/03/omniverse-creators-generative-ai-rtx-remix/ +slug: nvidia-opens-omniverse-portals-with-generative-ais-for-3d-and-rtx-remix +title: NVIDIA Opens Omniverse Portals With Generative AIs for 3D and RTX Remix +--- + +

Whether creating realistic digital humans that can express raw emotion or building immersive virtual worlds, those in the design, engineering, creative and other industries across the globe are reaching new heights through 3D workflows.

+ +

Animators, creators and developers can use new AI-powered tools to reimagine 3D environments, simulations and the metaverse — the 3D evolution of the internet.

+ +

Based on the Universal Scene Description (USD) framework, the NVIDIA Omniverse platform — which enables the development of metaverse applications — is expanding with Blender enhancements and a new suite of experimental generative AI tools for 3D artists.

+ +

In a special address at CES, NVIDIA announced these features, as well as Omniverse preinstallation on NVIDIA Studio laptops and thousands of new, free USD assets to help accelerate adoption of 3D workflows.

+ +

+ +

NVIDIA Studio 3D creators Jeremy Lightcap, Edward McEvenue, Rafi Nizam, Jae Solina, Pekka Varis, Shangyu Wang, Ashley Goldstein collaborate across multiple 3D design tools, time zones and RTX systems with Omniverse.

+ +

Plus, a new release for Blender, now available in the Omniverse Launcher, is bringing 3D generative AI capabilities to Blender users everywhere. A new panel lets Blender users easily transfer shape keys and rigged characters. The challenge of reattaching a rigged character’s head can now be solved with a one-button operation from Omniverse Audio2Face — an AI-enabled tool that automatically generates realistic facial expressions from an audio file.

+ +

Another new panel for scene optimization lets users create USD scenes within their multi-app 3D workflows more easily and in real time.

+ +

In addition, Audio2Face, Audio2Gesture and Audio2Emotion — generative AI tools that enable instant 3D character animation — are getting performance updates that make it easier for developers and creators to integrate into their current 3D pipelines.

+ +

Creators can generate facial expressions from an audio file using Audio2Face; realistic emotions ranging from happy and excited to sad and regretful with Audio2Emotion; and realistic upper-body movement using Audio2Gesture. These audio-to-animation tools are game-changers for 3D artists, eliminating the need to perform tedious, manual tasks.

+ +

AI-assisted creator tools are expanding to even more communities of creative and technical professionals. When NVIDIA Canvas was introduced, it empowered artists to seamlessly generate landscapes and iterate on them with simple brushstrokes and AI. Coming soon, all RTX users will be able to download an update to Canvas that introduces 360 surround images to create and conceptualize panoramic environments and beautiful images. The AI ToyBox, which features extensions derived from NVIDIA Research, enables creators to generate 3D meshes from 2D inputs.

+ +

Omniverse’s powerful AI tools simplify complex tasks. Creators of all levels can tap into these resources to produce high-quality outputs that meet the growing demands for content and virtual worlds in the metaverse.

+ +

“The demand for 3D skills is skyrocketing, but learning 3D can be pretty scary to some, and definitely time consuming,” said Jae Solina, aka JSFilmz. “But these new platform developments not only let creatives and technical professionals continue to work in their favorite 3D tools, but also supercharge their craft and even use AI to assist them in their workflows.”

+ +

Omniverse Launcher, the portal to download Omniverse content and reference applications, has also been made available to system builders so they can preinstall it, enabling optimized, out-of-the-box experiences for 3D creators on NVIDIA Studio-validated laptops. GIGABYTE and AORUS will be the first laptops launching in 2023 with Omniverse Launcher preinstalled, expanding platform access to a growing number of 3D content creators.

+ +

NVIDIA RTX Remix is a free modding platform, built on Omniverse, that enables modders to quickly create and share #RTXON mods for classic games, each with full ray tracing, enhanced materials, NVIDIA DLSS 3 and NVIDIA Reflex. Its release in early access is coming soon. The jaw-dropping Portal with RTX was built with RTX Remix, and to demonstrate how easy it is for modders to turn RTX ON in their mods, we shared RTX Remix with the original creator of Portal: Prelude, an unofficial Portal prequel released in 2008.

+ +

Omniverse users can also access thousands of new, free USD assets, including a USD-based NVIDIA RTX Winter World Minecraft experience, and learn to create their own NVIDIA SimReady assets for complex simulation building. Using Omniverse, creators can supercharge their existing workflows using familiar tools such as Autodesk Maya, Autodesk 3ds Max, Blender, Adobe Substance 3D Painter, and more with AI, simulation tools and real-time RTX-accelerated rendering.

+ +

All types of 3D creators can take advantage of these new tools to push the boundaries of 3D simulation and virtual world-building. Users can reimagine digital worlds and animate lifelike characters with new depths of creativity through the bridging of audio-to-animation tools, generative AI and the metaverse.

+ +

Latest Omniverse Platform Updates

+

The latest updates within Omniverse include:

+ +
    +
  • Early access for the Unity Omniverse Connector is now available.
  • +
  • Blender alpha release, now available in the Omniverse Launcher, enables users to repair geometry, generate automatic UVs and decimate high-resolution CAD data to more usable polycounts.
  • +
  • Audio2Face, Audio2Emotion and Audio2Gesture updates better enable instant, realistic animation of characters, now available in Omniverse Audio2Face and Omniverse Machinima.
  • +
  • NVIDIA Canvas is coming soon to the Omniverse Launcher with new capabilities that enable the creation of 360-degree landscapes with simple brushstrokes. Users can import the environments into 3D apps to test different settings and lighting.
  • +
  • AI ToyBox of experimental tools, built by NVIDIA Research, that include GET3D, an Omniverse extension that generates trainable 3D models from 2D images, letting developers use their own datasets to rapidly create models for 3D virtual worlds, is now available in the Omniverse Extension Manager.
  • +
  • Thousands of new, free 3D assets are now available worldwide for users to build and create within Omniverse.
  • +
+

Watch the NVIDIA special address at CES on demand.

+ +

Creators can download NVIDIA Omniverse for free, submit their work to the NVIDIA Omniverse gallery, and find resources through forums, Medium, Twitter, YouTube, Twitch, Instagram and Discord.

+ +

Follow NVIDIA Studio on Instagram, Twitter and Facebook and access tutorials — including on Omniverse — on the Studio YouTube channel. Get the latest Studio updates directly in your inbox by subscribing to the NVIDIA Studio newsletter.

+ +
\ No newline at end of file diff --git a/_posts/nvidia/2023-1-3-p=61578.md b/_posts/nvidia/2023-1-3-p=61578.md new file mode 100644 index 0000000..0bc530a --- /dev/null +++ b/_posts/nvidia/2023-1-3-p=61578.md @@ -0,0 +1,57 @@ +--- +author: NVIDIA High Performance Computing +author_tag: nvidia +blog_subtitle: '' +blog_title: NVIDIA Blog +blog_url: https://blogs.nvidia.com/ +category: nvidia +date: '2023-01-03 16:42:16' +layout: post +original_url: https://blogs.nvidia.com/blog/2023/01/03/geforce-now-streams-car/ +slug: game-on-nvidia-geforce-now-streams-vast-library-of-games-to-the-car +title: Game On- NVIDIA GeForce NOW Streams Vast Library of Games to the Car +--- + +

Autonomous and electric vehicles are making personal transportation safer and more sustainable — as well as more entertaining.

+ +

At CES today, NVIDIA announced that the NVIDIA GeForce NOW cloud gaming service will be coming to cars, with no special equipment needed. Hyundai Motor Group, BYD and Polestar — already members of the NVIDIA DRIVE ecosystem — are the first automakers working with NVIDIA to deliver GeForce NOW in their vehicles.

+ +

+ +

“We are excited to offer our customers the best in technology, comfort, design from BYD and now the latest in gaming through NVIDIA GeForce NOW high-performance cloud gaming service,” said Stella Li, Vice President of BYD and CEO of BYD America. “The experience of driving our cars is becoming increasingly sophisticated, entertaining and sustainable.”

+ +

Thanks to cloud technology and the mobile internet, vehicle occupants can keep boredom at bay with the ability to enjoy music, movies and now video games in the car. Drivers and front seat passengers can access games while parked or charging, while those in the backseat of vehicles where rear entertainment is enabled can play anytime on the road.

+ +

+ +

Hyundai Motor Group, which houses the Hyundai, Kia and Genesis brands, features NVIDIA DRIVE in-vehicle infotainment across its entire lineup.

+ +

In March, BYD, the world’s leading manufacturer of new energy vehicles, announced it would build its NEVs on the NVIDIA DRIVE Hyperion platform, starting in the first half of 2023.

+ +

Polestar is also using NVIDIA DRIVE for its software-defined architecture, with the upcoming Polestar 3 powered by the NVIDIA DRIVE Orin system-on-a-chip.

+ +

Now, these trailblazing automakers will offer vehicles that are as entertaining as they are intelligent with the addition of GeForce NOW.

+ +

1,000+ Games, Anywhere, Anytime

+

The next generation of vehicles are transforming from fixed function to software-defined. At the same time, cars are becoming more connected, opening up new services that weren’t possible before.

+ +

GeForce NOW delivers a full PC-gaming experience to nearly any device, including laptops, mobile devices, smart TVs — and now, personal vehicles. The GeForce NOW client for auto can run on Android- or browser-based in-vehicle infotainment systems, providing an app to easily select and launch favorite games.

+ +

+ +

It uses breakthroughs in low-latency cloud-streaming technology for real-time game play powered by GeForce servers in the cloud. GeForce NOW features more than 1,500 titles — over 1,000 playable with a controller — including top-rated games such as A Plague Tale: Requiem, The Witcher 3: Wild Hunt and Cyberpunk 2077, from leading stores such as Steam, Epic Games Store, the EA app, Ubisoft and GOG.com.

+ +

The service also includes many of the most-played free-to-play games like Fortnite, Lost Ark and Destiny 2.

+ +

Experiencing the Future

+

The ability to stream games to the car is a key component of the future of in-vehicle experiences.

+ +

As autonomous-driving technology becomes more prevalent, the interior will play a much larger role in differentiating brand identities. Consumers will focus more on features such as digital assistants, beautiful graphic displays and streaming content as they make vehicle buying decisions.

+ +

+ +

This shift is turning vehicles into living spaces, where passengers can enjoy what’s important to them — whether it be work or play — rather than the stress of driving.

+ +

By providing GeForce NOW in their vehicles, Hyundai Motor Group, BYD and Polestar are taking a significant step into this new era of personal transportation.

+ +
\ No newline at end of file diff --git a/_posts/nvidia/2023-1-3-p=61579.md b/_posts/nvidia/2023-1-3-p=61579.md new file mode 100644 index 0000000..066d0f9 --- /dev/null +++ b/_posts/nvidia/2023-1-3-p=61579.md @@ -0,0 +1,49 @@ +--- +author: NVIDIA High Performance Computing +author_tag: nvidia +blog_subtitle: '' +blog_title: NVIDIA Blog +blog_url: https://blogs.nvidia.com/ +category: nvidia +date: '2023-01-03 16:42:24' +layout: post +original_url: https://blogs.nvidia.com/blog/2023/01/03/mercedes-benz-next-gen-factories-omniverse/ +slug: manufactured-in-the-metaverse-mercedes-benz-assembles-next-gen-factories-with-nvidia-omniverse +title: Manufactured in the Metaverse- Mercedes-Benz Assembles Next-Gen Factories With + NVIDIA Omniverse +--- + +

Building state-of-the-art factories requires a state-of-the art planning system.

+ +

Mercedes-Benz announced at CES that it is taking the next step in digitizing its production process, using the NVIDIA Omniverse platform to design and plan manufacturing and assembly facilities.

+ +

By tapping into NVIDIA AI and metaverse technologies, the automaker can create feedback loops to reduce waste, decrease energy consumption and continuously enhance quality.

+ +

+ +

Mercedes-Benz has been working with NVIDIA to develop software-defined vehicles. Its upcoming fleets will be built on NVIDIA DRIVE Orin centralized compute, with intelligent driving capabilities tested and validated in the NVIDIA DRIVE Sim platform, built on Omniverse.

+ +

The automaker’s latest announcement infuses AI and metaverse technologies even further into the Mercedes-Benz development process with smarter, more efficient manufacturing.

+ +

Electric-First Platform Built on Digital-First Process

+

Vehicle production is a colossal undertaking that requires thousands of parts and workers moving in harmony. Any supply chain or production issues can lead to costly delays.

+ +

On top of that, when automakers roll out a new model, they must reconfigure the layout of production plants to account for the new vehicle design. This process can take significant portions of the factory offline, pausing manufacturing for existing vehicles.

+ +

Mercedes-Benz plans to start production of its new dedicated platform for electric vehicles at its plant in Rastatt, Germany. The site currently manufactures the automaker’s A- and B-Class as well as the compact SUV GLA and the all-electric Mercedes-Benz EQA.

+ +
Mercedes-Benz plant in Rastatt, Germany, where the all-electric EQA is manufactured.
+

Experts from NVIDIA and Mercedes-Benz operations are setting up a “digital-first” planning process for the plant that won’t disrupt the current production of compact car models at the site. This blueprint will be rolled out to other parts of the global Mercedes-Benz production network for more agile vehicle manufacturing.

+ +

Developing With Digital Twins

+

NVIDIA Omniverse is an open 3D development platform enabling enterprises and institutions across all industries to build and operate digital twins for industrial and scientific use cases. It’s based on Universal Scene Description (USD), allowing enterprises and developers to construct custom 3D pipelines to break down data silos and interact with a single-source-of-truth view of their aggregated simulations.

+ +

With Omniverse, Mercedes-Benz planners can access the digital twin of the factory, reviewing and optimizing the plant as needed. Every change can be quickly evaluated and validated in the virtual world, then implemented in the real world to ensure maximum efficiency and ergonomics for factory workers.

+ +

Additionally, Mercedes-Benz can synchronize plant locations anywhere in the world by connecting Omniverse with its in-house MO360 Data Platform.

+ +

This capability streamlines operations across the global production network and enables over-the-air software updates to manufacturing equipment.

+ +

By planning production on NVIDIA Omniverse, Mercedes-Benz is implementing a manufacturing system as intelligent as its vehicles.

+ +
\ No newline at end of file diff --git a/_posts/nvidia/2023-1-3-p=61580.md b/_posts/nvidia/2023-1-3-p=61580.md new file mode 100644 index 0000000..a760318 --- /dev/null +++ b/_posts/nvidia/2023-1-3-p=61580.md @@ -0,0 +1,62 @@ +--- +author: NVIDIA High Performance Computing +author_tag: nvidia +blog_subtitle: '' +blog_title: NVIDIA Blog +blog_url: https://blogs.nvidia.com/ +category: nvidia +date: '2023-01-03 16:36:36' +layout: post +original_url: https://blogs.nvidia.com/blog/2023/01/03/isaac-sim-major-updates/ +slug: nvidia-advances-simulation-for-intelligent-robots-with-major-updates-to-isaac-sim +title: NVIDIA Advances Simulation for Intelligent Robots With Major Updates to Isaac + Sim +--- + +

Demand for intelligent robots is growing as more industries embrace automation to address supply chain challenges and labor force shortages.

+ +

The installed base of industrial and commercial robots will grow more than 6.4x — from 3.1 million in 2020 to 20 million in 2030, according to ABI Research. Developing, validating and deploying these new AI-based robots requires simulation technology that places them in realistic scenarios.

+ +

At CES, NVIDIA announced major updates to Isaac Sim, its robotics simulation tool to build and test virtual robots in realistic environments across varied operating conditions. Now accessible from the cloud, Isaac Sim is built on NVIDIA Omniverse, a platform for creating and operating metaverse applications.

+ +

Powerful AI-Driven Capabilities for Roboticists 

+

With humans increasingly working side by side with collaborative robots (cobots) or autonomous mobile robots (AMRs), it’s critical that people and their common behaviors are added to simulations.

+ +

Isaac Sim’s new people simulation capability allows human characters to be added to a warehouse or manufacturing facility and tasked with executing familiar behaviors — like stacking packages or pushing carts. Many of the most common behaviors are already supported, so simulating them is as simple as issuing a command.

+ +

To minimize the difference between results observed in a simulated world versus those seen in the real world, it’s imperative to have physically accurate sensor models.

+ +

Using NVIDIA RTX technology, Isaac Sim can now render physically accurate data from sensors in real time. In the case of an RTX-simulated lidar, ray tracing provides more accurate sensor data under various lighting conditions or in response to reflective materials.

+ +

Isaac Sim also provides numerous new simulation-ready 3D assets, which are critical to building physically accurate simulated environments. Everything from warehouse parts to popular robots come ready to go, so developers and users can quickly start building.

+ +

Significant new capabilities for robotics researchers include advances in Isaac Gym for reinforcement learning and Isaac Cortex for collaborative robot programming. Additionally, a new tool, Isaac ORBIT, provides simulation operating environments and benchmarks for robot learning and motion planning.

+ +

For the large community of Robot Operating System (ROS) developers, Isaac Sim upgrades support for ROS 2 Humble and Windows. All of the Isaac ROS software can now be used in simulation.

+ +

Expanding Isaac Platform Capabilities and Ecosystem Drives Adoption 

+

The large and complex robotics ecosystem spans multiple industries, from logistics and manufacturing to retail, energy, sustainable farming and more.

+ +

The end-to-end Isaac robotics platform provides advanced AI and simulation software as well as accelerated compute capabilities to the robotics ecosystem. Over a million developers and more than a thousand companies rely on one or many parts of it. This includes many companies that have deployed physical robots developed and tested in the virtual world using Isaac Sim.

+ +

Telexistence has deployed beverage restocking robots across 300 convenience stores in Japan. To improve safety, Deutsche Bahn is training AI models to handle very important but unexpected corner cases that happen rarely in the real world — like luggage falling on a train track. Sarcos Robotics is developing robots to pick and place solar panels in renewable energy installations.

+ +

Festo uses Isaac Cortex to simplify programming for cobots and transfer simulated skills to the physical robots. Fraunhofer is developing advanced AMRs using the physically accurate and full-fidelity visualization features of Isaac Sim. Flexiv is using Isaac Replicator for synthetic data generation to train AI models.

+ +

While training robots is important, simulation is playing a critical role in training the human operators to work with and program robots. Ready Robotics is teaching programming of industrial robots with Isaac Sim. Universal Robots is using Isaac Sim for workforce development to train end operators from the cloud.

+ +

+ +

Cloud Access Puts Isaac Platform Within Reach Everywhere

+

With Isaac Sim available in the cloud, global, multidisciplinary teams working on robotics projects can collaborate with increased accessibility, agility and scalability for testing and training virtual robots.

+ +

A lack of adequate training data often hinders deployment when building new facilities with robotics systems or scaling existing autonomous systems. Isaac Sim taps into Isaac Replicator to enable developers to create massive ground-truth datasets that mimic the physics of real-world environments.

+ +

Once deployed, dynamic route planning is required to operate an efficient fleet of hundreds of robots as automation requirements scale. NVIDIA cuOpt, a real-time fleet task-assignment and route-planning engine improves operational efficiencies with automation.

+ +

Get Started on Isaac Sim 

+

Download Isaac Sim today and learn more technical details about the new platform’s features and capabilities.

+ +

Watch NVIDIA’s special address at CES, where its executives unveiled products, partnerships and offerings in autonomous machines, robotics, design, simulation and more.

+ +
\ No newline at end of file diff --git a/_posts/nvidia/2023-1-3-p=61582.md b/_posts/nvidia/2023-1-3-p=61582.md new file mode 100644 index 0000000..5f7b548 --- /dev/null +++ b/_posts/nvidia/2023-1-3-p=61582.md @@ -0,0 +1,58 @@ +--- +author: NVIDIA High Performance Computing +author_tag: nvidia +blog_subtitle: '' +blog_title: NVIDIA Blog +blog_url: https://blogs.nvidia.com/ +category: nvidia +date: '2023-01-03 16:44:46' +layout: post +original_url: https://blogs.nvidia.com/blog/2023/01/03/drive-sim-ix-vehicle-interior-experiences/ +slug: intelligent-design-nvidia-drive-revolutionizes-vehicle-interior-experiences +title: Intelligent Design- NVIDIA DRIVE Revolutionizes Vehicle Interior Experiences +--- + +

AI is extending further into the vehicle as autonomous-driving technology becomes more prevalent.

+ +

With the NVIDIA DRIVE platform, automakers can design and implement intelligent interior features to continuously surprise and delight customers.

+ +

It all begins with the compute architecture. The recently introduced NVIDIA DRIVE Thor platform unifies traditionally distributed functions in vehicles  — including digital cluster, infotainment, parking and assisted driving — for greater efficiency in development and faster software iteration.

+ +

NVIDIA DRIVE Concierge, built on the DRIVE IX software stack, runs an array of safety and convenience features, including driver and occupant monitoring, digital assistants and autonomous-vehicle visualization.

+ +

Automakers can benefit from NVIDIA data center solutions even if they aren’t using the NVIDIA DRIVE platform. With cloud technology, vehicles can stream the NVIDIA GeForce NOW cloud-gaming service without any special equipment. Plus, developers can train, test and validate in-vehicle AI models on NVIDIA DGX servers.

+ +

The same data center technology that’s accelerating AI development — in combination with the NVIDIA Omniverse platform for creating and operating metaverse applications — is also revolutionizing the automotive product cycle. Using NVIDIA DRIVE Sim built on Omniverse, automakers can design vehicle interiors and retail experiences entirely in the virtual world.

+ +

+ +

Easing Pain Points From Concept to Customer

+

Designing and selling vehicles requires the highest levels of organization and orchestration. The cockpit alone has dozens of components — such as steering wheel, cluster and infotainment — that developers must create and integrate with the rest of the car.

+ +

+ +

These processes are incredibly time- and resource-intensive — there are countless configurations, and chosen designs must be built out and tested prior to production. Vehicle designers must collaborate on various layouts, which must then be validated and approved. Customers must travel to dealerships to experience various options, and the ability to test features depends on a store’s inventory at any given time.

+ +

In the virtual world, developers can easily design vehicles, and car buyers can seamlessly test them, leading to an optimal experience on both ends of the production pipeline.

+ +

Design and Collaboration

+

Automakers operate design centers around the world, tapping into expertise from North America, Europe, Asia and other automotive hubs. Working on user experience concepts across these locations requires frequent international travel and close coordination.

+ +

With DRIVE Sim, designers and engineers anywhere in the world can work together to develop the cockpit experience, without having to leave their desks.

+ +

+ +

Design teams can also save time and valuable resources by testing concepts in the virtual world, without having to wait for physical prototypes. Decision-makers can review designs and ensure they meet relevant safety standards in DRIVE Sim before sending them to production.

+ +

Transforming the Customer Experience

+

The benefits of in-vehicle simulation extend far beyond the design phase.

+ +

Consumers are increasingly expecting full-service digital retail experiences. More than 60% of shoppers want to conduct more of the car-buying process online compared to the last time they bought a vehicle, while more than 75% are open to buying a car entirely online, according to an Autotrader survey.

+ +

The same tools used to design the vehicle can help meet these rising consumer expectations.

+ +

With DRIVE Sim, car buyers can configure and test the car from the comfort of their homes. Customers can see all potential options and combinations of vehicle features at the push of a button and take their dream car for a virtual spin — no lengthy trips to the dealership required.

+ +

From concept design to customer experience, DRIVE Sim is easing the process and opening up new ways to design and enjoy intelligent vehicles.

+ +
\ No newline at end of file diff --git a/_posts/nvidia/2023-1-3-p=61644.md b/_posts/nvidia/2023-1-3-p=61644.md new file mode 100644 index 0000000..834c108 --- /dev/null +++ b/_posts/nvidia/2023-1-3-p=61644.md @@ -0,0 +1,93 @@ +--- +author: NVIDIA High Performance Computing +author_tag: nvidia +blog_subtitle: '' +blog_title: NVIDIA Blog +blog_url: https://blogs.nvidia.com/ +category: nvidia +date: '2023-01-03 16:51:14' +layout: post +original_url: https://blogs.nvidia.com/blog/2023/01/03/gaming-creator-robotics-auto-ces/ +slug: nvidia-reveals-gaming-creator-robotics-auto-innovations-at-ces +title: NVIDIA Reveals Gaming, Creator, Robotics, Auto Innovations at CES +--- + +

Powerful new GeForce RTX GPUs, a new generation of hyper-efficient laptops and new Omniverse capabilities and partnerships across the automotive industry were highlights of a news-packed address ahead of this week’s CES trade show in Las Vegas.

+ +

“AI will define the future of computing and this has influenced much of what we’re covering today,” said Jeff Fisher, senior vice president for gaming products at NVIDIA, as he kicked off the presentation.

+ +

Fisher was joined by several leaders from NVIDIA to introduce products and partnerships across gaming and content creation, robotics and next-generation automobiles.

+ +

The headline news:

+ + +

+ +

Introducing GeForce RTX 40 Series Laptops, RTX 4070 Ti Graphics Cards and DLSS 3 Games

+

Fisher said the performance and power efficiency of the NVIDIA GeForce RTX 40 Series Laptop GPUs enable the greatest ever generational leap, including 14-inch gaming and creating powerhouse laptops, starting at $999 in February.

+ +

New GeForce RTX 4070 Ti graphics cards for desktops are faster than last generation’s RTX 3090 Ti at nearly half the power, bringing the NVIDIA Ada Lovelace architecture down to $799, with availability starting Jan. 5.

+ +

And DLSS 3 is being adopted by developers faster than any prior NVIDIA tech, with 50 released and upcoming titles, including Witchfire, The Day Before, Warhaven, THRONE AND LIBERTY and Atomic Heart.

+ +

In addition, RTX 4080 performance is coming to the NVIDIA GeForce NOW cloud-gaming service. As a result, Fisher said millions more gamers will have access to the NVIDIA Ada architecture with GeForce NOW’s Ultimate membership.

+ +

The new tier will bring NVIDIA Reflex and 240 frames per second streaming to the cloud for the first time, along with full ray tracing and DLSS 3 in games like Portal With RTX.

+ +

Momentum for NVIDIA RTX continues to build, Fisher said. “Creating has grown beyond photos and videos to virtual worlds rendered with 3D cinematic graphics and true-to-life physics,” Fisher said. “The RTX platform is powering this growth.”

+ +

Ray tracing and AI are defining the next generation of content, and NVIDIA Studio is the platform for this new breed of content creators. The heartbeat of Studio is found in NVIDIA Omniverse, where creators can connect accelerated apps and collaborate in real time.

+ +
NVIDIA’s Stephanie Johnson, vice president of consumer marketing, introduced a new suite of generative AI tools and experimental plug-ins that harness the power of AI.
+

Built with NVIDIA RTX, Omniverse is a platform enabling 3D artists to connect their favorite tools from Adobe, Autodesk, SideFX, Unreal Engine and more. And Omniverse now has a new Connector for Unity, said Stephanie Johnson, vice president of consumer marketing at NVIDIA.

+ +

Johnson introduced a suite of new generative AI tools and experimental plug-ins using the power of AI as the ultimate creative assistant. Audio2Face and Audio2Gesture generate animations from an audio file. The AI ToyBox by NVIDIA Research lets users generate 3D meshes from 2D inputs.

+ +

Companies have used generative AI technology to build Omniverse Connectors and extensions. Move.AI’s Omniverse extension, for example, enables video-to-animation. Lumirithmic generates 3D mesh for heads from facial scans. And Elevate3D generates photorealistic 3D visualizations of products from 360-degree video recordings.

+ +

Johnson also announced that NVIDIA RTX Remix, which is built on Omniverse and is “the easiest way to mod classic games,” will be entering early access soon. “The modding community can’t wait to get their hands on Remix,” she said.

+ +

NVIDIA Isaac Sim Brings Significantly Improved Features, Tools for Developing Intelligent Robots 

+

Simulation plays a vital role in the lifecycle of a robotics project, explained Deepu Talla, vice president of embedded and edge computing at NVIDIA. Partners are using NVIDIA Isaac Sim to create digital twins that help speed the training and deployment of intelligent robots.

+ +
NVIDIA’s Deepu Talla, vice president of embedded and edge computing, announced the next release of Isaac Sim, NVIDIA’s robotics simulation application and synthetic data generation tool.
+

To revolutionize the way the robotics ecosystem develops the next generation of autonomous robots, Talla announced major updates to the next release of Isaac Sim. This includes improved sensor and lidar support to more accurately model real-world performance, a new conveyor-building tool, a new utility to add people to the simulation environment, a collection of new sim-ready warehouse assets and a host of new popular robots that come pre-integrated.

+ +

For the open-source ROS developer community, this release upgrades support for ROS 2 Humble and Windows, Talla added. And for robotics researchers, NVIDIA is introducing a new tool called Isaac ORBIT, which provides operating environments for manipulator robots. NVIDIA has also improved Isaac Gym for reinforcement learning and updated Isaac Cortex for collaborative robot programming.

+ +

“We are committed to advancing robotics and arguably investing more than anyone else in the world,” Talla said. “We are well on the way to having a thousand to million times more virtual robots for every physical robot deployed.”

+ +

Mercedes-Benz to Create Digital Twins; Foxconn Building EVs on NVIDIA DRIVE; GeForce NOW Streams to Cars
+

+

The NVIDIA DRIVE platform is open and easy to program, said Ali Kani, vice president of automotive at NVIDIA.

+ +

Hundreds of partners across the automotive ecosystem are now developing software on NVIDIA DRIVE, including 20 of the top 30 manufacturers building new energy vehicles, many of the industry’s top tier one manufacturers and software makers, plus eight of the largest 10 trucking and robotaxi companies.

+ +

It’s a number that continues to grow, with Kani announcing a partnership with Foxconn, the world’s largest technology manufacturer and service provider, to build electric vehicles based on NVIDIA DRIVE Hyperion.

+ +
NVIDIA’s Ali Kani, vice president of automotive, announced a partnership with Foxconn, that GeForce NOW will be “coming to screens in your car” and that Mercedes-Benz is using NVIDIA digital twin technology to plan and build more efficient production facilities.
+

“With Hyperion adoption, Foxconn will manufacture vehicles with leading electric range as well as state-of-the-art AV technology while reducing time to market,” Kani said.

+ +

Kani touched on how, as next-generation cars become autonomous and electric, interiors are transformed into mobile living spaces, complete with the same entertainment available at home. GeForce NOW will be “coming to screens in your car,” Kani said.

+ +

Kani also announced several DRIVE partners are integrating GeForce NOW, including Hyundai Motor Group, BYD and Polestar.

+ +

While gamers will enjoy virtual worlds from inside their cars, tools such as the metaverse are critical to the development and testing of new autonomous vehicles.

+ +

Kani announced that Mercedes-Benz is using digital twin technology to plan and build more efficient production facilities. “The applications for Omniverse in the automotive market are staggering,” Kani said.

+ +

+ +
\ No newline at end of file diff --git a/_posts/nvidia/2023-1-3-p=61704.md b/_posts/nvidia/2023-1-3-p=61704.md new file mode 100644 index 0000000..0a69004 --- /dev/null +++ b/_posts/nvidia/2023-1-3-p=61704.md @@ -0,0 +1,109 @@ +--- +author: NVIDIA High Performance Computing +author_tag: nvidia +blog_subtitle: '' +blog_title: NVIDIA Blog +blog_url: https://blogs.nvidia.com/ +category: nvidia +date: '2023-01-03 16:40:52' +layout: post +original_url: https://blogs.nvidia.com/blog/2023/01/03/studio-laptops-omniverse-ces/ +slug: new-geforce-rtx-40-series-studio-laptops-omniverse-updates-accelerate-ai-powered-content-creation-in-the-nvidia-studio- +title: New GeForce RTX 40 Series Studio Laptops, Omniverse Updates Accelerate AI-Powered + Content Creation ‘In the NVIDIA Studio’ +--- + +

The future of content creation was on full display today during NVIDIA’s virtual special address at CES.

+ +

Fueled by powerful NVIDIA RTX technology and backed by the NVIDIA Studio platform for creators, a creative revolution is underway as a wave of 2D artists moves to 3D, video workflows move to real time and AI tools help artists create content faster.

+ +

For them, and the 110 million professional and hobbyist PC creators, there’s NVIDIA Studio. It’s a platform for content creators that supercharges 110 creative apps with RTX GPUs, provides lasting stability with NVIDIA Studio Drivers, and includes a powerful suite of Studio apps — Omniverse, Canvas, Broadcast and RTX Remix.

+ +

Creators can access all of this with any RTX GPU, including the new 40 Series laptops and GeForce RTX 4070 Ti graphics cards, powered by the ultra-efficient Ada Lovelace GPU architecture, which were introduced today at CES.

+ +

The heartbeat of the Studio platform lies in NVIDIA Omniverse, a platform where creators can connect their 3D apps and collaborate in real time. Omniverse is expanding with Blender enhancements, a new suite of experimental generative AI tools for 3D artists and thousands of new, free Universal Scene Description (USD) assets available in the content browser in Omniverse Create.

+ +

Built on Omniverse, NVIDIA RTX Remix is a free modding platform to quickly create mods for classic games with full ray tracing, enhanced materials, DLSS 3 and NVIDIA Reflex. We’ve enabled two amazing community modders to bring RTX to Portal’s most famous mod, Portal Prelude, preserving the timeless gameplay, while relighting it with full ray tracing.

+ +

The NVIDIA Broadcast and Canvas apps, also in the exclusive Studio software suite, have upcoming upgrades. Broadcast is adding a new Eye Contact feature, and Canvas will introduce 360-degree images that can be used as environment maps in 3D apps.

+ +

+ +

Coming to GeForce RTX 40 and 30 Series GPUs next month, the RTX Video Super Resolution feature uses AI to improve the quality of any video watched in a browser by removing blocky compression artifacts and upscaling video resolution. This improves video sharpness and clarity, and lets people watch online content in its native resolution on high-resolution displays. RTX Video Super Resolution will be available in February for Chrome and Edge browsers.

+ +

And to celebrate CES, influential Blender artist CG Geek will take on a three-day design challenge, powered by NVIDIA Studio technologies. Plus, explore how seven freelance artists from across the world created an experimental short film together, in real time, using Studio hardware, Omniverse and the Nucleus Cloud — this week In the NVIDIA Studio.

+ +

From Concept to Completion, Faster With New NVIDIA Studio Laptops

+

GeForce RTX GPUs provide massive speedups in 3D, video and broadcast workflows, as well as a myriad of AI tools, thanks to dedicated ray tracing, AI and video-encoding hardware. New Studio laptops with GeForce RTX 40 Series Laptop GPUs use the NVIDIA Ada Lovelace architecture — with support for DLSS 3, AV1 and more — and fifth-generation Max-Q technologies for maximum performance and efficiency.

+ +

The new laptops provide a quantum leap in performance over the last generation, and a massive boost in efficiency that allows OEMs to include more powerful GPUs in thinner, more portable systems. NVIDIA Studio laptops are purpose-built for creators, with the right combination of hardware for creative workflows, color-accurate displays, and preinstalled Studio Drivers and Studio software.

+ +
The Razer Blade 16 is available with up to a GeForce RTX 4090 Laptop GPU.
+

The instantly recognizable Razer Blade laptop gets an RTX 40 Series upgrade with up to a GeForce RTX 4090 Laptop GPU. The 16-inch, HDR-capable, dual-mode, mini-LED display boasts 1,000 nits peak brightness, which pairs nicely with a Creator mode that enables sharp, ultra-high-definition+ native resolution at 120Hz.

+ +
The MSI Stealth 17 Studio A13V is available with up to a GeForce RTX 4090 Laptop GPU.
+

MSI’s Stealth series will launch with a wide range of Studio options. With 14- to 17-inch models and up to GeForce RTX 4090 Laptop GPUs, creators can pick the laptop that best suits their needs.

+ +
Powered by a GeForce RTX 4070 Laptop GPU, the ASUS Zenbook Pro 14 showcases a new level of performance for a 14-inch laptop.
+

The increased efficiency of RTX 40 Series Laptop GPUs and fifth-generation Max-Q Technologies deliver high performance to laptops as slim as 14 inches, with up to GeForce RTX 4070 Laptop GPUs, a new milestone. The ASUS Zenbook Pro 14 OLED is one example, with a GeForce RTX 4070 Laptop GPU in a super-thin 14-inch design, and accompanied by an all-new, cutting-edge 2.8K 120Hz OLED NanoEdge Dolby Vision display.

+ +

GeForce RTX 4070 Ti: 40 Series Benefits and Faster Than a 3090 Ti

+

For artists who prefer to create from their desktops, the new GeForce RTX 4070 Ti graphics cards, available starting Thursday, Jan. 5, offer incredible performance that’s up to 15% faster than a GeForce RTX 3090 Ti in 3D rendering. Like the RTX 4090 and 4080, the newest RTX 40 Series GPU features dual AV1 video encoders. When enabled in top video-editing and livestreaming apps — such as Adobe Premiere Pro (via the Voukoder plug-in), DaVinci Resolve, OBS and Jianying — export times are cut in half with improved video quality.

+ +
GeForce RTX 4070 Ti graphics cards.
+

3D creators see up to a 70% increase in performance compared to the GeForce RTX 3070 Ti in popular apps like Autodesk Maya, Blender, Chaos V-Ray, Epic Games Unreal Engine and Unity. Users can also unlock creative freedom by unifying 3D assets, libraries and tools in Omniverse.

+ +

And all creators can benefit from the new fourth-generation Tensor Cores for AI tools, providing up to 2x increased performance compared to the previous generation.

+ +

The January Studio Driver, supporting the GeForce RTX 4070 Ti launch, will be available for download later this week.

+ +

Create Easily With Studio Software

+

NVIDIA RTX GPUs unlock exclusive software for creators: Omniverse, RTX Remix, Canvas and Broadcast. They’re all getting upgrades early this year.

+ +

Based on the USD framework, Omniverse enables artists to connect their favorite tools from Adobe, Autodesk, Epic Games, SideFX and more. Creators can see their scenes instantly come together without lengthy import or export cycles. Changes happen in real time across the connected apps — whether a single artist is working in multiple apps simultaneously or collaborating with another artist from across the globe.

+ +

The Blender alpha branch release, part of a series of updates to Omniverse, is now available in the Omniverse Launcher. It enables users to repair geometry, generate automatic UVs and decimate high-resolution CAD data to more usable polycounts.

+ +

Performance updates to Audio2Face, Audio2Gesture and Audio2Emotion — powerful AI tools within Omniverse — better enable instant, realistic animation of characters. The AI ToyBox of experimental tools is now available in the Omniverse Extension Manager. Thousands of new, free 3D assets have also been made available worldwide for users to build and create within Omniverse.

+ +
‘Portal: Prelude RTX’ is coming soon to mod sites ModDB and Nexus Mods.
+

RTX Remix, which is built on Omniverse and was used to create the jaw-dropping Portal with RTX, is approaching its early-access release. Nicolas “NykO18” Grevet — the original creator of Portal: Prelude — is using RTX Remix to remaster his unofficial Portal prequel from 2008. Grevet is working with modder David “Kralich” Driver-Gomm to modernize the assets and relight the game with stunning, full ray tracing. Portal: Prelude RTX will soon become free to download from mod sites like ModDB and Nexus Mods.

+ +

+ +

Canvas allows creators to paint by material, rather than color, using simple brushstrokes and AI to quickly conceptualize a beautiful image. Canvas is getting a new 360 image feature, which will become available in an upcoming free update for RTX users, helping artists create panoramic scenes and export them into any 3D app to use as an environment map. Such maps are used to change the ambient lighting of a 3D scene and appear in reflections for added realism.

+ +

+ +

Later this month, Broadcast will add a new Eye Contact feature that can change the position of the subject’s eyes to appear focused on the camera, as well as a new vignette effect. The update also improves the popular virtual background feature, adding temporal information for increased stability. The virtual background improvements can also be found in the recent OBS Studio 29.0 software release, and updated SDKs will be available for developers later this week.

+ +

3D Need for Speed

+

To showcase the speed at which Studio creators flow, talented 3D artist CG Geek has been challenged to create an animated Blender scene during CES. He’ll have just three days, starting Thursday, Jan. 5, to blitz through tough, typically time-consuming tasks.

+ +

He’s equipped with a GeForce RTX 4090 GPU, RTX acceleration and AI features throughout his entire workflow.

+ +

Follow his progress on his YouTube channel or NVIDIA Studio profiles on Instagram and Twitter. Look for the final render and a deep dive on his process in next week’s In the NVIDIA Studio blog.

+ +

In the NVIDIA Studio With Omniverse

+

Several 3D creators, including In the NVIDIA Studio artist Jae Solina, are showcasing the collaborative elements of Omniverse. Ashley Goldstein, Edward McEvenue, Jeremy Lightcap, Pekka Varis, Rafi Nizam and Shangyu Wang joined Solina — each using their favorite 3D tools, NVIDIA Studio laptops, new GeForce RTX GPUs and Omniverse Nucleus Cloud — to build an experimental short film together in real time.

+ +

+ +

The group started by pulling in a pair of Nizam’s characters: “3D” built in ShapesXR and “Figgy” in Maya. Both were dropped into Omniverse seamlessly as USD files. Prompted by Lightcap, Nizam added knitted textures to “3D” using Adobe Substance 3D Painter, and the group immediately saw this through the cloud.

+ +

Solina then offered to help with motion capture using Xsens. As he added the rigging, Lightcap brought in a background created in Blender. Lightcap noted that “RTX is insane,” as his animated western landscape rendered nearly instantly.

+ +
Anticipation for precipitation, McEvenue’s Houdini-based rainy day simulation.
+

With the scene coming together, Varis changed lighting elements, looking to make things “more epic,” he said. To incorporate a storm into the scene, McEvenue added a rain simulation created in Houdini. The film’s characters make a quick escape, hijacking the hot air balloon of Toy Jensen, the AI avatar of NVIDIA founder and CEO Jensen Huang.

+ +

For the ascent, Goldstein used her GeForce RTX 4090 GPU to add steam thrusters using OmniGraph.

+ +

With the characters now in outer space, Wang imported a satellite created in Autodesk Maya. The scene concludes as the space adventurers take off for Jupiter.

+ +

While showcasing their talents, these 3D artists displayed the capabilities of the NVIDIA Studio platform. Powerful RTX GPUs, acceleration in popular 3D apps and exclusive Omniverse software that enabled collaboration from all over the world in real time.

+ +

The future of creativity is shining bright. And the NVIDIA Studio ecosystem is lighting the scene.

+ +

Download monthly NVIDIA Studio Drivers for the latest reliability and performance improvements. Access tutorials and more on the Studio YouTube channel; follow Studio on Instagram, Twitter and Facebook; and get updates directly in your inbox by subscribing to the Studio newsletter.

+ +
\ No newline at end of file diff --git a/_posts/nvidia/2023-1-4-p=61452.md b/_posts/nvidia/2023-1-4-p=61452.md new file mode 100644 index 0000000..a179da2 --- /dev/null +++ b/_posts/nvidia/2023-1-4-p=61452.md @@ -0,0 +1,77 @@ +--- +author: NVIDIA High Performance Computing +author_tag: nvidia +blog_subtitle: '' +blog_title: NVIDIA Blog +blog_url: https://blogs.nvidia.com/ +category: nvidia +date: '2023-01-04 16:00:40' +layout: post +original_url: https://blogs.nvidia.com/blog/2023/01/04/hpc-ai-quantum-coffee/ +slug: lights-cameras-atoms-scientist-peers-into-the-quantum-future +title: Lights! Cameras! Atoms! Scientist Peers Into the Quantum Future +--- + +

Editor’s note: This is part of a series profiling people advancing science with high performance computing.

+ +

Ryan Coffee makes movies of molecules. Their impacts are huge.

+ +

The senior scientist at the SLAC National Accelerator Laboratory (above) says these visualizations could unlock the secrets of photosynthesis. They’ve already shown how sunlight can cause skin cancer.

+ +

Long term, they may help chemists engineer life-saving drugs and batteries that let electric cars go farther on a charge.

+ +

To make films that inspire that kind of work, Coffee’s team needs high-performance computers, AI and an excellent projector.

+ +

A Brighter Light

+

The projector is called the Linac Coherent Light Source (LCLS). It uses a linear accelerator a kilometer long to pulse X-rays up to 120 times per second.

+ +

That’s good enough for a Hollywood flick, but not fast enough for Coffee’s movies.

+ +

“We need to see how electron clouds move like soap bubbles around molecules, how you can squeeze them in certain ways and energy comes out,” said Coffee, a specialist in the physics at the intersection of atoms, molecules and optics.

+ +

So, an upgrade next year will let the giant instrument take 100,000 frames per second. In two years, another enhancement, called LCLS II, will push that to a million frames a second.

+ +

Sorting the frames that flash by that fast — in random order — is a job for the combination of high performance computing (HPC) and AI.

+ +

AIs in the Audience

+

Coffee’s goal is to sit an AI model in front of the LCLS II. It will watch the ultrafast movies to learn an atomic dance no human eyes could follow.

+ +

The work will require inference on the fastest GPUs available running next to the instrument in Menlo Park, Calif. Meanwhile, data streaming off LCLS II will be used to constantly retrain the model on a bank of NVIDIA A100 Tensor Core GPUs at the Argonne National Laboratory outside Chicago.

+ +

It’s a textbook case for HPC at the edge, and one that’s increasingly common in an era of giant scientific instruments that peer up at stars and down into atoms.

+ +
LCLS instrument for molecular science with HPC + AI
A look at part of the LCLS instrument. (For more details, see this blog.)
+

So far, Coffee’s team has been able to retrain an autoencoder model every 10-20 minutes while it makes inferences 100,000 times a second.

+ +

“We’re already in the realm of attosecond pulses where I can watch the electron bubbles slosh back and forth,” said Coffee, a core member of SLAC’s overall AI initiative.

+ +

A Broader AI Collaboration

+

The next step is even bigger.

+ +

Data from Coffee’s work on molecular movies will be securely shared with data from Argonne’s Advanced Proton Source, a kind of ultra-high-resolution still camera.

+ +

“We can use secure, federated machine learning to pull these two datasets together, creating a powerful, shared transformer model,” said Coffee, who’s collaborating with multiple organizations to make it happen.

+ +
Ryan Coffee HPC AI for molecular science
Coffee in the ‘projection room’ where the light in his next molecular movies will first appear.
+

The transformer will let scientists generate synthetic data for many data-starved applications such as research on fusion reactors.

+ +

It’s an effort specific to science that parallels work in federated learning in healthcare. Both want to build powerful AI models for their fields while preserving data privacy and security.

+ +

“We know people get the best results from large language models trained on many languages,” he said. “So, we want to do that in science by taking diverse views of the same things to create better models,” he said.

+ +

The Quantum Future

+

The atomic forces that Coffee studies may power tomorrow’s computers, the scientist explains.

+ +

“Imagine a stack of electron bubbles all in the same quantum state, so it’s a superconductor,” he said. “When I add one electron at the bottom, one pops to the top instantaneously because there’s no resistance.”

+ +

The concept, called entanglement in quantum computing, means two particles can switch states in lock step even if they’re on opposite sides of the planet.

+ +

That would give researchers like Coffee instant connections between powerful instruments like LCLS II and remote HPC centers training powerful AI models in real time.

+ +

Sounds like science fiction? Maybe not.

+ +

Coffee foresees a time when his experiments will outrun today’s computers, a time that will require alternative architectures and AIs. It’s the kind of big-picture thinking that excites him.

+ +

“I love the counterintuitiveness of quantum mechanics, especially when it has real, measurable results humans can apply — that’s the fun stuff.”

+ +
\ No newline at end of file diff --git a/_posts/nvidia/2023-1-4-p=61631.md b/_posts/nvidia/2023-1-4-p=61631.md new file mode 100644 index 0000000..d23b4df --- /dev/null +++ b/_posts/nvidia/2023-1-4-p=61631.md @@ -0,0 +1,51 @@ +--- +author: NVIDIA High Performance Computing +author_tag: nvidia +blog_subtitle: '' +blog_title: NVIDIA Blog +blog_url: https://blogs.nvidia.com/ +category: nvidia +date: '2023-01-04 14:00:37' +layout: post +original_url: https://blogs.nvidia.com/blog/2023/01/04/university-of-florida-ai/ +slug: uf-provost-joe-glover-on-building-a-leading-ai-university +title: UF Provost Joe Glover on Building a Leading AI University +--- + +

When NVIDIA co-founder Chris Malachowsky approached University of Florida Provost Joe Glover with the offer of an AI supercomputer, he couldn’t have predicted the transformative impact it would have on the university. In just a short time, UF has become one of the top public colleges in the U.S. and developed a groundbreaking neural network for healthcare research.

+ +

In a recent episode of NVIDIA’s AI Podcast, host Noah Kravitz sat down with Glover, who is also senior vice president of academic affairs at UF. The two discussed the university’s efforts to put AI to work across all aspects of higher education, including a public-private partnership with NVIDIA that has helped transform UF into one of the leading AI universities in the country.

+ +

Just a year after the partnership was unveiled in July 2020, UF rose to No. 5 on the U.S. News and World Report’s list of the best public colleges in the U.S. The ranking was, in part, a recognition of UF’s vision for infusing AI into its teaching and research.

+ +

Last March, UF Health, the university’s academic health center, teamed with NVIDIA to develop GatorTron, a neural network that generates synthetic clinical data researchers can use to train other AI models in healthcare.

+ +

According to Glover, the success of UF’s AI initiatives can be attributed to “a combination of generous philanthropy, some good decisions, a little inspiration and a few miracles here and there along the way.” 

+ +

He believes that the university’s AI-powered vision has significantly impacted its teaching and research and will continue to do so in the future.

+ +

+ +
The AI Podcast · UF Provost Joe Glover on Building a Leading AI University – Ep. 185
+ +

You Might Also Like

+

Art(ificial) Intelligence: Pindar Van Arman Builds Robots That Paint

+ +

Pindar Van Arman, an American artist and roboticist, designs painting robots that explore the differences between human and computational creativity. Since his first system in 2005, he has built multiple artificially creative robots. The most famous, Cloud Painter, was awarded first place at Robotart 2018.

+ +

Real or Not Real? Attorney Steven Frank Uses Deep Learning to Authenticate Art

+ +

Steven Frank is a partner at the law firm Morgan Lewis, specializing in intellectual property and commercial technology law. He’s also half of the husband-wife team that used convolutional neural networks to authenticate artistic masterpieces, including da Vinci’s Salvator Mundi, with AI’s help.

+ +

GANTheftAuto: Harrison Kinsley on AI-Generated Gaming Environments

+ +

Humans playing games against machines is nothing new, but now computers can develop games for people to play. Programming enthusiast and social media influencer Harrison Kinsley created GANTheftAuto, an AI-based neural network that generates a playable chunk of the classic video game Grand Theft Auto V.

+ +

Subscribe to the AI Podcast: Now Available on Amazon Music

+

You can now listen to the AI Podcast through Amazon Music.

+ +

Also get the AI Podcast through Apple Music, Google Podcasts, Google Play, Castbox, DoggCatcher, Overcast, PlayerFM, Pocket Casts, Podbay, PodBean, PodCruncher, PodKicker, Soundcloud, Spotify, Stitcher and TuneIn.

+ +

 

+ +
\ No newline at end of file diff --git a/_posts/redhat/2022-12-16-the-consequences-of-pausing-machineconfig-pools-in-openshifts-machine-config-operator-mco.md b/_posts/redhat/2022-12-16-the-consequences-of-pausing-machineconfig-pools-in-openshifts-machine-config-operator-mco.md new file mode 100644 index 0000000..5913a5c --- /dev/null +++ b/_posts/redhat/2022-12-16-the-consequences-of-pausing-machineconfig-pools-in-openshifts-machine-config-operator-mco.md @@ -0,0 +1,22 @@ +--- +author: RedHat Hybrid cloud blog +author_tag: redhat +blog_subtitle: Red Hat open hybrid cloud blog +blog_title: Hybrid cloud blog +blog_url: https://content.cloud.redhat.com/blog +category: redhat +date: '2022-12-16 14:00:00' +layout: post +original_url: https://content.cloud.redhat.com/blog/the-consequences-of-pausing-machineconfig-pools-in-openshifts-machine-config-operator-mco +slug: the-consequences-of-pausing-machineconfig-pools-in-openshift-s-machine-config-operator-mco- +title: The Consequences Of Pausing MachineConfig Pools In OpenShift's Machine Config + Operator (MCO) +--- + + + +

The Machine Config Operator (MCO) in OpenShift exposes a "Pause" feature on its MachineConfigPools that allows users to halt config deployment. We made some changes in 4.11 to try to make that feature "safer", and this blog tries to give some context around what pausing a MachineConfigPool actually does under the hood, the consequences/tradeoffs, and a small glimpse into how the MCO team thinks about exposing and evolving features like this.

+ + \ No newline at end of file diff --git a/_posts/redhat/2022-12-19-interview-with-fords-satish-puranam.md b/_posts/redhat/2022-12-19-interview-with-fords-satish-puranam.md new file mode 100644 index 0000000..865a343 --- /dev/null +++ b/_posts/redhat/2022-12-19-interview-with-fords-satish-puranam.md @@ -0,0 +1,29 @@ +--- +author: RedHat Hybrid cloud blog +author_tag: redhat +blog_subtitle: Red Hat open hybrid cloud blog +blog_title: Hybrid cloud blog +blog_url: https://content.cloud.redhat.com/blog +category: redhat +date: '2022-12-19 14:00:00' +layout: post +original_url: https://content.cloud.redhat.com/blog/interview-with-fords-satish-puranam +slug: interview-with-ford-s-satish-puranam +title: Interview with Ford's Satish Puranam +--- + + + +
+
+
+ +
+ +
+ +
+ + \ No newline at end of file diff --git a/_posts/redhat/2022-12-19-multiple-ways-of-authentication-on-openshift-container-platform-ocp-part-2.md b/_posts/redhat/2022-12-19-multiple-ways-of-authentication-on-openshift-container-platform-ocp-part-2.md new file mode 100644 index 0000000..2d0b3b4 --- /dev/null +++ b/_posts/redhat/2022-12-19-multiple-ways-of-authentication-on-openshift-container-platform-ocp-part-2.md @@ -0,0 +1,22 @@ +--- +author: RedHat Hybrid cloud blog +author_tag: redhat +blog_subtitle: Red Hat open hybrid cloud blog +blog_title: Hybrid cloud blog +blog_url: https://content.cloud.redhat.com/blog +category: redhat +date: '2022-12-19 14:00:00' +layout: post +original_url: https://content.cloud.redhat.com/blog/multiple-ways-of-authentication-on-openshift-container-platform-ocp-part-2 +slug: multiple-ways-of-authentication-on-openshift-container-platform-ocp-part-2 +title: Multiple ways of Authentication on OpenShift Container Platform (OCP), Part + 2 +--- + + + +

Part 2: LDAP Authentication in OpenShift using Red Hat Identity Manager (RH IDM)

+ + \ No newline at end of file diff --git a/_posts/redhat/2022-12-20-how-to-setup-external-secrets-operator-eso-as-a-service.md b/_posts/redhat/2022-12-20-how-to-setup-external-secrets-operator-eso-as-a-service.md new file mode 100644 index 0000000..815d015 --- /dev/null +++ b/_posts/redhat/2022-12-20-how-to-setup-external-secrets-operator-eso-as-a-service.md @@ -0,0 +1,24 @@ +--- +author: RedHat Hybrid cloud blog +author_tag: redhat +blog_subtitle: Red Hat open hybrid cloud blog +blog_title: Hybrid cloud blog +blog_url: https://content.cloud.redhat.com/blog +category: redhat +date: '2022-12-20 14:00:00' +layout: post +original_url: https://content.cloud.redhat.com/blog/how-to-setup-external-secrets-operator-eso-as-a-service +slug: how-to-setup-external-secrets-operator-eso-as-a-service +title: How to Setup External Secrets Operator (ESO) as a Service +--- + + + +

Overview

+
+

"The External Secrets Operator (ESO) extends Kubernetes with Custom Resources, which define where secrets live and how to synchronize them. The controller fetches secrets from an external API and creates Kubernetes secrets. If the secret from the external API changes, the controller will reconcile the state in the cluster and update the secrets accordingly."

+ +
+ \ No newline at end of file diff --git a/_posts/redhat/2022-12-20-how-to-version-bound-images-and-artifacts-for-openshift-operators.md b/_posts/redhat/2022-12-20-how-to-version-bound-images-and-artifacts-for-openshift-operators.md new file mode 100644 index 0000000..c7ff3e7 --- /dev/null +++ b/_posts/redhat/2022-12-20-how-to-version-bound-images-and-artifacts-for-openshift-operators.md @@ -0,0 +1,399 @@ +--- +author: RedHat Hybrid cloud blog +author_tag: redhat +blog_subtitle: Red Hat open hybrid cloud blog +blog_title: Hybrid cloud blog +blog_url: https://content.cloud.redhat.com/blog +category: redhat +date: '2022-12-20 14:00:00' +layout: post +original_url: https://content.cloud.redhat.com/blog/how-to-version-bound-images-and-artifacts-for-openshift-operators +slug: how-to-version-bound-images-and-artifacts-for-openshift-operators +title: How to Version-Bound Images and Artifacts for OpenShift Operators +--- + + + +
+

Background/Purpose

+

A common practice in Telco environments is to rely on using an Offline Registry to store all the container base images for OpenShift deployments. This Offline Registry needs to meet some requirements, and in this article we will focus mainly on how to optimize the storage used by the Offline Registry.

+ +

❗Be advised that the following values are an example representation for the purpose of this article, in your case the values might differ.

+ +

Environment Setup

+

The environment we are using consists of one bare-metal host whose role is the Bastion Node or Provisioning Node, DHCP-server and DNS-server, which will also host the Offline Registry and RHCOS-cache-httpd-server.

+ +

The details contained in this article are independent of the installer used to deploy the cluster.

+ +

Step 0. Installing the required tools

+

Download the oc-mirror cli:

+ +
+
+
$ export VERSION=stable-4.11
$ curl -s https://mirror.openshift.com/pub/openshift-v4/clients/ocp/$VERSION/oc-mirror.tar.gz | tar zxvf - oc-mirror
$ sudo cp oc-mirror /usr/local/bin
+
+ +
+ +

As described here, the oc-mirror cli became GA in channel-4.11.

+ +

The use of the oc-mirror cli is independent of the Offline Registry used (eg. Quay, docker registry, JFROG Artifactory, etc).

+ +

Step 1. Building the imageset-config.yaml

+

Step 1.1 How to check the operator version included in the redhat-operator-index channel

+

In this section we are going to introduce the procedure for obtaining the specific operator version we are going to use in the next section.

+ +

We are going to run the redhat-operator-index for tag 4.10 as a rootless podman:

+ +
+
+
$ mkdir -p ${HOME}/.config/systemd/user
$ podman login registry.redhat.io
$ podman run -d --name redhat-operator-index-4.10 -p 50051:50051 -it registry.redhat.io/redhat/redhat-operator-index:v4.10
$ cd ${HOME}/.config/systemd/user/
$ podman generate systemd --name redhat-operator-index-4.10 >> container-redhat-operator-index.service
$ systemctl --user daemon-reload
$ systemctl --user enable container-redhat-operator-index.service
$ systemctl --user restart container-redhat-operator-index.service
+
+ +
+ +

Validate that the container is running:

+ +
+
+
$ podman ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
ffe3352d17f9 registry.redhat.io/redhat/redhat-operator-index:v4.10 registry serve --... 7 weeks ago Up 2 hours ago 0.0.0.0:50051->50051/tcp redhat-operator-index-4.10
+
+ +
+ +

By creating this container, we will be able to check the channel content for OCP v4.10.

+ +

To determine the versions available we need to query the redhat-operator-index endpoint:

+ +
+
+
$ export LOCAL_RH_OPERATOR_INDEX=inbacrnrdl0101.offline.redhat.lan
$ export LOCAL_RH_OPERATOR_INDEX_PORT=50051
$ grpcurl -plaintext ${LOCAL_RH_OPERATOR_INDEX}:${LOCAL_RH_OPERATOR_INDEX_PORT} api.Registry.ListBundles | jq ' .packageName, .channelName, .bundlePath, .version'

parts of the output omitted

"odf-operator"
"stable-4.10"
"registry.redhat.io/odf4/odf-operator-bundle@sha256:662ec108960703f41652ff47b49e6a509a52fe244d52891d320b821dd9521f55"
"4.10.4"
"odf-operator"
"stable-4.10"
"registry.redhat.io/odf4/odf-operator-bundle@sha256:182d966cb488b188075d2ffd3f6451eec179429ac4bff55e2e26245049953a82"
"4.10.5"

parts of the output omitted

+
+ +
+ +

The grpcurl binary has been obtained from here

+ +

Step 1.2. How to build an Offline Registry [Optional]

+

In this section we are going to highlight an example on how to create an Offline Registry that can be used to highlight the principle of mirroring the container base images.

+ +

Creating the working directory of the Offline Registry:

+ +
+
+
$ mkdir -p ${HOME}/registry/{auth,certs,data}
+
+ +
+ +

Creating the username and password used by the Offline Registry:

+ +
+
+
$ htpasswd -bBc ${HOME}/registry/auth/htpasswd <username><password>
+
+ +
+ +

Please note that the values for the `<username>` and `<password>` placeholders should be updated with your particular ones.

+ +

Creating the certificate used by the Offline Registry:

+ +
+
+
$ export host_fqdn=inbacrnrdl0101.offline.redhat.lan
$ cert_c="AT"
$ cert_s="WIEN"
$ cert_l="WIEN"
$ cert_o="TelcoEngineering"
$ cert_ou="RedHat"
$ cert_cn="${host_fqdn}"
$ openssl req \
-newkey rsa:4096 \
-nodes \
-sha256 \
-keyout ${HOME}/registry/certs/domain.key \
-x509 \
-days 365 \
-out ${HOME}/registry/certs/domain.crt \
-addext "subjectAltName = DNS:${host_fqdn}" \
-subj "/C=${cert_c}/ST=${cert_s}/L=${cert_l}/O=${cert_o}/OU=${cert_ou}/CN=${cert_cn}"
+
+ +
+ +

Please note that the values used in the certificate creation should be updated with your particular ones. Start the Offline Registry container:

+ +
+
+
$ podman run -d --name ocpdiscon-registry -p 5050:5000 \
-e REGISTRY_AUTH=htpasswd \
-e REGISTRY_AUTH_HTPASSWD_REALM=Registry \
-e REGISTRY_HTTP_SECRET=ALongRandomSecretForRegistry \
-e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \
-e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
-e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
-e REGISTRY_COMPATIBILITY_SCHEMA1_ENABLED=true \
-e REGISTRY_STORAGE_DELETE_ENABLED=true \
-v ${HOME}/registry/data:/var/lib/registry:z \
-v ${HOME}/registry/auth:/auth:z \
-v ${HOME}/registry/certs:/certs:z docker.io/library/registry:2.8.1
+
+ +
+ +

Based on the version list determined section Step 1.1 How to check the operator version included in the redhat-operator-index channel we are going to build the imageset-config.yaml in order to mirror the container base images.

+ +

Step 1.3. Building the credential file for the mirroring process

+

In this section we are going to build the config.json file used in the mirroring process by the oc-mirror cli to gain authorization to the registry.redhat.io and to the Offline Registry.

+ +

In the Offline Registry directory, create the config.json file:

+ +
+
+
$ touch ${HOME}/registry/config.json
+
+ +
+ +

Open the browser and go to the following link, as described here, to obtain the pull-secret.json file, which we are going to edit and save as config.json.

+ +

The config.json file structure should be close to the following format:

+ +
+
+
{
"auths": {
"cloud.openshift.com": {
"auth": "<base64-secret>"
},
"inbacrnrdl0101.offline.redhat.lan:5051": {
"auth": "<base64-secret>"
},
"quay.io": {
"auth": "<base64-secret>"
},
"registry.connect.redhat.com": {
"auth": "<base64-secret>"
},
"registry.fedoraproject.org": {
"auth": "<base64-secret>"
},
"registry.redhat.io": {
"auth": "<base64-secret>"
}
}
}
+
+ +
+ +

Now we will have to let oc-mirror cli use the config.json file:

+ +
+
+
$ export DOCKER_CONFIG=${HOME}/registry/config.json
+
+ +
+ +

Step 2. How to control the storage usage of the mirror

+

Step 2.1. How to mirror container base images with a compact filesystem usage

+

Mirror the container base images operator to the .tar file under the archive directory:

+ +
+
+
$ oc-mirror --config imageset-config.yaml file://archive
+
+ +
+ +

The contents of a sample imageset-config.yaml file to be used in the command above are:

+ +
+
+
$ cat imageset-config.yaml
apiVersion: mirror.openshift.io/v1alpha2
kind: ImageSetConfiguration
mirror:
operators:
- catalog: registry.redhat.io/redhat/redhat-operator-index:v4.10
targetName: 'rh-index'
targetTag: v1-test
full: false
packages:
- name: odf-operator
packages:
- name: odf-operator
minVersion: '4.10.4'
maxVersion: '4.10.4'
channels:
- name: 'stable-4.10'
+
+ +
+ +

Check the archive mirror_seq1_000000.tar size after the entire mirroring process has been finished:

+ +
+
+
$ du -h ./archive/mirror_seq1_000000.tar
6.2G ./archive/mirror_seq1_000000.tar
+
+ +
+ +

Before proceeding with any kind of mirroring steps, we are going to have a closer look at the Offline Registry status:

+ +
+
+
$ tree ${HOME}/registry/
registry/
├── auth
│ └── htpasswd
├── certs
│ ├── domain.crt
│ └── domain.key
└── data

3 directories, 3 files
+
+ +
+ +

As we can observe, the ./data/ directory is empty at this point and we are going to proceed with the container base image mirroring procedure.

+ +

Mirror the container base images from the file mirror_seq1_000000.tar to the offline registry.

+ +
+
+
$ export REGISTRY_NAME=inbacrnrdl0101.offline.redhat.lan
$ export REGISTRY_NAMESPACE=olm-mirror
$ export REGISTRY_PORT=5050
$ oc-mirror --from ./archive docker://${REGISTRY_NAME}:${REGISTRY_PORT}/${REGISTRY_NAMESPACE}
+
+ +
+ +

Check the content of the offline registry:

+ +
+
+
$ curl -X GET -u <username>:<password> https://${REGISTRY_NAME}:${REGISTRY_PORT}/v2/_catalog --insecure | jq .
{
"repositories": [
"karmab/curl",
"karmab/kubectl",
"karmab/mdns-publisher",
"karmab/origin-coredns",
"karmab/origin-keepalived-ipfailover",
"ocp-release",
"olm-mirror/odf4/cephcsi-rhel8",
"olm-mirror/odf4/mcg-core-rhel8",
"olm-mirror/odf4/mcg-operator-bundle",
"olm-mirror/odf4/mcg-rhel8-operator",
"olm-mirror/odf4/ocs-must-gather-rhel8",
"olm-mirror/odf4/ocs-operator-bundle",
"olm-mirror/odf4/ocs-rhel8-operator",
"olm-mirror/odf4/odf-console-rhel8",
"olm-mirror/odf4/odf-csi-addons-operator-bundle",
"olm-mirror/odf4/odf-csi-addons-rhel8-operator",
"olm-mirror/odf4/odf-csi-addons-sidecar-rhel8",
"olm-mirror/odf4/odf-operator-bundle",
"olm-mirror/odf4/odf-rhel8-operator",
"olm-mirror/odf4/rook-ceph-rhel8-operator",
"olm-mirror/odf4/volume-replication-rhel8-operator",
"olm-mirror/openshift4/ose-csi-external-attacher",
"olm-mirror/openshift4/ose-csi-external-provisioner",
"olm-mirror/openshift4/ose-csi-external-resizer",
"olm-mirror/openshift4/ose-csi-external-snapshotter",
"olm-mirror/openshift4/ose-csi-node-driver-registrar",
"olm-mirror/openshift4/ose-kube-rbac-proxy",
"olm-mirror/redhat/rh-index",
"olm-mirror/rhceph/rhceph-5-rhel8",
"olm-mirror/rhel8/postgresql-12"
]
}
+
+ +
+ +

As can be observed in the above output, we have a header containing base images whose purpose is to be used in the OCP deployment; those images are not usable in the odf-operator installation. This header is the following:

+ +
+
+
$ curl -X GET -u <username>:<password> https://${REGISTRY_NAME}:${REGISTRY_PORT}/v2/_catalog --insecure | jq .                                     
{
"repositories": [
"karmab/curl",
"karmab/kubectl",
"karmab/mdns-publisher",
"karmab/origin-coredns",
"karmab/origin-keepalived-ipfailover",
"ocp-release"
]
}
+
+ +
+ +

This header’s filesystem usage:

+ +
+
+
$ du -h ${HOME}/registry/data/ --max-depth=1
13G registry/data/docker
13G registry/data/
+
+ +
+ +

Highlighted above is the content of the mirrored odf-operator. Now we are going to evaluate the filesystem used at this moment by the above content of the offline registry.

+ +
+
+
$ du -h ${HOME}/registry/data/ --max-depth=1
19G registry/data/docker
19G registry/data/
+
+ +
+ +

Step 2.2. How to make use of the Offline Registry content to your OCP cluster

+

Once the mirroring of the operators has finished, the process creates the following directory: oc-mirror-workspace/results-1667747309, from which we will use the following two files to apply to the OCP cluster:

+ +
+
+
$ cat oc-mirror-workspace/results-1667747309/catalogSource-rh-index.yaml
apiVersion: operators.coreos.com/v1alpha1
kind: CatalogSource
metadata:
name: rh-index
namespace: openshift-marketplace
spec:
image: inbacrnrdl0101.offline.redhat.lan:5050/olm-mirror/redhat/rh-index:v1-test
sourceType: grpc
+
+ +
+ +
+
+
$ cat oc-mirror-workspace/results-1667747309/imageContentSourcePolicy.yaml
---
apiVersion: operator.openshift.io/v1alpha1
kind: ImageContentSourcePolicy
metadata:
labels:
operators.openshift.org/catalog: "true"
name: operator-0
spec:
repositoryDigestMirrors:
- mirrors:
- inbacrnrdl0101.offline.redhat.lan:5050/olm-mirror/openshift4
source: registry.redhat.io/openshift4
- mirrors:
- inbacrnrdl0101.offline.redhat.lan:5050/olm-mirror/odf4
source: registry.redhat.io/odf4
- mirrors:
- inbacrnrdl0101.offline.redhat.lan:5050/olm-mirror/rhel8
source: registry.redhat.io/rhel8
- mirrors:
- inbacrnrdl0101.offline.redhat.lan:5050/olm-mirror/rhceph
source: registry.redhat.io/rhceph
- mirrors:
- inbacrnrdl0101.offline.redhat.lan:5050/olm-mirror/redhat
source: registry.redhat.io/redhat
+
+ +
+ +

+ +

Looking at the applied catalog source, we can observe that the endpoint corresponds to the offline registry used: inbacrnrdl0101.offline.redhat.lan:5050.

+ +

+ +

In order to validate that there are no dependencies missing from the odf-operator mirror, we will proceed with installing the operator on the OCP cluster.

+ +
    +
  • Start the installation of odf-operator:
  • +
+
    +
  • odf-operator is installed:
  • +
+

+ +
    +
  • The installed odf-operator subscription used:
  • +
+
    +
  • odf-operator pods status:
  • +
+
+
+
$ oc get pods -n openshift-storage
NAME READY STATUS RESTARTS AGE
csi-addons-controller-manager-78c75c4c7-wq4p5 2/2 Running 2 (37m ago) 77m
noobaa-operator-6fdd894554-ptnm9 1/1 Running 0 78m
ocs-metrics-exporter-775b6d4bdf-k5d52 1/1 Running 0 78m
ocs-operator-6bf7b6dfc6-zwwzv 1/1 Running 2 (37m ago) 78m
odf-console-6dff658495-dcfxh 1/1 Running 0 78m
odf-operator-controller-manager-54ddd5db9c-mpkwt 2/2 Running 2 (37m ago) 78m
rook-ceph-operator-57bfbcc9d-hb9tk 1/1 Running 0 78m
+
+ +
+ +

Step 2.3. How to mirror container base images with an uncompact filesystem usage

+

Now we evaluate the filesystem usage when the imageset-config.yaml parameter full: true is used, the imageset-config.yaml file will have the following content:

+ +
+
+
$ cat imageset-config.yaml
apiVersion: mirror.openshift.io/v1alpha2
kind: ImageSetConfiguration
mirror:
operators:
- catalog: registry.redhat.io/redhat/redhat-operator-index:v4.10
targetName: 'rh-index'
targetTag: v1-test
full: true
packages:
- name: odf-operator
packages:
- name: odf-operator
minVersion: '4.10.4'
maxVersion: '4.10.4'
channels:
- name: 'stable-4.10'
+
+ +
+ +

Proceed to mirror the odf-operator container base images to the .tar file as highlighted above:

+ +
+
+
$ oc-mirror --config imageset-config.yaml file://archive
+
+ +
+ +

Once the mirroring has completed, validate the .tar file size in comparison with the previous check:

+ +
+
+
$ du -h ./archive/mirror_seq1_000000.tar
32G ./archive/mirror_seq1_000000.tar
+
+ +
+ +
    +
  • Filesystem usage of the .tar file comparison:
  • +
+ + + + + + + + + + + + + + + + + + + + +
| imageset-config.yaml differences | mirror_seq1_000000.tar [Gb] | Notes |
| --- | --- | --- |
| with full: true | 32 | |
| with full: false | 6.2 | 80.625% decrease |
+

We can observe that the .tar file size was reduced by 80.625% for the same content as in the previous example, Step 2.1. How to mirror container base images with a compact filesystem usage.

+ +

Mirror the container base images from the newly created file mirror_seq1_000000.tar to the offline registry. Be advised that the following values are an example representation for the purpose of this article, in your case the values might differ.

+ +
+
+
$ export REGISTRY_NAME=inbacrnrdl0101.offline.redhat.lan
$ export REGISTRY_NAMESPACE=olm-mirror
$ export REGISTRY_PORT=5050
$ oc-mirror --from ./archive docker://${REGISTRY_NAME}:${REGISTRY_PORT}/${REGISTRY_NAMESPACE}
+
+ +
+ +

Check the content of the offline registry:

+ +
+
+
$ curl -X GET -u <username>:<password> https://${REGISTRY_NAME}:${REGISTRY_PORT}/v2/_catalog --insecure | jq .
{
"repositories": [
"karmab/curl",
"karmab/kubectl",
"karmab/mdns-publisher",
"karmab/origin-coredns",
"karmab/origin-keepalived-ipfailover",
"ocp-release",
"olm-mirror/odf4/cephcsi-rhel8",
"olm-mirror/odf4/mcg-core-rhel8",
"olm-mirror/odf4/mcg-operator-bundle",
"olm-mirror/odf4/mcg-rhel8-operator",
"olm-mirror/odf4/ocs-must-gather-rhel8",
"olm-mirror/odf4/ocs-operator-bundle",
"olm-mirror/odf4/ocs-rhel8-operator",
"olm-mirror/odf4/odf-console-rhel8",
"olm-mirror/odf4/odf-csi-addons-operator-bundle",
"olm-mirror/odf4/odf-csi-addons-rhel8-operator",
"olm-mirror/odf4/odf-csi-addons-sidecar-rhel8",
"olm-mirror/odf4/odf-operator-bundle",
"olm-mirror/odf4/odf-rhel8-operator",
"olm-mirror/odf4/rook-ceph-rhel8-operator",
"olm-mirror/odf4/volume-replication-rhel8-operator",
"olm-mirror/openshift4/ose-csi-external-attacher",
"olm-mirror/openshift4/ose-csi-external-provisioner",
"olm-mirror/openshift4/ose-csi-external-resizer",
"olm-mirror/openshift4/ose-csi-external-snapshotter",
"olm-mirror/openshift4/ose-csi-node-driver-registrar",
"olm-mirror/openshift4/ose-kube-rbac-proxy",
"olm-mirror/redhat/rh-index",
"olm-mirror/rhceph/rhceph-5-rhel8",
"olm-mirror/rhel8/postgresql-12"
]
}
+
+ +
+ +

We can observe that the content of the offline registry is identical.

+ +

Validate the file system used by the container base images mirrored to the offline registry:

+ +
+
+
$ du -h ${HOME}/registry/data/ --max-depth=1
44G registry/data/docker
44G registry/data/
+
+ +
+ +
    +
  • Filesystem usage of the uncompressed container base images usage comparison:
  • +
+ + + + + + + + + + + + + + + + + + + + +
| imageset-config.yaml differences | registry data usage [Gb] | Notes |
| --- | --- | --- |
| with full: true | 44 | |
| with full: false | 19 | 56.8182% decrease |
+

We can observe that the offline registry container base image content size was reduced by 56.8182% for the same content as in the previous example.

+ +

Conclusions

+

In conclusion, by leveraging the version control and restricting the container base image download we are able to optimize the filesystem usage of the offline registry.

+ +
+ + \ No newline at end of file diff --git a/_posts/redhat/2022-12-21-custom-queries-for-observability-using-grafana-and-apis.md b/_posts/redhat/2022-12-21-custom-queries-for-observability-using-grafana-and-apis.md new file mode 100644 index 0000000..6476c41 --- /dev/null +++ b/_posts/redhat/2022-12-21-custom-queries-for-observability-using-grafana-and-apis.md @@ -0,0 +1,22 @@ +--- +author: RedHat Hybrid cloud blog +author_tag: redhat +blog_subtitle: Red Hat open hybrid cloud blog +blog_title: Hybrid cloud blog +blog_url: https://content.cloud.redhat.com/blog +category: redhat +date: '2022-12-21 14:00:00' +layout: post +original_url: https://content.cloud.redhat.com/blog/custom-queries-for-observability-using-grafana-and-apis +slug: custom-queries-for-observability-using-grafana-and-apis +title: Custom Queries for Observability Using Grafana and APIs +--- + + + +

Introduction

+

Monitoring is an important aspect to understand and gain insight into your environment. Red Hat Advanced Cluster Management for Kubernetes (RHACM) is a great tool to get that visibility into all your clusters. By using the RHACM observability service, you can get critical metrics from all your clusters out-of-the-box without any additional effort.

+ + \ No newline at end of file diff --git a/_posts/redhat/2022-12-21-kbe-insider-interviews-katie-gamanji-of-apple.md b/_posts/redhat/2022-12-21-kbe-insider-interviews-katie-gamanji-of-apple.md new file mode 100644 index 0000000..87052c1 --- /dev/null +++ b/_posts/redhat/2022-12-21-kbe-insider-interviews-katie-gamanji-of-apple.md @@ -0,0 +1,29 @@ +--- +author: RedHat Hybrid cloud blog +author_tag: redhat +blog_subtitle: Red Hat open hybrid cloud blog +blog_title: Hybrid cloud blog +blog_url: https://content.cloud.redhat.com/blog +category: redhat +date: '2022-12-21 14:00:00' +layout: post +original_url: https://content.cloud.redhat.com/blog/kbe-insider-interviews-katie-gamanji-of-apple +slug: kbe-insider-interviews-katie-gamanji-of-apple +title: KBE Insider Interviews Katie Gamanji of Apple +--- + + + +
+
+
+ +
+ +
+ +
+ + \ No newline at end of file diff --git a/_posts/redhat/2022-12-23-kbe-insider-interviews-ronen-dar-of-runai.md b/_posts/redhat/2022-12-23-kbe-insider-interviews-ronen-dar-of-runai.md new file mode 100644 index 0000000..a399e7e --- /dev/null +++ b/_posts/redhat/2022-12-23-kbe-insider-interviews-ronen-dar-of-runai.md @@ -0,0 +1,29 @@ +--- +author: RedHat Hybrid cloud blog +author_tag: redhat +blog_subtitle: Red Hat open hybrid cloud blog +blog_title: Hybrid cloud blog +blog_url: https://content.cloud.redhat.com/blog +category: redhat +date: '2022-12-23 14:00:00' +layout: post +original_url: https://content.cloud.redhat.com/blog/kbe-insider-interviews-ronen-dar-of-runai +slug: kbe-insider-interviews-ronen-dar-of-run-ai +title: KBE Insider Interviews Ronen Dar of Run-AI +--- + + + +
+
+
+ +
+ +
+ +
+ + \ No newline at end of file diff --git a/_posts/redhat/2022-12-26-learn-about-progressive-application-delivery-with-gitops.md b/_posts/redhat/2022-12-26-learn-about-progressive-application-delivery-with-gitops.md new file mode 100644 index 0000000..988e1df --- /dev/null +++ b/_posts/redhat/2022-12-26-learn-about-progressive-application-delivery-with-gitops.md @@ -0,0 +1,29 @@ +--- +author: RedHat Hybrid cloud blog +author_tag: redhat +blog_subtitle: Red Hat open hybrid cloud blog +blog_title: Hybrid cloud blog +blog_url: https://content.cloud.redhat.com/blog +category: redhat +date: '2022-12-26 14:00:00' +layout: post +original_url: https://content.cloud.redhat.com/blog/learn-about-progressive-application-delivery-with-gitops +slug: learn-about-progressive-application-delivery-with-gitops +title: Learn about Progressive Application Delivery with GitOps +--- + + + +
+
+
+ +
+ +
+ +
+ + \ No newline at end of file diff --git a/_posts/redhat/2022-12-28-whats-it-like-to-work-at-red-hat.md b/_posts/redhat/2022-12-28-whats-it-like-to-work-at-red-hat.md new file mode 100644 index 0000000..bde3514 --- /dev/null +++ b/_posts/redhat/2022-12-28-whats-it-like-to-work-at-red-hat.md @@ -0,0 +1,29 @@ +--- +author: RedHat Hybrid cloud blog +author_tag: redhat +blog_subtitle: Red Hat open hybrid cloud blog +blog_title: Hybrid cloud blog +blog_url: https://content.cloud.redhat.com/blog +category: redhat +date: '2022-12-28 14:00:00' +layout: post +original_url: https://content.cloud.redhat.com/blog/whats-it-like-to-work-at-red-hat +slug: what-s-it-like-to-work-at-red-hat- +title: What's it like to Work at Red Hat? +--- + + + +
+
+
+ +
+ +
+ +
+ + \ No newline at end of file diff --git a/_posts/redhat/2023-1-2-what-is-clair.md b/_posts/redhat/2023-1-2-what-is-clair.md new file mode 100644 index 0000000..31915e0 --- /dev/null +++ b/_posts/redhat/2023-1-2-what-is-clair.md @@ -0,0 +1,21 @@ +--- +author: RedHat Hybrid cloud blog +author_tag: redhat +blog_subtitle: Red Hat open hybrid cloud blog +blog_title: Hybrid cloud blog +blog_url: https://content.cloud.redhat.com/blog +category: redhat +date: '2023-01-02 14:00:00' +layout: post +original_url: https://content.cloud.redhat.com/blog/what-is-clair +slug: leveraging-chaos-harnessing-containers-in-the-wild-on-quay-io-to-improve-security +title: Leveraging Chaos- Harnessing Containers in the Wild on quay.io to Improve Security +--- + + + +

Clair is a static analyzer that is used to index the contents of container images and provide vulnerability matching based on what it found. Clair is not only bundled with Red Hat Quay and used by Quay.io but also used in various other Red Hat products with adoption increasing in an effort to offer customers a more standardized view of their security issues. As well as being used internally at Red Hat, Clair is used by many external teams and we always have a robust open-source community depending on us and working with us.

+ + \ No newline at end of file diff --git a/_posts/redhat/2023-1-4-deploying-triliovault-for-kubernetes-with-openshift-red-hat-advanced-cluster-management-policies.md b/_posts/redhat/2023-1-4-deploying-triliovault-for-kubernetes-with-openshift-red-hat-advanced-cluster-management-policies.md new file mode 100644 index 0000000..3ddb566 --- /dev/null +++ b/_posts/redhat/2023-1-4-deploying-triliovault-for-kubernetes-with-openshift-red-hat-advanced-cluster-management-policies.md @@ -0,0 +1,24 @@ +--- +author: RedHat Hybrid cloud blog +author_tag: redhat +blog_subtitle: Red Hat open hybrid cloud blog +blog_title: Hybrid cloud blog +blog_url: https://content.cloud.redhat.com/blog +category: redhat +date: '2023-01-04 14:00:00' +layout: post +original_url: https://content.cloud.redhat.com/blog/deploying-triliovault-for-kubernetes-with-openshift-red-hat-advanced-cluster-management-policies +slug: deploying-triliovault-for-kubernetes-with-openshift-red-hat-advanced-cluster-management-policies +title: Deploying TrilioVault For Kubernetes with OpenShift & Red Hat Advanced Cluster + Management Policies +--- + + + +

This article was written by Sachin Kulkarni of Trilio.

+

Introduction

+

Organizations are moving towards Kubernetes as an operating environment, and protecting the data is paramount. It’s their top-most priority to protect the business-critical data, and set up a business continuity plan in case of a disaster. A cloud-native backup and Disaster Recovery (DR) solution is the need of the hour, and the answer is TrilioVault for Kubernetes (TVK).

+ + \ No newline at end of file diff --git a/scripts/generate_posts.py b/scripts/generate_posts.py index 587ab0d..ec4f1f2 100644 --- a/scripts/generate_posts.py +++ b/scripts/generate_posts.py @@ -153,7 +153,7 @@ def parse_feeds(authors, output_dir, test=False): if "feeds" in author: feeds = author['feeds'] else: - feeds = author['feed'] + feeds = [author['feed']] for author_feed in feeds: feed = feedparser.parse(author_feed)