diff --git a/preview-fall2024-info/.gitignore b/preview-fall2024-info/.gitignore new file mode 100644 index 000000000..bcbe11748 --- /dev/null +++ b/preview-fall2024-info/.gitignore @@ -0,0 +1,13 @@ +_site +.jekyll-cache/ +Gemfile.lock + +vendor/ +.DS_Store + +.idea/ +.vscode/ +.vagrant/ + +# The staff/schedules symlink is created by the GHA build_deploy.yml +/staff/schedules diff --git a/preview-fall2024-info/.htaccess b/preview-fall2024-info/.htaccess new file mode 100644 index 000000000..6e5be774a --- /dev/null +++ b/preview-fall2024-info/.htaccess @@ -0,0 +1,8 @@ +# Display 404.html on 404 +ErrorDocument 404 /404.html + +# Redirect ".shtml" requests +RewriteEngine on +RewriteCond %{REQUEST_FILENAME} !-d +RewriteCond %{REQUEST_URI} ^(.+)\.shtml$ +RewriteRule ^ %1 [R=permanent,L] diff --git a/preview-fall2024-info/.well-known/issuer.jwks b/preview-fall2024-info/.well-known/issuer.jwks new file mode 100644 index 000000000..ad50a00de --- /dev/null +++ b/preview-fall2024-info/.well-known/issuer.jwks @@ -0,0 +1,84 @@ +{ + "keys": [ + { + "alg": "ES256", + "crv": "P-256", + "kid": "7b75", + "kty": "EC", + "use": "sig", + "x": "Y8OnqyMma6aACwh2PqA-vDgdt4c-ql15WQJx7-e4y6g=", + "y": "-Z98R8E5OpQHOuoiDfNB_6oLYZIaXWr7Qdhz5HbwYu4=" + }, + { + "alg": "RS256", + "e": "AQAB", + "kid": "a8e6", + "kty": "RSA", + "n": "5H21Wrgx-jQlh344Q7GhsMlWtQKLWuIBzdA_cDZaQytLaMnUAQoMkIQpdkO_dqe8KEFLvFlqSloqe_unkWEQOUPB5F2UJdb8v71lhHnpYkTTuBMScrF-qWBYdbFz20wKfYUhW5avruVRS447ag0yRrkKR9s3jI8XDlmS1t51bhCxcijOxYtIAnqDeYUJLyxaE2_EKvXARfktmCYU_Eu0FP6S45NH3eDFGzFM10eb9ZtyPnoMIZ7nLB1vb4agEIz58koBYFrdc3b87TPan_GQT4LZVYGVPMnIBQCLXd7xVrBcp7rEw14xTYj1q_q38xYknvgwO2BBFY2oyjy8zFKeqw==", + "use": "sig" + }, + { + "alg": "ES256", + "crv": "P-256", + "kid": "5083", + "kty": "EC", + "use": "sig", + "x": "sbr1QP9EIXLir5TkXXqstgVdK8HNizKOvNjQvaBwwHs=", + "y": "-gluJXMb47rycOMX7SD9SZMnNq1qsW3lzIR5rbg_Vg8=" + }, + { + "alg": "ES256", + "crv": "P-256", + "kid": "c048", + "kty": "EC", + "use": "sig", + "x": "e6Ii7padcXnlp_ujUTi1lzHoA06yPQpzomPyKhmyfN4=", + "y": "cudHvNdjdDP9pcCH9ZdcuExf89_CyA5wrr4cqSC8S_g=" + }, + { + "alg": "ES256", + "crv": "P-256", + "kid": "e75f", + "kty": "EC", + "use": "sig", + "x": "AfG15od_pPtQjPHD1RGRWhnZvnZzc8RtjK0ahSeSdfo=", + "y": "e70aPtMe_RlMayju4zuKlOoNvwX3iL4GwrEHpSjSUPY=" + }, + { + "alg": "ES256", + "crv": "P-256", + "kid": "e78e", + "kty": "EC", + "use": "sig", + "x": "jS5cVffYOKP9fXKysPiT4UhLGsXpsooxXW3kZHFM6aM=", + "y": "9LWogFfXPRUyGzcCQ9haXbBv-IKx_M5M6KX50M6zrWM=" + }, + { + "alg": "ES256", + "crv": "P-256", + "kid": "8d9b", + "kty": "EC", + "use": "sig", + "x": "N45Fz5S8OXUvRvkziV3IvRku4o84-hfYdbu9RWdapwY=", + "y": "haNd4VKCj3_8AHe0luUTWx1XJk5sKDqciiLR35_oAHo=" + }, + { + "alg": "ES256", + "crv": "P-256", + "kid": "d402", + "kty": "EC", + "use": "sig", + "x": "eDK50R6M81DUUWU9JIW9obA02U4sRZQSPR44pcFnkSI=", + "y": "P8-SthnBHG5iwfSA1meV5ZV4tvobD4_6Mb_cPgiC9JA=" + }, + { + "alg": "ES256", + "crv": "P-256", + "kid": "ad6b", + "kty": "EC", + "use": "sig", + "x": "Bg08GpOLHvreC49sjFEL3DuCSw00wTdpa8QnqRq0-Rs=", + "y": "o27QrMjGIBXsaZLaGLYBysMO0AoEL24YONrzxPydbQ4=" + } + ] +} diff --git a/preview-fall2024-info/.well-known/openid-configuration b/preview-fall2024-info/.well-known/openid-configuration new file mode 100644 index 000000000..2151cf100 --- /dev/null +++ b/preview-fall2024-info/.well-known/openid-configuration @@ -0,0 +1,24 @@ +{ + "issuer":"https://chtc.cs.wisc.edu", + "jwks_uri":"https://chtc.cs.wisc.edu/.well-known/issuer.jwks", + "token_endpoint": 
"https://osdf-chtc-issuer.chtc.chtc.io/scitokens-server/token", + "userinfo_endpoint": "https://osdf-chtc-issuer.chtc.chtc.io/scitokens-server/userinfo", + "registration_endpoint": "https://osdf-chtc-issuer.chtc.chtc.io/scitokens-server/oidc-cm", + "device_authorization_endpoint": "https://osdf-chtc-issuer.chtc.chtc.io/scitokens-server/device_authorization", + "token_endpoint_auth_methods_supported": [ + "client_secret_post", + "client_secret_basic" + ], + "scopes_supported": [ + "wlcg", + "storage.read:/", + "storage.write:/", + "org.cilogon.userinfo", + "openid", + "offline_access" + ], + "grant_types_supported": [ + "refresh_token", + "urn:ietf:params:oauth:grant-type:device_code" + ] +} diff --git a/preview-fall2024-info/2024-chtc-fellows.html b/preview-fall2024-info/2024-chtc-fellows.html new file mode 100644 index 000000000..086bbeaab --- /dev/null +++ b/preview-fall2024-info/2024-chtc-fellows.html @@ -0,0 +1,398 @@ + + + + + + +CHTC Launches First Fellow Program + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ CHTC Launches First Fellow Program +

+
+CHTC Team Meeting: Fellow and Intern Project Presentations +
CHTC Team Meeting: Fellow and Intern Project Presentations
+
+ +

As Charles Bowden astutely put it, “summertime is always the best of what might be,” and the Center for High Throughput +Computing (CHTC) couldn’t agree more. Enter the Fellows Program: a new 12-week summer +initiative where participants collaborate with mentors to each deliver a project that will contribute to high throughput +computing in support of the nation’s scientific community.

+ +
+ Wil Cram presenting his project +
Wil Cram presenting his project,
“Schedd performance analysis”
+
+ +

Aimed at providing extraordinary opportunities for undergraduate and graduate students, this program offers a chance to collaboratively develop software for high throughput computing and cyberinfrastructure, operate complex service environments, and facilitate the use of large-scale computational services. Through hands-on experience and training, the fellows will gain technical, research, and collaboration skills, along with insight into how scientists employ research computing as a tool to advance their studies.

+ +

The summer program kicked off on June 3rd with 8 fellows, 10 mentors, CHTC leaders, and the camaraderie of coffee and doughnuts. Program director Brian Bockelman inaugurated the team with a welcoming address, followed shortly by mentor meetings, reviews of procedures, schedules, and HR policies, and breakout sessions for mentor/fellow onboarding.

+ +
+ Neha Talluri presenting her project, “Where In the World Am I” +
Neha Talluri presenting her project,
“Where In the World Am I”
+
+ +

Three days later, the fellows gave the CHTC team the first of their three presentations, detailing their projects for the upcoming 12 weeks.

+ +

In addition to the initial presentation during the first week of the program, the fellows will deliver two more talks: the first at High Throughput Computing 2024 (HTC24), where they will give lightning talks about their projects and the challenges they are addressing, and a final presentation at the end of the program to share the results of their work and what they learned.

+ +

From a deep pool of over 80 applicants, only eight fellows were selected: Ben Staehle, Kristina Zhao, Neha Talluri, Patrick Brophy, Pratham Patel, Ryan Boone, Thinh Nguyen, and Wil Cram. You can read more about their projects here.

+ +
+
+Pratham +
+
+Fellows +
+
+

Fellows at their first presentation, introducing themselves and their projects.

+
+
+ +

Through mentorship and support, the CHTC Fellows program aims to develop the Fellows’ potential and contribute to research computing. Whether in research, creativity, or social impact, this fellowship strives to foster the next generation of budding engineers and scientists.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/404.html b/preview-fall2024-info/404.html new file mode 100644 index 000000000..c184f5f4d --- /dev/null +++ b/preview-fall2024-info/404.html @@ -0,0 +1,341 @@ + + + + + + +404 Page Not Found + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+

+ 404 Page Not Found +

+ +

The page you were looking for was not found.

+ +

Please try the search bar, and let us know at chtc@cs.wisc.edu if you cannot find the page you are looking for.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/AMNH-Workshops.html b/preview-fall2024-info/AMNH-Workshops.html new file mode 100644 index 000000000..08f12a13b --- /dev/null +++ b/preview-fall2024-info/AMNH-Workshops.html @@ -0,0 +1,410 @@ + + + + + + +The American Museum of Natural History Ramps Up Education on Research Computing + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ The American Museum of Natural History Ramps Up Education on Research Computing +

+

With a multi-day workshop, the museum strives to expand the scale of its educational and training services by bringing additional computing capacity to New York-area researchers and tapping into the power of high throughput computing (HTC).

+ +

+ +

After “falling in love with the system” during the 2023 OSG School, American Museum of Natural History (AMNH) bioinformatics specialist Dean Bobo wondered if he could jump on an offer to bring New York institutions’ and researchers’ attention to the OSPool, a pool of computing capacity freely available to researchers affiliated with U.S. institutions. Research Facilitation Lead Christina Koch had mentioned the capacity of the National Science Foundation (NSF)-funded Partnership to Advance Throughput Computing (PATh) project to help institutions put on local trainings. So he reached out to Koch — and indeed the offer did stand!

+ +

The PATh project is committed to advancing the state of the art and adoption of HTC. As part of this commitment, the project annually offers the OSG School at UW–Madison, which is open to participants who want to transform their research and scale out utilizing HTC. AMNH wanted to host a shortened version of the OSG School for their researchers with the help of the PATh team.

+ +

A Successful Workshop

+ +

Through Koch, Bobo connected with Research Computing Facilitator Rachel Lombardi who helped him plan the OSPool workshop on the second day of the museum’s multi-day workshop. +“It was for our own museum community, but for other outside institutions as well,” Bobo says. So, Bobo arranged a computational skills training on November 3 and 6 at the AMNH in +New York, New York. This was the first time the museum arranged a multi-day workshop with one day centered around OSPool resources.

+ +

The first day of the two-day training included a workshop teaching basic computational skills to an audience of students from the museum’s graduate program, as well as graduate students and researchers from various institutions around New York City. About 20 people chose to attend the second day, which involved training on OSPool resources. That day, Lombardi led a workshop likened to an OSG School crash course, with lectures covering software and container basics, principles of job submission, troubleshooting, learning about the jobs a user is running, and next steps researchers could take.

+ +
+ Rachel Lombardi during her presentation. +
Rachel Lombardi during her presentation. +
+
+ +

The workshop was a great success, which Bobo measured by the number of eyes it opened, including “folks who are completely new to HTC but also people who are more experienced with high performance computing on our local HPCs. They realized the utility and the capabilities of the OSPool and the resources therein. Some folks after the workshop said that they would give it a shot, which is great for me to hear. I feel like all this work was worth it because there are going to be attempts to get their software and pipelines lifted over to the OSPool.”

+ +

Empowering the HTC Community

+ +

The AMNH is looking to start hosting more OSPool events, bringing an event inspired by the OSG School to New York, and this workshop was the first step toward future OSPool workshops. From leading a section of the workshop, Lombardi learned “what resources [the AMNH] would need from PATh facilitators to run its own OSPool trainings.” The goal is to “empower them to do these things [conduct training] without necessarily waiting for the annual OSG School,” notes Lombardi. Bobo picked up a few valuable lessons too: he gained insights about community outreach and a better understanding of instructing on HTC and utilizing OSPool capacity.

+ +

In this sense, the workshops the AMNH hosted — with support from PATh — reflected the ideal of “training the trainers” to scale out the facilitation effort and share computing +capacity. “It won’t be sustainable to come in person and support a training for everyone who asks, so we’re thinking about how to develop and publish easy-to-use training materials +that people could use on their own, a formal process of (remote) coaching and support, and even a ‘train the trainers’ program where we could build community among people who want +to run an OSPool training,” Koch explains.

+ +

A Continuing Partnership

+ +

Even before arranging the two-day workshop, the AMNH already had a strong partnership with PATh and the OSG Consortium, which provides distributed HTC services to the research community, Bobo says. The museum contributes its spare CPU power to the OSPool, and museum staff as well as PATh system administrators and facilitators communicate regularly. So far the museum has contributed over 15.5 million core hours to the OSPool.

+ +

One way the museum wants to utilize the OSPool capacity is for a genomic surveillance tool that surveys the population dynamics of diseases like COVID-19, RSV, influenza, or other emerging diseases. “We’ve been using this method of [measuring] diversity called K Hill. We’re looking to port that software into the OSPool because it’s computationally expensive to do this every day, but that becomes feasible with the OSPool. We would like to make this tool a public resource, but we would have to work with the PATh facilitators to figure out if this is logistically possible. We want to make our tools ported to the OSPool so that you don’t need your own dedicated cluster to run an analysis,” Bobo explains.

+ +

Future Directions

+ +

When asked what’s in store for the future of this partnership, Bobo says he wants it to grow by putting on workshops that mirror the OSG School, creating a convenient, nearby option for investigators in New York for whom the school may be out of reach. “We are so enthusiastic about building and continuing our relationship with the PATh project. I’m looking forward to developing a workshop that we run here at the museum. In our first year, getting help from the facilitators whom I’m familiar with would be really helpful, and this is something that I’m looking forward to doing subsequent to our first workshop to get there. There’s definitely more coming from our collaboration,” Bobo elaborates.

+ +

The PATh facilitators aim to give community members the resources they need to learn about the OSPool and control workload placement at the Access Points, Lombardi explains. +Attending and arranging trainings at this workshop with the AMNH was one of the ways they upheld this goal. “I feel like we hit the nail on the head with this event set up in that +we provided OSPool as a resource and they provided a lot of valuable input and feedback; it’s like a two-way street.”

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/ASP.html b/preview-fall2024-info/ASP.html new file mode 100644 index 000000000..ea1d10a4d --- /dev/null +++ b/preview-fall2024-info/ASP.html @@ -0,0 +1,376 @@ + + + + + + +Distributed Computing at the African School of Physics 2022 Workshop + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Distributed Computing at the African School of Physics 2022 Workshop +

+

Over 50 students chose to participate in a distributed computing workshop at the 7th biennial African School of Physics (ASP) 2022 at Nelson Mandela University in Gqeberha, South Africa.

+ +
+ Image obtained from the official ASP2022 page (https://www.africanschoolofphysics.org/asp2022/) on the African School of Physics website. +
Image obtained from the official ASP2022 page on the African School of Physics website.
+
+ +
+ Dr. Severini helping a student during ASP2022. +
Dr. Severini helping a student during ASP2022.
+
+ +

Almost 200 students from 41 countries were selected to participate in the 7th ASP 2022 at Nelson Mandela University in Gqeberha, South Africa. With the school shortened to two weeks, a parallel learning system was implemented, where participants could choose which lectures to attend to support their educational growth. Dr. Horst Severini is a Research Scientist and Adjunct Professor in High Energy Physics and Information Technology at the University of Oklahoma (OU) and a co-leader of the high-performance computing workshop. He anticipated maybe 25 students attending his track, “…we had about that many laptops,” he remarked, “and then we ended up with over 50 students!”

+ +

Severini was first introduced to distributed computing during his postdoc at OU. Then in the spring of 2012, Severini was introduced to Kétévi Assamagan, one of the founders of the ASP. Assamagan met with Severini and invited him and his colleagues to participate, leading to a scramble to create a curriculum for this new lecture series. They were eager to show students how distributed computing could help with their work.

+ +

After a few years of fine-tuning the high throughput classes, Severini has the workshop ironed out. After receiving an introduction to basic commands in Linux, the students started with a basic overview of high-energy physics, why computing is important to high-energy physics, and then some HTCondor basics. “The goal, really, is to teach students the basics of HTCondor, and then let them go off and see what they can do with it,” Severini explained. The workshop was so successful that students worked through coffee breaks and even stuck around at the end to obtain OSG accounts to continue their work.

+ +

A significant improvement for the 2022 high-performance computing workshop was the move from using OSG Connect for training sessions to Jupyter Notebooks. The switch to Jupyter Notebooks for training developed during the middle of 2022. “Jupyter allows people to ‘test drive’ submitting jobs on an HTCondor system without needing to create a full OSPool account,” OSG Research Computing Facilitator Christina Koch clarified. “Moving forward, we hope people can keep using the Jupyter Notebook interface once they get a full OSPool account so that they can move seamlessly from the training experience to all of the OSPool.”

+ +
+ Students working together and listening to a lecture during ASP2022. +
Students working together and listening to a lecture during ASP2022.
+
+ +

“[Jupyter Notebooks] worked quite well,” Severini said, noting that the only issue was that a few people lost their home directories overnight. However, these “beginning glitches” didn’t slow participants down whatsoever. “People enjoyed [the workshop] and showed it by not wanting to leave during breaks; they just wanted to keep working!”

+ +

Severini’s main goal for the high-performance computing workshop is to migrate the material into Jupyter Notebooks. “I’ve always been most familiar with shell scripts, so I always do anything I can in there because I know it’s repeatable…but I’ll adapt, so we’ll work on that for the next one,” he explains.

+ +

Overall, “everything’s been working well, and the students enjoy it; we’ll keep adjusting and going with the times!”

+ +

+ +

More information about scheduling and materials from the 7th ASP 2022 is available online. The 8th ASP 2024 will take place in Morocco. Check this site for more information as it comes out.

+ +

For more information or questions about the switch to Jupyter Notebooks, please email chtc@cs.wisc.edu.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/Anirvan-Showcase.html b/preview-fall2024-info/Anirvan-Showcase.html new file mode 100644 index 000000000..e1a6db0f4 --- /dev/null +++ b/preview-fall2024-info/Anirvan-Showcase.html @@ -0,0 +1,367 @@ + + + + + + +Antimatter: Using HTC to study very rare processes + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Antimatter: Using HTC to study very rare processes +

+
+ Proton-proton collision +
Two protons colliding. (Image credit: NA61/SHINE collaboration)
+
+ +

The final speaker at the OSG User School Showcase was Anirvan Shukla, a graduate student at the University of Hawai’i Mānoa, and this wasn’t his first school event. In 2016, Anirvan attended as a participant, but today he assumed the role of presenter and had the opportunity to explain how high throughput computing (HTC) has transformed his research in the last five years.

+ +

Anirvan studies antimatter and the extremely rare processes that produce it. Hypothetical dark matter decays into different matter and antimatter particles, like protons, antiprotons, deuterons, and anti-deuterons. When these particles are detected, they suggest that there may be dark matter inside or outside our galaxy. However, these matter and antimatter particles are also produced by the regular collisions of cosmic rays with the particles that make up the interstellar medium.

+ +

Given their rarity, such events can only really be studied with simulations, where they’re still extremely rare. In order to determine whether antimatter particles can be attributed to the decay of dark matter –– or if they’re merely a product of regular cosmic interactions –– Anirvan would need to simulate trillions of collisions.

+ +

Leveraging what he learned at the OSG School, Anirvan knew he would only be able to tackle these computations using the capacity of the Open Science Pool (OSPool). Capturing the impact of the OSG’s computing resources, Anirvan attests, “this project definitely would not have been possible on any other cluster that I have access to.”

+ +

For instance, to observe antihelium particles, a researcher must simulate approximately 100 trillion events, in this case proton-proton collisions. One million such events typically require about one CPU hour of computation, so a researcher needs roughly 100 million CPU hours in order to see a few antihelium particles –– equal to about 12,000 years on a single CPU. Anirvan therefore divided his work into 10-hour jobs, each containing 10 million simulations. Within each job, the final output file was also analyzed, and all the relevant data was extracted and placed in a histogram. This reduced the total size of the output files, which were then transferred to the server at the University of Hawai’i by an automated workflow that Anirvan created with HTCondor’s DAGMan feature.

+ +
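For readers unfamiliar with DAGMan, here is a minimal sketch of a simulate-then-extract workflow of this shape; the file and executable names are hypothetical illustrations, not Anirvan’s actual code:

    # pipeline.dag -- run the simulation node, then the histogram-extraction node
    JOB simulate simulate.sub
    JOB extract  extract.sub
    PARENT simulate CHILD extract
    RETRY simulate 3

    # simulate.sub -- one ~10-hour job covering 10 million simulated collisions
    executable   = run_simulation.sh
    arguments    = --events 10000000 --seed $(Cluster)
    output       = sim.out
    error        = sim.err
    log          = pipeline.log
    request_cpus = 1
    queue

Submitting the workflow with condor_submit_dag pipeline.dag lets DAGMan run the nodes in order and automatically retry failures, which is what makes a campaign of millions of jobs manageable from a single access point.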

In his presentation at the OSG School, Anirvan noted that over the last two years, he submitted more than 8 million jobs to the OSPool and used nearly 50 million core hours. The results from his simulations generated spectra that had never been produced before, shown below.

+ +
+ Chart +
Image credit: Shukla, A., Datta, A., von Doetinchem, P., Gomez-Coral, D., & Kanitz, C. (2020). Large-scale simulations of antihelium production in cosmic-ray interactions. Phys. Rev. D, 102, 063004. https://doi.org/10.1103/PhysRevD.102.063004
+
+ +

If Anirvan had tried to run these simulations on his own laptop, he would still be searching for dark matter in the year 14,021. Even the available computing resources at CERN and the University of Hawai’i weren’t enough for this colossal project –– the OSPool was necessary.

+ +

+ +

This article is part of a series of articles from the 2021 OSG Virtual School Showcase. OSG School is an annual education event for researchers who want to learn how to use distributed high throughput computing methods and tools. The Showcase, which features researchers sharing how HTC has impacted their work, is a highlight of the school each year.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/Banq.html b/preview-fall2024-info/Banq.html new file mode 100644 index 000000000..e2abf9cd0 --- /dev/null +++ b/preview-fall2024-info/Banq.html @@ -0,0 +1,398 @@ + + + + + + +Centuries of newspapers are now easily searchable thanks to HTCSS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Centuries of newspapers are now easily searchable thanks to HTCSS +

+
+ Newspaper spread +
The Montreal Witness. Monday, January 31, 1848. https://collections.banq.qc.ca/ark:/52327/4182772
+
+ +

The Bibliothèque et Archives nationales du Québec (BAnQ) has been using the HTCondor Software Suite (HTCSS) to help digitize their vast collections of documents +since 2013. Just this year, they built a powerful computing cluster out of staff workstations, using HTCSS’s cycle scavenging capabilities to tackle their largest +computational endeavor yet.

+ +

Anything published in Québec –– books, magazines, newspapers, and more –– is all housed within BAnQ, an institution uniting Québec’s National Library, the +province’s National Archives, and Montreal’s vast public library. “You can imagine the result is a colossal amount of materials,” attests Senior Computer +Technician David Lamarche, “ranging from the discovery of the Americas and the very beginning of the colony, to whatever’s being written in newspapers this week.” +Ultimately, these archives and collections reflect important historical moments, rich cultural heritage, and a tremendous amount of data.

+ +

To tackle this archival mountain, the digital collections team at BAnQ enlists the help of the HTCondor Software Suite (HTCSS) +to transform images of pages into text, which can be analyzed in-house and made available to the public. This was the goal of their largest computational project yet –– +completing text recognition on decades of articles from 114 archived newspapers in order to make them available for full-text search. This feat took them several years, +but on July 12 of this year, the digital collections team finished text recognition on the very last newspaper.

+ +

Now, with full-text search available, users of the BAnQ Digital Archives and Collections have nearly 260 years of cultural and +historical moments at their fingertips. Information that used to be buried in the ink of these newspapers, accessible only through time-consuming searches and +tedious record-keeping, can now be unearthed with mere strokes of a keyboard. This saves users immense amounts of time and elevates the cultural value of the +documents themselves.

+ +

The end result wouldn’t have happened quite as fast without the ability of HTCSS to automate the work across BAnQ’s staff workstations. File analyses, conversions, +and text recognitions that typically took weeks or even months to complete are now completed in the same week, or perhaps even overnight.

+ +

“HTCondor has become nothing less than a central pillar of our team,” attests David Lamarche, the HTCondor administrator for the digital collections team. +“We want to give credit to HTCondor for its role in this project’s success, as we would not have reached that milestone quite so quickly without it!”

+ +

But accelerating digitization was only half the battle. David reflects that the project’s main challenge “was not only to process this backlog of 114 newspapers, +but to do so while minimizing the impact on our daily workflows for newly-digitized titles.” Continuing, he explains two HTCondor features that were vital to the +project’s completion: “The first is HTCondor’s scalability, which allowed us to easily add more workstations to our resource pool. The second is HTCondor’s +resource distribution mechanisms, which we were able to configure to control how many resources could be allocated to processing older titles.”

+ +
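Cycle scavenging of this kind is governed by policy expressions in each machine’s HTCondor configuration. As a rough, illustrative sketch (the thresholds below are invented for this article, not BAnQ’s actual settings), a staff workstation can be told to lend its cycles only while its owner is away:

    # condor_config fragment for a staff workstation (illustrative values only)
    # Start jobs after 15 minutes without keyboard or mouse activity,
    # and only if the machine's own non-HTCondor load is low.
    START = (KeyboardIdle > 15 * 60) && ((LoadAvg - CondorLoadAvg) < 0.3)

    # Suspend a running job the moment the owner returns...
    SUSPEND = (KeyboardIdle < 60)

    # ...and resume it once the machine has been idle again for 5 minutes.
    CONTINUE = (KeyboardIdle > 5 * 60)

How much capacity goes to the backlog of older titles versus the daily workflow can then be tuned separately, for example with HTCondor’s accounting groups and group quotas.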

Over the course of the project, the team used HTCSS to process over 5 million files. Many of the newspapers span decades, and some centuries, with new issues +published monthly, weekly, or even daily. For every issue, each page is manually scanned before the team uses HTCondor to analyze the file, convert it into a +high-quality version, prepare it for text recognition, conduct text recognition, and finally convert the file into a smaller, lower-quality version that can be +disseminated on a web platform. Throughout the workflow, the team integrated a variety of software tools into their jobs, which ran by cycle scavenging on 50 +workstations when they were not being used by in-office staff.

+ +
+ Cover of La Patrie Newspaper +
Cover of La Patrie newspaper. July 8, 1921
+
+ +

The La Patrie newspaper, which circulated as one of the main news sources in Québec from 1879 to 1978, was one of the larger publications that the team digitized. Accounts of the Great Depression, both world wars, and a plethora of other important historical events are buried in its –– now digital –– ink. Consisting of more than 600,000 files, text recognition on La Patrie would have taken an estimated 18 years on a single workstation. With HTCondor, this publication was successfully processed in merely 8 months.

+ +

Digitization –– enabled by the HTCondor Software Suite –– offers a solution to the tradeoff between the preservation of these cultural documents and their +accessibility, and even adds value back into the documents themselves by enabling full-text searches. In the future, BAnQ’s digitization team hopes to expand their +use of HTCSS to text recognition on handwritten documents and perhaps even object recognition in photographs.

+ +

+ +

Browse the newspapers in BAnQ’s digital collections.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/CDIS-bldg.html b/preview-fall2024-info/CDIS-bldg.html new file mode 100644 index 000000000..d36461db0 --- /dev/null +++ b/preview-fall2024-info/CDIS-bldg.html @@ -0,0 +1,347 @@ + + + + + + +Construction Commences on CHTC's Future Home in New CDIS Building + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Construction Commences on CHTC's Future Home in New CDIS Building +

+

Breaking ground is as symbolic as it is exciting – a metaphorical act of consecrating a new location and the start of something new. On April 25th, UW-Madison broke ground at 1240 W. Johnson St., Madison, WI, the site of the new building for the School of Computer, Data & Information Sciences and the new home of the Center for High Throughput Computing (CHTC) in 2025.

+ +

“The new CDIS building is the latest crest in a wave of expansion and renewal enhancing the campus landscape to meet the needs of current and future Badgers,” the university reports. This building, expected to be nearly 350,000 square feet, will be the most sustainable facility on campus and will create a new center of activity for UW, enabling important connections and establishing a tech corridor from Physics and Chemistry to the Discovery Building to the College of Engineering.

+ +

CHTC Technical Lead Todd Tannenbaum wryly remarks that “while the 1960s charm of our current old building is endearing at times (isn’t cinder block making a comeback?), I am inspired by the opportunity to work every day in a new and modern building. I am also especially excited by how this will open up new possibilities for collaboration across not only Comp Sci, but also the community of faculty and researchers in the Information School, Statistics, and Data Sciences.”

+ +

Read more about the extensive construction plans ahead, the budget, and how the project is being funded here. Launch a virtual tour of the building here.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/CDIS_eventpage.html b/preview-fall2024-info/CDIS_eventpage.html new file mode 100644 index 000000000..9590446e0 --- /dev/null +++ b/preview-fall2024-info/CDIS_eventpage.html @@ -0,0 +1,14 @@ + + +
+
+
+ OSG Logo +
+
+ Click Here to be Redirected! +
+
+
diff --git a/preview-fall2024-info/CHTC-Facilitation.html b/preview-fall2024-info/CHTC-Facilitation.html new file mode 100644 index 000000000..faf8b987d --- /dev/null +++ b/preview-fall2024-info/CHTC-Facilitation.html @@ -0,0 +1,383 @@ + + + + + + +CHTC Facilitation Innovations for Research Computing + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ CHTC Facilitation Innovations for Research Computing +

+

After adding Research Computing Facilitators in 2013-2014, CHTC has expanded its reach to support researchers in all disciplines interested in using large-scale computing to support their research through the shared computing capacity offered by the CHTC.

+ +
+ Research Computing Facilitator Christina Koch with a researcher. +
Research Computing Facilitators Christina Koch (left) and Rachel Lombardi (right).
+
+ +

As the core research computing center at the University of Wisconsin-Madison and the leading high throughput computing (HTC) force nationally, the Center for High Throughput Computing (CHTC) has always had one simple goal: to help researchers in all fields use HTC to advance their work.

+ +

Soon after its founding, CHTC learned that computing capacity alone was not enough; there needed to be more communication between researchers who used computing and the computer scientists who wanted to help them. To address this gap, the CHTC needed a new, two-way communication model that better understood and advocated for the needs of researchers and helped them understand how to apply computing to transform their research. In 2013, CHTC hired its first Research Computing Facilitator (RCF), Lauren Michael, to implement this new model and provide staff experience in domain research, research computing, and communication/teaching skills. Since then, the team has expanded to include additional facilitators, who today include Christina Koch, now leading the team, Rachel Lombardi, and an additional team member CHTC is actively hiring.

+ +

What is an RCF?

+

An RCF’s job is to understand a new user’s research goals and provide computing options that fit their needs. “As a Research Computing Facilitator, we want to facilitate the researcher’s use of computing,” explains Koch. “They can come to us with problems with their research, and we can advise them on different computing possibilities.”

+ +

Computing facilitators know how to work with researchers and understand research enough to guide the customizations researchers need. More importantly, RCFs are passionate about helping people and solving problems.

+ +

In the early days of CHTC, it was a relatively new idea to hire people with communication and problem-solving skills and apply those talents to computational research. Having facilitators with these skills to bridge the gap between research computing organizations and researchers was unique to CHTC; in fact, the term “Research Computing Facilitator” was coined at UW-Madison.

+ +

RCF as a part of the CHTC model

+

Research computing facilitators have become an integral part of the CHTC and are a unique part of the model for this center. Koch elaborates that “…what’s unique at the CHTC is having a dedicated role – that we’re not just ‘user support’ responding to people’s questions, but we’re taking this more proactive, collaborative stance with researchers.” Research Computing Facilitators strengthen the CHTC and allow a more diverse range of computing dimensions to be supported. This support gives these researchers a competitive edge that others may not necessarily have.

+ +

The uniqueness of the RCF role allows for customized solutions for researchers and their projects. RCFs meet with every researcher who requests an account to use CHTC computing resources. These individual meetings allow them to have strategic conversations, provide personal recommendations, and discuss long-term goals.

+ +

Meetings between the facilitators and researchers also get researchers thinking about what they could do if they could do things faster, at a grander scale, and with less time and effort investment for each project. “We want to understand what their research project is, the goals of that project, and the limitations they’re concerned with to see if using CHTC resources could aid them,” Lombardi explains. “We’re always willing to push the boundaries of our services to try to accommodate to researchers’ needs.” The RCFs must know enough about the researchers’ work to talk to the researchers about the dimensions of their computing requirements in terms they understand.

+ +

Although RCFs are integral to CHTC’s model, the work does not come without challenges. One hurdle is that they are facilitators, which means they’re ultimately not the ones to make choices for the researchers they support. They present solutions given each researcher’s unique circumstances, and it’s up to researchers to decide what to do. Koch explains that “it’s about finding the balance between helping them make those decisions while still having them do the actual work, even if it’s sometimes hard, because they understand that it will pay off in the long run.”

+ +

Supporting research computing across domains is also a significant CHTC facilitation accomplishment. Researchers used to need a programming background to apply computing to their analyses, which meant the physical sciences typically dominated large-scale computational analyses. Over the years, computing has become a lot more accessible. More researchers in the life sciences, social sciences, and humanities have access to community software tools they can apply to their research problems. “It’s not about a user’s level of technical skill or what kind of science they do,” Koch says. It’s about asking, “are you using computing, and do you need help expanding?” CHTC’s ability to pull in researchers across new disciplines has been rewarding and beneficial. “When new disciplines start using computing to tackle their problems, they can do some new, interesting research to contribute to their fields,” Koch notes.

+ +

Democratizing Access

+

CHTC’s success can inspire other campuses to rethink their research computing operations to support their researchers better and innovate. Recognized nationally and internationally as an expert in HTC and facilitation, CHTC has seen its approach start to make its way into other campus computing centers.

+ +

CHTC efforts aim to bring broader access to HTC systems. “CHTC has enabled access to computing to a broad spectrum of researchers on campus,” Lombardi explains, “and we strive to help researchers and organizations implement throughput computing capacity.” CHTC is part of national and international efforts to bring that level of computing to other communities through partnerships with organizations, such as the Campus Cyberinfrastructure (CC*) NSF program.

+ +

The CC* program supports campuses across the country that wish to contribute computing capacity to the Open Science Pool (OSPool). These institutions are awarded a grant, and in turn, they agree to donate resources to the OSPool, a mutually beneficial system to democratize computing and make it more accessible to researchers who might not have access to such capacity otherwise.

+ +

The RCF team meets weekly with researchers from around the world (including Africa, Europe, and Asia). They hold OSG Office Hours twice a week for one-on-one support and provide training at least twice a month for new users and on special topics.

+ +

For other campuses to follow in CHTC’s footsteps, they can start implementing facilitation first, even before a campus has any computing systems. In some cases, such as on smaller campuses, they might not even have or need to have a computing center. Having facilitators is crucial to providing researchers with individualized support for their projects.

+ +

The next step would be for campuses to look at how they currently support their researchers, including examining what they’re currently doing and if there’s anything they’d want to do differently to communicate this ethic of supporting researchers.

+ +

Apart from the impact that research computing facilitators have had on the research community, Koch notes what this job means to her, “[w]orking for a more mission-driven organization where I feel like I’m enabling other people’s research success is so motivating.” Now, almost ten years later, the CHTC has gone from having roughly one hundred research groups using the capacity it provides to having several hundred research groups and thousands of users per year. “Facilitation will continue to advise and support these projects to advance the big picture,” Lombardi notes, “we’ll always be available to researchers who want to talk to someone about how CHTC resources can advance their work!”

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/CHTC-Philosophy.html b/preview-fall2024-info/CHTC-Philosophy.html new file mode 100644 index 000000000..562d3ea8a --- /dev/null +++ b/preview-fall2024-info/CHTC-Philosophy.html @@ -0,0 +1,384 @@ + + + + + + +The CHTC Philosophy of High Throughput Computing – A Talk by Greg Thain + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ The CHTC Philosophy of High Throughput Computing – A Talk by Greg Thain +

+

HTCondor Core Developer Greg Thain spoke to UW faculty and researchers about research computing and the missions and goals of the Center for High Throughput Computing (CHTC).

+ +
+ Image of a server room by Elias from Pixabay. +
Image of a server room by Elias from Pixabay.
+
+ +

The Center for High Throughput Computing (CHTC) is proud to be home to a breadth of research on campus, with over 300 projects and 20 million core hours used by departments on the University of Wisconsin-Madison campus, ranging from the College of Agriculture and Life Sciences (CALS) to the School of Education, School of Pharmacy, and many more. “The CHTC is known best for being a place to run lots of fast jobs for free, [and] we hope to continue democratizing computing across the campus,” Greg Thain began in his talks to UW-Madison researchers and staff on March 9 and 17, organized by UW-Madison Chief Technology Officer Todd Shechter.

+ +

“We like to think of the CHTC like the UW Hospital,” Thain explained, “like the hospital’s main purpose is to train the next generation of health professionals and conduct medical research. In the same way, the CHTC is our research laboratory and is where others can come and conduct their research; we do both research and provide a service.”

+ +

The main asset leveraged by the CHTC is research computing. “Research computing consists of research that happens to use computing and research about computing,” Thain explained, “both of which start and end with people.” Thain then described the two phases researchers go through when they approach the CHTC for help; “first, they seek assistance and guidance on a problem they’re currently facing. Second, they realize they can do something revolutionary with high throughput computing (HTC).”

+ +

One aspect of CHTC research computing tailored to scientists and researchers is that they don’t have to spend time supervising their running programs. Users can configure an HTCondor Access Point to manage all their work, allowing them to essentially “submit it and forget it.” This compute system is similar to others in that any user can understand it and rely on it, “except ours has the extra touch of being a ‘submit it and forget it’ system,” Thain clarified.

+ +
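As a small, hypothetical illustration of “submit it and forget it” (the file names here are invented, not a CHTC-provided example), a researcher might hand an Access Point a submit description like the following and then simply log off:

    # analysis.sub -- 100 independent tasks, one input file per task
    executable     = analyze.sh
    arguments      = input_$(Process).csv
    request_cpus   = 1
    request_memory = 2GB
    request_disk   = 2GB
    output         = analyze_$(Process).out
    error          = analyze_$(Process).err
    log            = analysis.log
    queue 100

After a single condor_submit analysis.sub, the Access Point queues, schedules, and babysits all 100 jobs; the researcher can check in later with condor_q or by reading the log file.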

Similarly, the CHTC also created software for where the work runs, called an HTCondor Execution Point (EP). These Execution Points may be machines owned by other resource providers and operate under different policies.

+ +

Both researchers and resource providers may have constraints; the goal of HTCondor, then, is to “manage and maintain these restraints; there are many users and [resource] providers in the real world, and the CHTC is currently working on optimizing these individuals’ wants and needs.”

+ +

“This is a distributed problem,” Thain continued, “not because of the machines; it’s distributed because of the people.” Having distributed authority as opposed to distributed machines means that tools and policies are distributed.

+ +

The implicit assumption is that all work can be divided into smaller, mostly independent jobs. In this way, “the goal is to optimize the time to finish running these jobs instead of the time to run a single one; to do this, we want to break up the jobs as much as possible so they can run in parallel,” Thain explained. The implication of this is there are a lot of different jobs, and how difficult it is to break them up varies.

+ +
+ Research Computing Facilitator Christina Koch with a researcher. +
Research Computing Facilitator Christina Koch with a researcher.
+
+ +

To mitigate this, research computing facilitators (RCFs) work with users and researchers to overcome their specific problems. RCFs are different from a traditional “help desk;” their role is to interface with graduate students, PIs, and other researchers and guide them to find the best-fit solution for their projects. RCFs must have a broad understanding of the basic sciences to communicate with the researchers, understand their work, and give them useful and reasonable recommendations and other technological approaches.

+ +

“The CHTC’s top priority is always reliability, but with all this work going on, the dream for us is scalability,” Thain explained. Ideally, performance would keep increasing as load is added; in reality, it improves a little and then plateaus. To compensate for this, the CHTC goes out of its way to make access points more reliable. “Adding access points helps to scale and allows submission near the user.” Thain notes the mantra: “submit locally, run globally.”

+ +

As the CHTC is our on-campus laboratory for experimenting with distributed computing, the Open Science Pool (OSPool) is a bolder experiment expanding these ideas to a national scale of interconnected campuses.

+ +
+ Institutions using OSPool resources. +
Institutions using OSPool resources.
+
+ +

The OSG and subsequent OSPool provide computing access on a national level in the same way that someone can access an available machine locally. For example, if the machines on campus are unavailable or all being used, users can access machines in the greater OSG Consortium. “But at the end of the day, all this computing, storage and networking research is in service to the needs of people who rely on high throughput computing to accomplish their research,” Thain maintains. “We hope the OSPool will be an accelerator for a broad swath of researchers in all kinds of disciplines, from all over the United States.”

+ +

+ +

The full slideshow can be found here. Please click here for more information about researching computing within the CHTC, or visit this page to contact our RCFs for any questions.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/CHTC-pool-record.html b/preview-fall2024-info/CHTC-pool-record.html new file mode 100644 index 000000000..18412b9a3 --- /dev/null +++ b/preview-fall2024-info/CHTC-pool-record.html @@ -0,0 +1,373 @@ + + + + + + +Over 240,000 CHTC Jobs Hit Record Daily Capacity Consumption + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Over 240,000 CHTC Jobs Hit Record Daily Capacity Consumption +

+

Users of the Center for High Throughput Computing (CHTC) continue to be hard at work smashing records with high throughput computational workloads. On October 20th alone, more than 240,000 jobs completed, reporting a total consumption of more than 710,000 core hours. This is equivalent to nearly 30,000 cores running non-stop for 24 hours.

+ +

What is contributing to these records? One likely factor is UW’s investment in new hardware. UW-Madison’s research computing hardware recently underwent a substantial refresh, adding 207 new servers representing over 40,000 “batch slots” of computing capacity.

+ +

However, additional capacity requires researchers ready and capable to use it. +The efforts of the CHTC facilitation team, led by Christina Koch, contributed to +this readiness. Since September 1, CHTC’s Research Computing Facilitators have met +with 70 new users for an introductory consultation, and there have been over 80 +visits to the twice-weekly drop-in office hours hosted by the facilitation team. +Koch notes that “using large-scale computing can require skills and concepts that +are new to most researchers - we are here to help bridge that gap.”

+ +

Finally, the hard work of the researchers themselves is another linchpin of these records. Over 80 users spanning many fields of science contributed to this success, including these users with substantial usage:

+ +
    +
  • IceCube Neutrino Observatory: an observatory operated by the University of Wisconsin-Madison, designed to observe the cosmos from deep within the South Pole ice.
  • +
  • ECE_miguel: In the Department of Electrical and Computer Engineering, Joshua San Miguel’s group explores new paradigms in computer architecture.
  • +
  • MSE_Szlufarska: Isabel Szlufarska’s lab focuses on computational materials science and mechanical behavior at the nanoscale, using atomic-scale modeling to understand and design new materials.
  • +
  • Genetics_Payseur: Genetics professor Bret Payseur’s lab uses genetics and genomics to understand mechanisms of evolution.
  • +
  • Pharmacy_Jiang: Pharmacy professor Jiaoyang Jiang’s interests span the gap between biology and chemistry by focusing on identifying the roles of protein post-translational modifications in regulating human physiological and pathological processes.
  • +
  • EngrPhys_Franck: Jennifer Franck’s group specializes in the development of new experimental techniques at the micro and nano scales with the goal of providing unprecedented full-field 3D access to real-time imaging and deformation measurements in complex soft matter and cellular systems.
  • +
  • BMI_Gitter: In Biostatistics and Computer Sciences, Anthony Gitter’s lab conducts computational biology research that brings together machine learning techniques and problems in biology.
  • +
  • DairyScience_Dorea: Joao Dorea’s Animal and Dairy Science group focuses on the development of high-throughput phenotyping technologies.
  • +
+ +

Any UW student or researcher who wants to apply a high throughput of computing resources to a given problem can harness the capacity of the CHTC Pool.

+ +

Users can sign up here.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/CLAS12.html b/preview-fall2024-info/CLAS12.html new file mode 100644 index 000000000..4511928cd --- /dev/null +++ b/preview-fall2024-info/CLAS12.html @@ -0,0 +1,498 @@ + + + + + + +Expanding, uniting, and enhancing CLAS12 computing with OSG’s fabric of services + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Expanding, uniting, and enhancing CLAS12 computing with OSG’s fabric of services +

+

A mutually beneficial partnership between Jefferson Lab and the OSG Consortium at both the organizational and individual levels has had a prolific impact on the CLAS12 experiment.

+ +
+ An aerial view of Jefferson Lab. +
An aerial view of Jefferson Lab. (Photo courtesy of Jefferson Lab.)
+
+ +

Twenty-five feet underground within the U.S. Department of Energy’s Thomas Jefferson National Accelerator Facility in Newport News, Virginia, electrons circulating at nearly the speed of light form a beam that’s as narrow as a single strand of human hair. Traveling around a racetrack-shaped accelerator five times in about 22 millionths of a second, electrons in this beam are directed into a target material, where they collide with protons and neutrons that reside inside the nuclei of the target atoms. These collisions produce an array of new particles, which ricochet out of the target material and into a unique detector that measures the particle’s momentum and speed to determine its mass and identity.

+ +
+ Group photo of members of the CLAS Collaboration +
Members of the CLAS Collaboration in March of 2018 at Jefferson Lab. (Photo courtesy of Jefferson Lab.)
+
+ +

At first, these quantum interactions may seem incomprehensible in human dimensions, but these marvels of physics –– and the computational approaches +required to study them –– have brought together people, groups, and institutions across nations and scientific disciplines. The racetrack-shaped +accelerator at Jefferson Lab, officially known as the Continuous Electron Beam Accelerator Facility (CEBAF), attracts approximately 1,500 scientists +from around the world, all visiting Jefferson Lab to conduct experiments. The one-of-a-kind detector known as the CEBAF Large Acceptance Spectrometer, +or the CLAS detector, is the namesake of the CLAS Collaboration, a group of over 200 collaborators from more than 40 institutions that span a total of +8 countries. To manage their ever-growing amounts of data, geographically-distributed collaboration, and complex workflows, the CLAS Collaboration +partners with the OSG Consortium in expanding, uniting, and enhancing their experiment.

+ +

Researchers within this collaboration all strive to understand atomic structure, yet their individual topics of study are diverse, ranging from the multi-dimensional distribution of quarks and gluons inside a proton to the binding interactions within complex nuclei. In pursuit of this research, scientists in the Collaboration have used 42 million core hours through OSG services in the past year. This number is impressive in itself, yet the amount of communication and coordination required to achieve this level of computational throughput is far more extraordinary. These collaborative endeavors have a long history, dating all the way back to the inception of the OSG Consortium.

+ +

The foundations of a partnership

+ +

After ten years of construction, Jefferson Lab began operations in 1997. This marked the beginnings not only of the CLAS experiment, but also the +collection of other physics experiments that call Jefferson Lab home. Soon after their launch, Jefferson Lab contributed as a founding institution for +the OSG Consortium. They participated in the formation of OSG’s bylaws but didn’t leverage OSG’s services because it wasn’t an appropriate fit for their +experiments at the time. In April of 2018, however, Jefferson Lab rejoined the OSG Consortium in full force to pursue opportunities for the GlueX +experiment, and eventually also for the CLAS Collaboration’s new and upgraded experiment called CLAS12.

+ +

This resurgence on the organizational level all stems from the actions of individual people. Before Jefferson Lab rejoined the OSG Consortium, +Richard Jones, a principal investigator (PI) at the University of Connecticut who is involved in the GlueX experiment, began exploring OSG’s services. +Jones not only introduced the benefits of OSG to GlueX, but also to Jefferson Lab more broadly. After OSG’s workflow and infrastructure proved to be +scalable for GlueX, members of the CLAS Collaboration became interested in OSG’s fabric of services too. Frank Würthwein, OSG Executive Director, +interprets this process as a “flow of engagement that followed the social structures that the relevant parties were embedded in. Basically, it’s a campus +word-of-mouth.”

+ +
+ Frank Würthwein +

Frank Würthwein, OSG Executive Director. (Photo by Owen Stanley).
+
+ +

This partnership was cemented when Würthwein visited Jefferson Lab to discuss opportunities for both the GlueX and CLAS12 experiments. The resulting +partnership that exists today has proven to be notably symbiotic. In fact, Würthwein professes that the partnership with Jefferson Lab has been absolutely +central to OSG’s mission: “Jefferson Lab and the CLAS Collaboration have helped us multiply our message, improve our tools, and ultimately advance open +science itself. They have played an important role in making us a better organization.” Likewise, the CLAS Collaboration has been able to expand their +computing capacity, unite their computing resources, and enhance their science as a result of working with OSG.

+ +

Expanding computing resources

+ +

On a fundamental level, OSG’s fabric of services provides the CLAS Collaboration with additional computing power through the Open Science Pool (OSPool) –– +an asset that was vital after transitioning to a new, upgraded version of the experiment in 2018. Compared to the original experiment, the electrons +blasting into the target material in the new experiment carry twice the energy –– 12 billion electron volts to be exact. This new experiment, coined +‘CLAS12’ to signify this energy increase, also engendered a tenfold increase in computing demand. While Jefferson Lab’s in-house computing resources are +extensive, the sheer amount of data produced in the CLAS12 experiment is substantial. Today, the experiment generates about 1 petabyte of data each year.

+ +

To put this number into perspective, 1 petabyte is equivalent to twenty million four-drawer filing cabinets completely filled with text, or 13.3 years of +HD-TV video. That’s a lot of data to manage.

+ +
+ Nathan Baltzell, Jefferson Lab Staff Scientist +

Nathan Baltzell, Jefferson Lab Staff Scientist.
+
+ +

Nathan Baltzell, a Jefferson Lab Staff Scientist who organizes software efforts for CLAS12, describes how staff at Jefferson Lab responded to this data +dilemma: “When this newer era of experiments started four years ago, projections were that we would absorb all our local computing resources crunching +the real, experimental data. It was critical to be able to run simulations somewhere else.”

+ +

That somewhere else became the capacity offered by the OSG. Each job submitted by CLAS12 researchers contains about 10,000 different Monte Carlo simulations and runs for roughly 4-6 hours on a single core. Once submitted to an OSG Access Point, CLAS12 jobs run on either opportunistic or dedicated resources. Opportunistic resources, or resources contributed to the common good of all open science via the OSPool, have provided the CLAS12 experiment with roughly 33 million core hours in the past year. On the other hand, dedicated resources –– those exclusively reserved for the CLAS12 experiment –– supply the Collaboration with about 9 million core hours annually. These dedicated resources have undoubtedly played a role in expanding computing capacity, but they also have proven instrumental in uniting computing resources of the CLAS Collaboration.
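In HTCondor terms, a batch of such jobs can be described in a single submit file. The sketch below is a rough illustration only; the script name and resource values are assumptions, not taken from the actual CLAS12 setup:

# Hypothetical HTCondor submit file for a batch of single-core simulation jobs.
# The script name and resource values are illustrative, not from the experiment.
executable     = run_clas12_sim.sh
# Each job receives a unique index via $(Process)
arguments      = $(Process)
# One core per job, matching the single-core runs described above
request_cpus   = 1
request_memory = 2GB
request_disk   = 4GB
output         = sim_$(Process).out
error          = sim_$(Process).err
log            = sim.log
# Submit 100 such jobs
queue 100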

+ +

Uniting geographically-distributed computing resources

+ +

Beyond expanding the computing resources available to the CLAS12 experiment, OSG services have also played a role in uniting the CLAS Collaboration’s existing computing resources scattered around the globe. Hundreds of collaborators at many different institutions across several countries translate to more total computing resources at the Collaboration’s disposal. However, accessing this swath of distributed resources, installing the necessary software, and ensuring everything runs smoothly proved to be a logistical headache that worsened as the CLAS Collaboration’s software evolved and became more sophisticated.

+ +
+ Raffaella De Vita +

Raffaella De Vita, INFN Staff Scientist and Software Coordinator of the CLAS Collaboration.
+
+ +

Thankfully, OSG’s services could serve as a unified pool that would unite the CLAS Collaboration’s computing resources and bypass the logistical +bottlenecks. Raffaella De Vita, Software Coordinator and former Chair of the CLAS Collaboration, comments on the value of this approach: “The idea of +using OSG services to basically collect resources that our institutions could provide and make them in a unified pool that could be used more efficiently, +became very appealing to us.”

+ +

Today, six CLAS member institutions with their own computing centers have joined the OSPool to provide dedicated resources to the experiment in a more efficient manner. These institutions include the Massachusetts Institute of Technology, Glasgow University, Grille au service de la Recherche en Ile de France (GRIF), Lamar University, Compute Canada, and Istituto Nazionale di Fisica Nucleare (INFN). De Vita, a Staff Scientist at INFN, was personally involved in coordinating the addition of INFN’s computing resources to the OSPool. She considers the process to be quite successful from her perspective: “People at OSG took care of creating the connection and working with our computing center staff, and I basically just had to send some emails.” Zooming out on impacts to the CLAS Collaboration more broadly, De Vita adds, “it’s been an excellent way to get members of the collaboration to contribute not only with manpower, but also with computing resources.”

+ +

Enhancing science and improving workflows

+ +

Finally, collaboration between OSG and Jefferson Lab staff has resulted in improved workflows, streamlined submissions, and enhanced science. The HTCondor Software Suite (HTCSS), which was developed at UW-Madison and is used to automate and manage workloads, coordinates the submission of CLAS12 jobs. Containers, which function naturally on the OSPool, are used to create custom software environments for CLAS12 jobs.
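As a sketch of how a container-based job can be described with HTCSS on pools running recent HTCondor versions (the image and script names below are hypothetical, and older pools express container use through other submit commands):

# Hypothetical submit file fragment using HTCondor's container universe
# (the image name is illustrative; older pools express this differently)
universe        = container
container_image = docker://example/clas12-sim:latest
executable      = run_sim.sh
output          = job.out
error           = job.err
log             = job.log
queue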

+ +
+ Maurizio Ungaro +

Maurizio Ungaro, Jefferson Lab Staff Scientist.
+
+ +

When asked about workflows and job submissions, Maurizio Ungaro, a Jefferson Lab Staff Scientist who helps coordinate CLAS12’s Monte Carlo simulations, explains: “This is actually where OSG services are really useful. Containers allow us to encapsulate the software that we run, and HTCondor coordinates the submission of our jobs. Because of this, we’re able to solve two problems: one being CPU usage, and the other being simulation organization.”

+ +

Before they began using OSG Access Points, CLAS Collaborators used to write their own submission scripts, a challenging task that involved many moving +parts and was prone to errors. Now, through coordination with OSG staff, Ungaro and his team have been able to package the array of tools in a user-friendly +web portal. Describing the impacts of this new interface, Ungaro explains: “Now, collaborators are able to submit jobs using the web portal, even from +their phone! They can choose from several experiment configuration options, click the submit button, and within a few hours the results will be here at +Jefferson Lab on their user disk space.” In essence, this web portal streamlines the process of job submission, all so that CLAS Collaborators can grow +and improve their physics.

+ +

A legacy of multi-scale collaboration

+ +

The partnership between Jefferson Lab and the OSG Consortium is a story of many dimensions. Projects of this scale are rarely a seamless production system +in which all components are automated. They require hard work and close coordination, at both the organizational and individual levels.

+ +

On the individual scale, consistent, day-to-day interactions accumulate to instill a lasting impact. OSG staff participate in Jefferson Lab’s weekly +meetings, engage in one-on-one calls, and organize meetings to resolve issues and support the CLAS12 experiment. Reflecting on the culmination of these +interactions, Ungaro would characterize his experience as “nothing short of incredible.” He adds: “I can see not just their technical expertise, but also +how they’re really willing to help, happy to contribute, and grateful to help our science.”

+ +
+ Pascal Paschos +

Pascal Paschos, OSG Collaborations Facilitator.
+
+ +

Pascal Paschos, the OSG Area Coordinator for Collaboration support who works closely with the CLAS12 Collaboration, +sees the experience as an opportunity for growth: “OSG doesn’t merely provide a service to these individual labs; it’s also an opportunity for us to grow +as an organization by identifying what we have done well in our partnership with Jefferson Lab to enable such a prolific production from one of their +experiments.”

+ +

Ultimately, the CLAS experiment as it exists today is a product of cross-coordination between Collaboration members, executive teams, and technical staff +on both sides of the partnership, all working together to make something happen. As Paschos phrases it: “At the end of the day, you’re looking at +partnerships –– not between institutional entities –– but between people.”

+ +

+ +

Learn more about Jefferson Lab and the OSG Consortium, and browse all publications from the CLAS Collaboration.

+ + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/DoIt-Article-Summary.html b/preview-fall2024-info/DoIt-Article-Summary.html new file mode 100644 index 000000000..c2352ebf9 --- /dev/null +++ b/preview-fall2024-info/DoIt-Article-Summary.html @@ -0,0 +1,361 @@ + + + + + + +Solving for the future: Investment, new coalition levels up research computing infrastructure at UW–Madison + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Solving for the future: Investment, new coalition levels up research computing infrastructure at UW–Madison +

+

Original article posted by Corissa Runde on September 21, 2022, on UW-Madison’s Department of Information Technology website.

+ +
+ Image from the original article posted by the UW–⁠Madison Information Technology department. +
Image from the original article posted by the UW–⁠Madison Information Technology department.
+
+ +

UW-Madison’s research computing hardware recently underwent a substantial refresh, adding 207 new servers representing over 40,000 “batch slots” of computing capacity. This refresh, together with an annual campus-level commitment to sustain it, will allow UW researchers to push the limits of their research on an all-new, sustained shared infrastructure. The refresh was made possible by a $4.3 million investment from the Wisconsin Alumni Research Foundation (WARF), and it relieves individual PIs of some of the burden of standing up their own facilities. Now, researchers will have a sustainable computational infrastructure to harness more computing capacity and produce computationally heavy research results more efficiently.

+ +

The research computing investments, equipment upgrades, and services to support researchers were made possible by the growing collaboration between:

+ + +

+ +

Read more here.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/Dockerfile b/preview-fall2024-info/Dockerfile new file mode 100644 index 000000000..e6dcf9897 --- /dev/null +++ b/preview-fall2024-info/Dockerfile @@ -0,0 +1,12 @@ +FROM ruby:3 + +# Install dependencies +RUN apt-get update -y && apt-get upgrade -y + +# Install Gems +COPY Gemfile /tmp/ +RUN bundle install --gemfile=/tmp/Gemfile --jobs 20 + +WORKDIR /app + +ENTRYPOINT ["bundle", "exec"] \ No newline at end of file diff --git a/preview-fall2024-info/EHT.html b/preview-fall2024-info/EHT.html new file mode 100644 index 000000000..48d9cf0ab --- /dev/null +++ b/preview-fall2024-info/EHT.html @@ -0,0 +1,407 @@ + + + + + + +High-throughput computing as an enabler of black hole science + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ High-throughput computing as an enabler of black hole science +

+
+ Simulated image of Sagittarius A* black hole. Image library credit: EHT Theory Working Group, CK Chan. +
Image library credit: EHT Theory Working Group, CK Chan.
+
+ +

On June 25, 2021, Arizona astrophysicist Feryal Ozel posted an item on Twitter that must have fired up scientific imaginations. She noted that the Open Science Pool (OSPool) just set a single-day record of capacity delivered — churning through more than 1.1 million core hours. Her team’s project was leading the surge.

+ +

“Can you tell something is cooking?” she asked cheekily.

+ +

Almost a year later, the secret is out. The Event Horizon Telescope (EHT) Project, a collaboration of more than 300 astronomers around the world, announced on May 12 it had produced an image of a supermassive black hole at the center of the Milky Way, only the second image of its kind in history.

+ +

EHT made that initial history in 2019 when it shared a dramatic image of a black hole at the center of the M87 galaxy, 55 million light-years from Earth, thereby taking black holes from a theoretical concept to an observable phenomenon.

+ +
+
+
This video depicts all of the black hole simulations that were conducted using the Open Science Pool platform. A small number of these simulations are selected as “best bet models” that help validate the observed telescope data gathered by the Event Horizon project. Visualization credit: Ben Prather, University of Illinois at Urbana-Champaign. Image library credit: EHT Theory Working Group, CK Chan.
+
+ +

For this newest image, EHT harnessed the power of the OSPool that is operated by the OSG Consortium to help with the computational challenge behind this work. This required the execution of more than 5 million computational tasks that consumed more than 20 million core hours. Most of the computations took place over a 3-month period in 2021.

+ +
+ Miron Livny +

Miron Livny
+
+ +

The OSG fabric of services has become the computational backbone for science pursuits of all sizes – from single investigators to international collaborations like EHT. Based on the high-throughput computing (HTC) principles pioneered by UW-Madison computer scientist and Morgridge Institute for Research investigator Miron Livny, the OSG services address the need of research projects to manage workloads that consist of ever-growing ensembles of computational tasks. Researchers can place these workloads at OSG Access Points and harness the capacity of the OSPool that is provided by contributions of more than 50 institutions across the country.

+ +

Over the decades, large international collaborations have been leveraging the OSG services to chase cosmic neutrinos at the South Pole, identify gravitational waves generated billions of miles away in space, and discover the last puzzle piece of particle physics, the Higgs boson.

+ +

Chi-Kwan “CK” Chan, a University of Arizona astronomer who coordinates the EHT simulation work, says the project uses data from 8 telescopes around the world. He says that since getting plugged into the OSG services in 2020, it has become a “critical resource” in producing the millions of simulations that help validate physical properties not directly “seen” by these telescopes — like temperature, density and plasma parameters.

+ +

“And once we pull together these many computed images across many parameters, we’re able to compare our simulations with our observations and develop a truer picture of the actual physics of a black hole,” Chan says.

+ +

“Simulation is especially important in astronomy, because our astrophysical system is so complicated,” he adds. “Using the OSG services allows us to discard hundreds of thousands of parameters and find the configurations that work the best.”

+ +
+

“It improved our science an order of magnitude.” +– CK Chan

+
+ +

Chan adds that the OSG consortium also provides the storage the EHT simulation work needs, which allows data to exist in one place and makes it easier to manage. The bottom line is that OSG greatly improves the effectiveness of the EHT simulation work. Chan estimates that the partnership enabled the EHT scientists to accomplish in three months what might take three years with conventional methods.

+ +

“It improved our science an order of magnitude,” Chan adds. “There are so many more parameters of space that we can explore.”

+ +

The EHT collaboration was triggered through contacts at the National Science Foundation (NSF) Office for Advanced Cyberinfrastructure (OAC). “Following our commitment to leverage NSF investments in cyberinfrastructure, we reached out to CK and it turned out to be a perfect match,” Livny says.

+ +

NSF has been a vital supporter of the OSG Consortium since its origin in 2005, and this is a perfect example of a collaboration between two NSF funded activities, Livny says. In 2020, NSF launched the $22.5 million Partnership to Advance Throughput Computing (PATh), with a significant presence at the UW-Madison Computer Sciences Department and the Morgridge Institute for Research. That partnership is helping to expand the adoption of HTC and advance the HTC technologies that power the OSG Services.

+ +

Livny, who serves as principal investigator of PATh, says the EHT computational workload is the equivalent of having several million individual tasks on your to-do list. The HTC principles that underpin the OSG services provide effective means to manage such a long, and sometimes interdependent, to-do list. “Otherwise, it’s like trying to fill up a swimming pool one teaspoon at a time,” he says.

+ +

Chan and his team of researchers at Arizona, Illinois, and Harvard worked closely with the OSG team of research facilitators to optimize the impact of OSG services on their high throughput workloads. Led by UW-Madison facilitator Lauren Michael, the team provided the EHT group with the necessary storage, advised their workload automation policies, and helped them with moving results back to the Arizona campus.

+ +

Livny emphasizes that the OSG services are founded on the principles of sharing and mutual trust. Any U.S. researcher can bring their computational workload to an OSG Access Point and any U.S. institution can contribute computing capacity to the OSPool.

+ +

“I like to say that you don’t have to be a super person to do super high-throughput computing,” says Livny.

+ +

+ +

This article is courtesy of the Morgridge Institute for Research. Find the original article on the Morgridge Institute’s news page.

+ +

To read more about this discovery you can find other articles covering this event below:

+ + + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/EOL-OSG.html b/preview-fall2024-info/EOL-OSG.html new file mode 100644 index 000000000..0a2a1e616 --- /dev/null +++ b/preview-fall2024-info/EOL-OSG.html @@ -0,0 +1,359 @@ + + + + + + +Retirements and New Beginnings: The Transition to Tokens + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Retirements and New Beginnings: The Transition to Tokens +

+

May 1, 2022 officially marked the retirement of OSG 3.5 and its GridFTP and GSI dependencies. OSG 3.6, up and running since February of 2021, has taken its place, relying instead on WebDAV and bearer tokens.

+ +

In December of 2019, OSG announced its plan to transition towards bearer tokens and WebDAV-based file transfer, which would ultimately culminate in the retirement of OSG 3.5. Nearly two and a half years later, after significant development and work with collaborators on the transition, OSG marked the end of support for OSG 3.5.

+ +

OSG celebrated the successful and long-planned OSG 3.5 retirement and transition to OSG 3.6, the first version of the OSG Software Stack without any Globus dependencies. Instead, it relies on WebDAV (an extension to HTTP/S allowing for distributed authoring and versioning of files) and bearer tokens.

+ +

Jeff Dost, OSG Coordinator of Operations, reports that the transition “was a big success!” Ultimately, OSG made the May 1st deadline without having to backtrack and put out new fires. Dost notes, however, that “the transition was one of the most difficult ones I can remember in the ten plus years of working with OSG, due to all the coordination needed.”

+ +

Looking back, for nearly fifteen years, communications in OSG were secured with X.509 certificates and proxies via the Grid Security Infrastructure (GSI) as an Authentication and Authorization Infrastructure (AAI).

+ +

Then, in June of 2017, Globus announced the end of support for its open-source Toolkit, which the OSG depended on. In October, the community established the Grid Community Forum (GCF) to continue supporting the toolkit (as the Grid Community Toolkit, or GCT) and ensure that research could continue uninterrupted.

+ +

While the OSG continued contributing to the GCT, the long-term goal was to transition the research community from X.509 proxy authentication to token-based pilot job authentication.

+ +

A more detailed description of the OSG-LHC GridFTP and GSI migration plans can be found in this document. Please visit the GridFTP and GSI Migration FAQ page if you have any questions. For more information and news about OSG 3.6, please visit the OSG 3.6 News release documentation page.

+ +

+ +

If you have any questions about the retirement of OSG 3.5 or the implementation of OSG 3.6, please contact help@osg-htc.org.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/European-HTCondor-Week.html b/preview-fall2024-info/European-HTCondor-Week.html new file mode 100644 index 000000000..f3ce1db86 --- /dev/null +++ b/preview-fall2024-info/European-HTCondor-Week.html @@ -0,0 +1,361 @@ + + + + + + +Save The Date for the European HTCondor Workshop, September 24-27 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Save The Date for the European HTCondor Workshop, September 24-27 +

+

This year’s European HTCondor Workshop will be held from September 24 to 27, hosted by NIKHEF-Amsterdam, the Dutch National Institute for Subatomic Physics, in the beautiful Dutch capital city of Amsterdam.

+ +

The workshop will be an excellent occasion for learning about HTCondor from the sources (the developers!), exchanging experiences and plans with your colleagues, and providing your feedback to the experts. The HTCondor Compute Entrypoint (CE) will be covered as well. Participation is open to all organizations (including companies) and persons interested in HTCondor (and is by no means restricted to particle physics and/or academia!). If you know potentially interested persons, don’t hesitate to make them aware of this opportunity.

+ +

The workshop will cover both using and administering HTCondor; topics will be chosen to best match participants’ interests. We would very much like to hear about your use of HTCondor in your project, your experience, and your plans. You are warmly encouraged to propose a short presentation.

+ +

There will also be time and space for short, perhaps spontaneous, interactive contributions (“show us your toolbox” sessions), which proved very popular at previous meetings.

+ +

Registration and abstract submission will be opened in due course.

+ +

To ease travel, the workshop will begin Tuesday morning and end around Friday lunchtime.

+ +

View the event website for more details.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/Fulvio.html b/preview-fall2024-info/Fulvio.html new file mode 100644 index 000000000..a3f5c40e9 --- /dev/null +++ b/preview-fall2024-info/Fulvio.html @@ -0,0 +1,402 @@ + + + + + + +Using high throughput computing to investigate the role of neural oscillations in visual working memory + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Using high throughput computing to investigate the role of neural oscillations in visual working memory +

+

Jacqueline M. Fulvio, lab manager and research scientist for the Postle Lab at the University of Wisconsin-Madison, explains how she used the HTCondor Software Suite to investigate neural oscillations in visual working memory.

+ +
+ Computer rendering of DNA. +
Photo by geralt on Pixabay.
+
+ +
+ Jacqueline M. Fulvio +
Jacqueline M. Fulvio, lab manager and research scientist for the Postle Lab at the University of Wisconsin-Madison
+
+ +

If you could use a method of analysis that gives better insights into your research, you’d want to use it. The catch? It can take months to analyze one set of data.

+ +

Jacqueline M. Fulvio, a research scientist for the Postle Lab at the University of Wisconsin-Madison, explained at HTCondor Week 2022 how she overcame this problem using high throughput computing (HTC) in her analysis of neural oscillations’ role in visual working memory.

+ +

The Postle Lab analyzed the patterns of brain waves recorded from participants as they performed working memory tasks using an HTC workflow. Visual working memory is the brain process that temporarily allows us to maintain and manipulate visual information to solve a task. First, participants were given a sample with two images to memorize for two seconds. Then the images disappeared, and, following a five-second delay, participants were given a cue that indicated which item in memory would later be tested. The experimenter then delivered a single pulse of transcranial magnetic stimulation (TMS) to the participants’ scalp on half of the trials. TMS alters brain function, so Fulvio and her collaborators looked for corresponding impacts on participants’ brain waves recorded in an electroencephalogram (EEG). Finally, the participants indicated whether the image shown on the screen matched the original sample item.

+ +
+ Photo from Fulvio's presentation during HTCondor Week 2022 +
Photo from Fulvio's presentation during HTCondor Week 2022
+
+ +

After collecting and processing the data from the EEG, they can analyze the neural oscillations (or brain waves) to understand how they change throughout the task. Previous results have shown that the frequency of neural oscillations is associated with working memory processes.

+ +

“In our current work, we wanted to more deeply investigate the role of these neural oscillations in working memory,” Fulvio states, “we chose to leverage an analysis called spatially distributed phase coupling extraction with a frequency-specific phases model (SPACE-FSP).” This analysis is a multi-way decomposition of the EEG data.

+ +

The number of decomposable networks can’t be determined analytically, so the group estimates it using decomposition. Finding the optimal decomposition is an iterative process that starts with a statistical criterion and a set number of oscillating networks, which incrementally increase until they can no longer achieve the criterion. As a result, a single decomposition can take up to several months to complete.

+ +

Although this method provides better insight into what Fulvio and her group want to analyze, “this remains a largely unused approach in the field.” Fulvio speculates that other scientists in the field often don’t use this kind of analysis because it’s very computationally demanding. “This is where high throughput [computing] came in for us.”

+ +

Fulvio and her team planned to analyze at least 186 data sets, which, at the time, “seemed insurmountable.” The HTC capabilities of HTCondor offered them a solution to this problem by running the decompositions in parallel using the capacity of a campus-wide shared facility. They also had the opportunity to utilize the MATLAB parallel pool compatibility, which helped scale out the processing.
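As a rough sketch of what one such batch of long-running decomposition jobs could look like in an HTCondor submit file (the script and file names and the resource values are assumptions, not the lab’s actual setup):

# Hypothetical submit file for long-running decomposition jobs.
# Script and file names and resource values are assumptions, not the lab's setup.
executable     = run_decomposition.sh
arguments      = dataset_$(Process).mat
# Multiple cores per job, reflecting the MATLAB parallel pool usage noted above
request_cpus   = 4
request_memory = 8GB
output         = decomp_$(Process).out
error          = decomp_$(Process).err
log            = decomp.log
# One job per decomposition; 42 matches the job count mentioned above
queue 42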

+ +

The group started following the HTC paradigm because their lab had already used services provided by the UW-Madison Center for High Throughput Computing (CHTC) for some time. Fulvio’s supervisor, Dr. Bradley Postle, suggested setting up a meeting and seeing if what they needed could be achieved using the capacity offered by CHTC.

+ +

Fulvio has an extensive coding history, but when she did run into compiling problems, she found the office hours offered by the CHTC Research Computing Facilitators extremely helpful: “I got useful tips from the staff in figuring out what was going wrong and what I needed to fix!”

+ +

The group ran 42 jobs, each job taking anywhere from two days to two weeks to run. The initial results of the analyses were promising, but the two data analysis pipelines the group tried were insufficient to address some of the critical questions.

+ +

After re-running the analyses using new data Fulvio collected, she overcame some limitations of the prior dataset to address the original questions. For this dataset, the group ran almost twice as many jobs – 72 – with each one again taking anywhere from two days to two weeks to run.

+ +

The group updated the analysis once more, increasing the data chunks from 500 milliseconds to 1 second. They also combined the data into a single pipeline instead of splitting it into different chunks for two separate analyses.

+ +

The goal of this update was to increase the amount of data they were sending, which in turn increased the amount of time it took to do these decompositions. More data resulted in a more robust and interpretable statistical result.

+ +

“All versions of the analyses were ultimately successful,” Fulvio comments. “We’ve benefited significantly from this process.” Their final analysis obtained 1,690 components – a “fantastic number” for their data analyses.

+ +

“We had such good support along the way so we could get this going,” Fulvio notes. In addition, what could have been years of computing on their lab machines was condensed into merely months for each analysis iteration.

+ +

The group also conducted one more analysis, as “[this] experience helped us think about a special control analysis,” Fulvio remarks. The group carried out hundreds of jobs within a day using this separate analysis, giving them rapid confirmation through the control analysis results.

+ +

Fulvio reflects, “from our research group’s broad perspective, OSPool capacity accessible via the CHTC have significantly expanded our computational capabilities.” Although computationally demanding, these resources helped the group apply this better-suited analysis method to address their original questions.

+ +

From a more personal perspective, Fulvio notes that learning how to take advantage of this OSPool capacity has improved her skills, including coding. These resources allowed her to work with additional languages and sharpened her ability to optimize code.

+ +

Fulvio concludes that “this has allowed us to help advance our field’s understanding, address key questions in the grant funding the research, and it provides the opportunity to reconsider other established findings and fill gaps in understanding of those studies.”

+ +

+ +

Watch a video recording of Jacqueline M. Fulvio’s talk at HTCondor Week 2022, and browse her slides.

+ + + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/GLUE-lab.html b/preview-fall2024-info/GLUE-lab.html new file mode 100644 index 000000000..d0fcdc766 --- /dev/null +++ b/preview-fall2024-info/GLUE-lab.html @@ -0,0 +1,407 @@ + + + + + + +How the GLUE Lab is bringing the potential of HTC to track the movement of cattle and land use change + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ How the GLUE Lab is bringing the potential of HTC to track the movement of cattle and land use change +

+

Researching land use change in the cattle sector is just one of several large projects where the GLUE Lab is working to apply HTC.

+ +
+ Cattle grazing grass on the Cerrado in rural Mato Grosso, Brazil. +
Cattle grazing grass on the Cerrado in rural Mato Grosso, Brazil. +
+
+ +

It was during a Data Science Research Bazaar presentation led by OSG Research Facilitation Lead Christina Koch in early 2023 that the GLUE Lab became more familiar with the Center for High Throughput Computing (CHTC), says Matthew Christie, the Technical Lead of the Global Land Use and Environment Lab (GLUE) based in Madison, Wisconsin. “That planted the seed for what the center [CHTC] could offer us,” Christie says.

+ +
+ GLUE Lab technical lead Matthew Christie. +
GLUE Lab technical
lead Matthew Christie. +
+
+ +

The GLUE Lab studies how land across the world is being used for agriculture and the systems responsible for land use change. Christie — who researches land use in Brazil with a +focus on how the Amazon and Cerrado biomes are changing as natural vegetation recedes — takes data describing the cattle supply chain in Brazil and integrates it into a single +database the GLUE Lab can use for research. With this data, the lab also aims to inform policy decisions by the Brazilian government and international companies.

+ +

In the Amazon, Christie says, one of the main systems causing land use change is in the cattle sector, or the production of cattle. “One of the motivating facts of our research +is that 80% of forest cleared in the Amazon is cleared in order to raise cattle. And so we’re interested in understanding the cattle supply chain, how it operates, and what it +looks like.” The lab gets its data from the Rural Environmental Registry (CAR), which is a public property boundary registry data from Brazil, and the Guide to Animal Transport +(GTA), which records animal movement and sales in Brazil.

+ +

The possibilities of utilizing high throughput computing (HTC) for the lab’s research intrigued Christie, who had some awareness of HTC from the research bazaar and had even started refactoring some of the lab’s data pipeline, but he wanted to learn more than he could from introductory tutorials alone. He applied to and attended the OSG School in the summer of 2023. He and other lab members believed their work, with its big data sets and large numbers of jobs, could benefit from the School’s training with HTCondor, the workload management application developed by the CHTC for HTC.

+ +

Upon realizing the lab’s work could greatly benefit from the OSG School, Christie used a “test case” project that resembled a standard research project to model a task with many independent trials, finding how, for the first time, HTC could prove useful for GLUE Lab research. The specific project Christie worked on during the School using HTC was to compute simulated journeys of cows through properties in Brazil’s cattle supply chain. By the end of the week-long School, Christie says using HTC scaled up the modeling project by a factor of 10. In this sense, HTC is the “grease that makes our research run more smoothly.”

+ +

Since attending the School, witnessing the test case’s success with HTC, and discovering ways its other research projects could benefit, the GLUE Lab has begun shifting to applying +HTC. However, this process requires pipeline changes lab members are currently working through. “We have been in the process of working through some of our big projects that we +think really could benefit from these resources, but that in itself has a cost. Currently, we’re still in the process of writing or refactoring our pipelines to use HTC,” Christie +elaborates.

+ +

For a current project, Christie mentions he and other GLUE Lab members are looking at how to adapt their code to HTC without having to rewrite all of it. With the parallelism that +HTC offers compared to the single computing environment the lab used before to run its data pipeline, each job now has its own environment. But it’s complex “leveraging the +parallelism in our database build pipeline. Working on that is an exercise, but with handling data, there are many dependencies, and you have to figure out how to model them.” +Christie says lab members are working on adjusting the workflow to ensure each job has the data it needs before it can run. While this can sometimes be straightforward, +“sometimes a step in the pipeline has special inputs that are unique to it. With many steps in the pipeline, properly tracking and preparing all this data has been the main source +of work to get the pipeline to run fully using HTC.”
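In HTCondor, the kind of per-job data staging Christie describes is commonly declared in the submit file, so each job carries the files it needs into its own execution environment. A minimal sketch, with all file names hypothetical:

# Hypothetical submit file fragment declaring per-job input data,
# so each job carries the files it needs to its own execution environment.
# All file names here are illustrative.
executable              = build_step.sh
transfer_input_files    = step_inputs_$(Process).tar.gz, shared_config.yaml
should_transfer_files   = YES
when_to_transfer_output = ON_EXIT
output                  = step_$(Process).out
error                   = step_$(Process).err
log                     = pipeline.log
queue 50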

+ +

For now, Christie says cutting down the two-day run time of their database build pipeline to just a matter of hours with HTC “would be a wonderful improvement that would accelerate +deployment and testing of this database. It would let us introduce new features and catch bugs faster.”

+ +
+ Smoke rising over recently burned pastures in Alto Boa Vista, Brazil. +
Smoke rising over recently burned pastures in Alto Boa Vista, Brazil. +
+
+ +

Christie recognizes that the strength of the CHTC comes not only from its vast computing capacity but also from the people running it behind the scenes, and that it’s free for researchers at UW–Madison, distinguishing it from other platforms and drastically lowering the entry barrier for researchers who want to scale up their research projects. “Instead of waiting months or years to receive funding for cloud resources, they can request an account and get started in a matter of weeks,” Christie says.

+ +

Christie values the unique opportunity to attend office hours and meet with facilitators, which makes his experience special. “I would definitely recommend that people look at this +invaluable resource that we have on campus. Whether your work is with high throughput or high performance computing, there are offerings for both that researchers should consider,” +Christie says.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/Garcia.html b/preview-fall2024-info/Garcia.html new file mode 100644 index 000000000..eb5eab1f0 --- /dev/null +++ b/preview-fall2024-info/Garcia.html @@ -0,0 +1,386 @@ + + + + + + +Using HTC and HPC Applications to Track the Dispersal of Spruce Budworm Moths + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Using HTC and HPC Applications to Track the Dispersal of Spruce Budworm Moths +

+

Matthew Garcia, a Postdoctoral Research Associate in the Department of Forest & Wildlife Ecology at the University of Wisconsin–Madison, discusses how he used the HTCondor Software Suite to combine HTC and HPC capacity to perform simulations that modeled the dispersal of budworm moths.

+ +
+ Photo from Matthew Garcia's presentation at HTCondor Week 2022. +
Photo from Matthew Garcia's presentation at HTCondor Week 2022.
+
+ +

Spruce budworms are small, caterpillar-like insects that enjoy munching on balsam fir and spruce trees. What the budworms lack in size, they make up for in total forest devastation; within five to six years, the budworm kills the tree entirely. An example of this can be seen in the image above from eastern Canada, with the brown trees being “pretty much dead.”

+ +

Matthew Garcia, a Postdoctoral Research Associate in the Department of Forest & Wildlife Ecology at the University of Wisconsin–Madison, examined the flight behavior of these budworm moths. He aims to determine where the budworms disperse to stop them from causing these mass tree deaths. His research combines high throughput computing (HTC) and high-performance computing (HPC) applications.

+ +
+ Biological process of budworm moths +
Biological process of budworm moths
+
+ +

Garcia’s project takes a closer look at the biological process of the species. He’s looking at the dispersal of adult spruce budworm moths in the summertime, as this is a process least understood by researchers in the field.

+ +

Working with collaborators at the U.S. and Canadian Forest Services, Garcia’s study of budworm dispersal tracks the budworm’s movement from where they grew up defoliating the fir and spruce trees to where they mate and drop their eggs. This biological process is driven mainly by weather and lasts about a year, though the adult phase is the period Garcia has focused on for his work thus far.

+ +

In January 2022, Garcia published “Modeling weather-driven long-distance dispersal of spruce budworm moths. Part 1: Model Description.” This individual-based model of moth behavior was developed in Python and is heavily dependent on weather model outputs. Garcia is currently working on “Part 2: Parameter calibration and feedback” that will supplement the early model results and compare them with radar observations of moth flight events.

+ +

Garcia uses two modeling workflows to obtain the results of his study. He uses a combination of HTC and HPC for the weather modeling workflow, with the main weather model running on the HPC system and numerous pre-and post-processing tasks running on HTC. For the second workflow, he developed a Markov chain Monte Carlo (MCMC) modeling process for the flight simulation currently running at the CHTC.

+ +

For the weather modeling workflow, Garcia runs the pre-processing using HTC, which takes in one month of historical weather data at a time and takes just about a day to complete. The pre-processing provides the initial and boundary conditions to the weather simulations. He then runs the Weather Research & Forecasting (WRF) model as an HPC application, feeding the output from the pre-processing as input to the WRF model, which takes a little over six hours to generate one day of high-resolution output. Finally, the WRF model output returns to HTC for post-processing, reducing the data to just the variables needed for the budworm flight model.

+ +

For the flight modeling workflow, Garcia runs a pre-processing step using HTC to determine the pool of available moths for the flight simulations; each simulation randomly selects a thousand moths out of the pool. He then uses the post-processed temperature and wind fields from the WRF model output to tell the moths when to fly and where to go in the flight model. Garcia runs ensembles of flight simulations to obtain a good sample of the moth population available on a given night. These simulations then run sequentially over the nights in the seasons when moths are emerging, flying, and laying eggs just about everywhere they land.

+ +
+ Garcia's workflow +
Garcia's workflow
+
+ +

“HTCondor developers have been immensely helpful in making sure that I can fit the HPC component into the middle of this larger DAGMan process,” Garcia notes. He uses DAGMan workflow scripts from HTCondor to organize his workflows with mixed submission protocols.
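The overall shape of such a workflow description is simple, even though Garcia’s actual DAG mixes submission protocols so the WRF step can run on an HPC system. A minimal DAGMan sketch, with illustrative node and file names:

# Minimal DAGMan sketch of the HTC -> HPC -> HTC pattern described above.
# Node and file names are illustrative.
JOB preprocess   preprocess.sub
JOB wrf          wrf_hpc.sub
JOB postprocess  postprocess.sub
PARENT preprocess CHILD wrf
PARENT wrf CHILD postprocess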

+ +

Garcia combines all the collected information and calculates the moths’ survival likelihood. He has demonstrated that adult dispersal is almost entirely weather-driven and occurs almost nightly during summer and that males and females have different flight capabilities.

+ +

“I love this because I can easily take that pre-processing part of the DAG and make it its own node to build in more biological processes for the daytime part of the model,” Garcia remarks. “I can then relatively easily expand the scope of the whole DAG to cover more of the seasonal or annual biological cycle model.”

+ +

Garcia concludes, “everything’s going great – there are no pain points, everything is looking good, and my colleagues and I are very excited about the modeling results we’re seeing.”

+ +

+ +

Watch a video recording of Matthew Garcia’s talk at HTCondor Week 2022, and browse his slides.

+ + + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/Gillett.html b/preview-fall2024-info/Gillett.html new file mode 100644 index 000000000..ee82fa009 --- /dev/null +++ b/preview-fall2024-info/Gillett.html @@ -0,0 +1,376 @@ + + + + + + +UW Statistics Course using HTC + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ UW Statistics Course using HTC +

+

For the first time, UW Statistics undergraduates could participate in a course teaching high throughput computing (HTC). John Gillett, lecturer of Statistics at the University of Wisconsin-Madison, designed and taught the course with the support of the Center for High Throughput Computing (CHTC).

+ +
+ Photo by Chris Liverani from Unsplash +
Photo by Chris Liverani from Unsplash
+
+ +
+ John Gillett +
John Gillett
+
+ +

This past spring HTC was introduced to a new realm – the inside of an undergraduate statistics course. John Gillett, a lecturer in the Statistics department at the University of Wisconsin-Madison, unveiled a new special topics course, Statistics 479, to undergraduate students in the spring of 2022. The course introduced students with little programming experience to a robust and easy-to-learn approach that they could use to tackle significant computational problems. “The basics of distributed computing are easy to learn and very powerful,” Gillett explained. “[That’s why] it fit with the CHTC – I knew they could give the students and me the computing capabilities and support.”

+ +

This class was created as an undergraduate counterpart to the graduate-level course, Statistics 605, which Gillett has taught since the Spring of 2017. The course includes learning basic distributed computing to analyze data sets too large for a laptop.

+ +

Gillett reached out to research computing facilitator Lauren Michael in 2016. He hoped to learn how he could teach his students easy parallel computing. He settled on HTC, as it was easiest for helping students do large computations. “This was an easy path for me,” the teacher remarked, “and everyone at the CHTC made it easy.”

+ +
+ Christina Koch +
Christina Koch
+
+ +

Research Facilitator Christina Koch guest lectured in 2017 when the graduate class was first offered, and every semester since. She talks to the students about the CHTC and high throughput computing and has them run a few jobs. Koch notes that this partnership between the CHTC and Gillett’s class has been “a win-win; we get to share about our system and how people run things, and he gets to have this interesting, hands-on assignment for his class.”

+ +

Gillett created an assignment that involves using HTC on a real data set with the help of Christy Tremonti, a UW-Madison Astronomy professor. Tremonti had a research problem that required searching through many astronomical spectra of galaxies for a particular type corresponding to a gravitationally lensed Lyman-break galaxy. “In the beginning, she gave a lot of good, critical feedback for the research element of this,” Gillett explained. She guided the students through large-scale computations during the first few semesters. As he reflects on this partnership, Gillett beams, “this was exciting too – we were doing unknown statistics on a real research problem. We didn’t know what the right answer was!”

+ +

Gillett remarked that his students enjoy working with the CHTC; “[the students] now understand how to work a parallel computing environment,” he noted. “They get excited about the power they now have to extract solutions from big piles of data.” This course offers students simple, powerful tools to do just that.

+ +

Gillett appreciated the help and support he received from the CHTC in developing this course: “I needed a little more knowledge and their willingness to help support the students and me.” The technologies and services that the CHTC develops for HTC gave Gillett an easy and accessible way to teach his students programming and computational thinking skills that they’ll be able to carry with them.

+ +

“Students go from being weak programmers to not being intimidated by big data sets and computations that they wouldn’t have been able to consider otherwise. I’m proud about that.” These individuals come out of these classes with a different kind of confidence about data problems – and that is priceless.

+ +

+ +

John Gillett is currently looking for new researchers with whom his students could collaborate. If you are a researcher who can provide a reasonably large and accessible dataset, a question, and guidance, please reach out to jgillett@wisc.edu.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/Google-HTCondor.html b/preview-fall2024-info/Google-HTCondor.html new file mode 100644 index 000000000..dec668333 --- /dev/null +++ b/preview-fall2024-info/Google-HTCondor.html @@ -0,0 +1,368 @@ + + + + + + +Google Quantum Computing Utilizing HTCondor + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Google Quantum Computing Utilizing HTCondor +

+

Google’s launch of a Quantum Virtual Machine emulates the experience and results of programming one of Google’s quantum computers, managed by an HTCondor system running in Google Cloud.

+ +
+ Quantum AI Logo. Image from Quantum AI Product Manager Catherine Vollgraff Heidweiller’s research blog post. +
Quantum AI Logo. Image from Quantum AI Product Manager Catherine Vollgraff Heidweiller’s research blog post.
+
+ +

The CEO of Google and Alphabet, Sundar Pichai, tweeted out some thrilling news:

+ +

“Excited to launch a Quantum Virtual Machine (QVM) which emulates the experience and results of programming one of our quantum computers. It will make it easier for researchers to prototype new algorithms and help students learn how to program a quantum computer.” – Tweet.

+ +

Today’s “classical” computing systems, from laptops to large supercomputers, are built using circuit behavior defined by classical physics. Quantum computer circuitry, still in the early phases of development, harnesses the laws of quantum mechanics to solve computing problems in new ways. Quantum computers offer exponential speedups – over 100 million times faster for specific problems – to produce groundbreaking results. However, quantum computing will require scientists and engineers to revisit many classical algorithms and develop new ones tailored to exploit the benefits of quantum processors. Therefore, the QVM is a helpful tool for quantum algorithms research.

+ +

“The QVM is, in essence, a realistic simulation of a grid on our quantum hardware using classical computers,” Tom Downes, a consultant for High-Performance Computing (HPC) at Google Cloud, explains. Simulating a grid of qubits, the basic unit of quantum information, on a quantum processor requires many trajectory simulations of quantum noise. Downes explains, “quantum computers are noisy, so it is important to test and adjust your quantum circuits in realistic conditions so they can perform well and output the data you are looking for in your research problem. To virtualize a processor, the QVM uses the noise data and topology of Google’s real hardware.” The grid size determines whether a researcher can use their laptop or requires a setup utilizing many classical computers to power the simulation. Essentially, research on the QVM is “proof of concept” research.

+ +

To enable researchers to test their algorithms on a larger grid of qubits, Google utilized the HTCondor Software Suite (HTCSS) to organize the capacity of many classical computers to run multiple simulations of a quantum circuit simultaneously. The HTCondor Software Suite enables researchers to easily harness the collective computing power of many classical computers and submit and manage large numbers of computing jobs. Today, HTCSS is used at universities, government labs, and commercial organizations worldwide, including within Google’s own Google Cloud Platform, to power the QVM. Downes details, “this ability to test on a 32-qubit grid makes extrapolating performance to a non-simulatable grid more feasible.”
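The pattern Downes describes maps naturally onto an HTCondor submit file that fans one noisy-circuit simulation out into many independent trajectory jobs. The sketch below is illustrative only; the script name, arguments, and counts are assumptions, not Google’s actual tutorial code:

# Hypothetical submit file fanning one noisy-circuit simulation out into
# many independent trajectory jobs; names and counts are assumptions.
executable     = simulate_circuit.sh
arguments      = --trajectory $(Process)
request_cpus   = 1
request_memory = 4GB
output         = qvm_$(Process).out
error          = qvm_$(Process).err
log            = qvm.log
queue 1000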

+ +

The new Google Quantum AI tutorial shows users how to use the Cloud HPC Toolkit, which makes it easy for new users to deploy HTCondor pools in Google Cloud. Downes explains that the tutorial “provides the basic elements of an HTCondor pool: a central manager, an access point, and a pool of execute points that scale in size to work through the job queue.”

+ +

The tutorial by Google describes how to:

+
    +
  • Use Terraform to deploy an HTCondor cluster in Google Cloud
  • +
  • Run a multi-node quantum computing simulation using HTCondor
  • +
  • Query cluster information and monitor running jobs in HTCondor
  • +
  • Use Terraform to destroy the cluster
  • +
+ +

Please visit this website for more information about the Quantum Virtual Machine and how researchers can use HTCondor for multinode quantum simulations.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/HTC24.html b/preview-fall2024-info/HTC24.html new file mode 100644 index 000000000..ff605185b --- /dev/null +++ b/preview-fall2024-info/HTC24.html @@ -0,0 +1,441 @@ + + + + + + +Save the Dates for Throughput Computing 2024 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+
+
+
+
+

+ + + + + + + + Past Event | July 8-12 + +

+
+
+
+
+ +
+
+
+ +
+
+
+
+
+

+ Save the Dates for Throughput Computing 2024 +

+
+
+
+ + +
+
+ +

Don’t miss this opportunity to reconnect with colleagues and learn more about HTC.

+ +

Join us for the second annual integrated Throughput Computing event — which combines HTCondor’s former annual event “HTCondor Week” and the OSG’s “All-Hands Meeting” — from July 8-12 to be held at the University of Wisconsin-Madison’s Fluno Center. HTC24 is sponsored by the OSG Consortium, the HTCondor team and the UW-Madison Center for High Throughput Computing.

+ +

Registration will open in March. This will primarily be an in-person event, but remote participation (via Zoom) for the many plenary events will also be offered. +If you register for the in-person event at the University of Wisconsin–Madison, you can attend plenary and non-plenary sessions, mingle with colleagues, and have planned or ad hoc meetings. Evening events are also planned throughout the week.

+ +

The Agenda

+ +

All the topics typically covered by HTCondor Week and the OSG All-Hands Meeting will be included:

+ +
    +
  • Science Enabled by the OSPool and the HTCondor Software Suite (HTCSS)
  • +
  • OSG Technology
  • +
  • HTCondor Technology
  • +
  • HTCondor and OSG Tutorials
  • +
  • State of the OSG
  • +
  • Campus Services and Perspectives
  • +
+ +

Questions and Resources

+ +

For questions about attending, speaking, accommodations, and other concerns please contact us at htc@path-cc.io.

+ +

To learn about this event in more detail, view last year’s schedules for HTC23:

+ + + +
+
+
+ +

Dates

+ +

Monday, July 8 through Friday, July 12, 2024.

+ +

Who

+ +

Organizations, researchers, campuses, facilitators and administrators interested in the HTCondor Software Suite and high throughput computing or the OSG Consortium resources or services (including the OSPool, the Open Science Data Federation, the Pelican Platform, or the PATh Facility.)

+ +

Where

+ +

Fluno Center on the University of Wisconsin-Madison campus and Online via Zoom.

+ +

Registration

+ +

Registration for Throughput Computing 2024 will open in March.

+ +

Questions?

+ +

Please email htc@path-cc.io with any questions.

+ +
+
+
+
+
+ +
+
+ + +
+ +
+
+
+ + +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/HTCondorWeek-Photos.html b/preview-fall2024-info/HTCondorWeek-Photos.html new file mode 100644 index 000000000..e7456617f --- /dev/null +++ b/preview-fall2024-info/HTCondorWeek-Photos.html @@ -0,0 +1,434 @@ + + + + + + +A Long-Awaited Reunion: HTCondor Week 2022 in Photos + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ A Long-Awaited Reunion: HTCondor Week 2022 in Photos +

+
+ Collage of photos from HTCondor Week 2022 +
Images courtesy of Jeffrey Peterson and Jaime Frey
+
+ +

HTCondor Week 2022 featured over 40 exciting talks, tutorials, and research spotlights focused on the HTCondor Software Suite (HTCSS). Sixty-three attendees reunited in Madison, Wisconsin for the long-awaited in-person meeting, and 111 followed the action virtually on Zoom. Continue scrolling for a visual recap of the exciting week.

+ +
+ HTCondor Week attendees talking in the Fluno lobby +
Image courtesy of Jeffrey Peterson
+
+ +

To kick off the day, staff and attendees gather in the Fluno Lobby –– where there’s no shortage of coffee, snacks, or conversation.

+ +
+ Miron Livny looks back at his presentation slide, which welcomes attendees from different timezones. +
Image courtesy of Jeffrey Peterson
+
+ +

Miron Livny welcomes participants to HTCondor Week. In-person participants traveled from Illinois, Nebraska, and even Amsterdam. Those who tuned in virtually represented seven different countries.

+ +
+ Eric Wilcots speaking +
Image courtesy of Jeffrey Peterson
+
+ +

Eric Wilcots, Dean of the College of Letters & Science and the Mary C. Jacoby Professor of Astronomy at UW-Madison, delivered an inspiring keynote talk on the impact that high-throughput computing will bring on the future discoveries about our universe.

+ +
+ HTCondor Week Attendees gathered on their bikes, smiling for a picture +
Image courtesy of Jaime Frey
+
+ +

To wrap up the first day of HTCondor Week, staff and attendees embarked on a bike ride around Madison.

+ +
+ Justin Hiemstra presenting +
Image courtesy of Jeffrey Peterson
+
+ +

Justin Hiemstra, a Machine Learning Application Specialist for CHTC’s GPU Lab, describes the testing suite he developed to test for compatibility across ML frameworks and various GPU models in CHTC’s local HTC pool.

+ +
+ Emile listening to a presentation +
Image courtesy of Jeffrey Peterson
+
+ +

Emile Turatsinze, a systems administrator at the Morgridge Institute for Research, thoughtfully listens to a talk from Saqib Haleem about the CMS project’s transition to token-based authentication.

+ +
+ HTCondor Week attendees gathered around a large table at the terrace +
Image courtesy of Jeffrey Peterson
+
+ +

HTCondor Week staff and participants enjoy cold pitchers and tasty food on the Wisconsin Union Terrace during an evening sponsored by Google Cloud.

+ +
+ Yudhajit Pal presenting +
Image courtesy of Jeffrey Peterson
+
+ +

Yudhajit Pal, a member of the Schmidt research group in UW-Madison’s Department of Chemistry, briefly pauses while explaining how he used HTCSS-enabled machine learning to probe photoexcitation of iridium complexes.

+ +
+ Brian holding a microphone +
Image courtesy of Jeffrey Peterson
+
+ +

Brian Bockelman poses a question during the Q&A period following Sam Gelman’s presentation on using HTCSS for high-throughput molecular simulations of the protein sequence-function relationship.

+ +
+ Lauren Michael and Rafael Ferreira conversing +
Image courtesy of Jeffrey Peterson
+
+ +

Lively discussions filled the Fluno Auditorium between sessions. Pictured above are CHTC Research Computing Facilitator Lauren Michael and Ph.D. Candidate Rafael Ferreira of UW-Madison’s Department of Animal and Dairy Sciences.

+ +
+ Todd Tannenbaum, Mary Hester, Brian Bockelman, and Miron Livny standing outside smiling +
Image courtesy of Jeffrey Peterson
+
+ +

Todd Tannenbaum, Mary Hester, Brian Bockelman, and Miron Livny get some fresh air between talks.

+ +
+ Miron Livny speaking with the Audience in the foreground +
Image courtesy of Jeffrey Peterson
+
+ +

Miron Livny delivers closing remarks as the week winds down. Thank you to all who participated in HTCondor Week 2022. We hope to see you next year!

+ +

+ +

Watch all of the HTCondor Week 2022 video recordings and browse the presentation slides on the HTCondor website’s materials page, and access all materials from past meetings on our website.

+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/Hannah-Showcase.html b/preview-fall2024-info/Hannah-Showcase.html new file mode 100644 index 000000000..be2839765 --- /dev/null +++ b/preview-fall2024-info/Hannah-Showcase.html @@ -0,0 +1,359 @@ + + + + + + +Using HTC for a simulation study on cross-validation for model evaluation in psychological science + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ Using HTC for a simulation study on cross-validation for model evaluation in psychological science +

+
+ Brain Model +
Image by Robina Weermeijer on Unsplash
+
+ +

During the OSG School Showcase, Hannah Moshontz, a postdoctoral fellow at UW-Madison’s Department of Psychology, described her experience of using high throughput computing (HTC) for the very first time, when taking on an entirely new project within the field of psychology. While Hannah’s research generally focuses on understanding goal pursuit in everyday life, she and her colleagues had noticed that there seemed to be a lack of “best practices” for evaluating the quality of results from the field’s recent integration of machine learning approaches.

+ +

Describing the motivation behind the project, Hannah explains: “We were seeing a lot of published papers in top outlets that were incorrectly understanding and interpreting cross-validated model performance estimates. These models were described as usable for making diagnoses and clinical decisions.” This project, a simulation study, aimed to understand cross-validated performance estimates in psychology, and give guidance on how future psychological science researchers should use cross validation in their data.

+ +

While a typical machine learning study entails running tens of thousands of models, Hannah’s study required 144,000 times this number in order to evaluate results from numerous studies. With the total estimated compute time for the project being over one million hours, Hannah understood from the beginning that “high throughput computing was going to be essential.”

+ +

The Center for High Throughput Computing at UW-Madison worked with Hannah to help get her team’s simulations distributed on the Open Science Pool. Hannah used the programming language R to simulate data and train, select, and evaluate machine learning models. The output from each simulation batch came in the form of a zipped file that included a summary of the best model performance along with information about the model. Throughout the process, Hannah and her team tracked jobs in a spreadsheet to stay organized.

+ +

Reflecting on the impact of HTC on the study as a whole, she reasons, “without HTC, we couldn’t have conducted this study in my lifetime.” While this project was Hannah’s first taste of HTC, today she’s integrated it into many different facets of her work.

+ +

+ +

This article is part of a series of articles from the 2021 OSG Virtual School Showcase. OSG School is an annual education event for researchers who want to learn how to use distributed high throughput computing methods and tools. The Showcase, which features researchers sharing how HTC has impacted their work, is a highlight of the school each year.

+ + +
+ + + + + + + + + diff --git a/preview-fall2024-info/Hiemstra.html b/preview-fall2024-info/Hiemstra.html new file mode 100644 index 000000000..cf3c4d960 --- /dev/null +++ b/preview-fall2024-info/Hiemstra.html @@ -0,0 +1,402 @@ + + + + + + +Testing GPU/ML Framework Compatibility + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ Testing GPU/ML Framework Compatibility +

+

Justin Hiemstra, a Machine Learning Application Specialist for CHTC’s GPU Lab, discusses the testing suite developed to test CHTC’s support for GPU and ML framework compatibility.

+ +
+ Photo by Ali Shah Lakhani on Unsplash. +
Photo by Ali Shah Lakhani on Unsplash.
+
+ +

Researchers at UW–Madison have increasingly required graphics processing units (GPUs) for their work. GPUs are specialized computing hardware that drives different data science technologies, including machine learning (ML). But what actually goes into running an ML job on the UW-Madison Center for High Throughput Computing (CHTC) using GPU capacity?

+ +

Justin Hiemstra, a graduate student in the Department of Electrical and Computer Engineering at the University of Wisconsin-Madison and currently working as an ML Application Specialist for CHTC’s GPU Lab, outlined the steps for running an ML job on CHTC using GPU capacity during HTCondor Week 2022.

+ +

Whenever a researcher has an ML job that they want to run on CHTC with a GPU, they need three things:

+ +

First, the researcher must write their ML code using a deep learning framework, such as PyTorch or TensorFlow.

+ +

Second, the researcher needs to pick a GPU type. “You can run ML jobs on a normal server without GPUs, but certain machine learning processes (e.g., neural networks) run much faster if you use one,” notes Christina Koch, one of Hiemstra’s supervisors for his work. When using the HTCondor Software Suite, the researcher can choose a specific GPU type by specifying a CUDA compute capability in the HTCondor job submit file.

+ +
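For instance, a submit file might pin the GPU type with a couple of lines like the following (a hypothetical illustration, not taken from the talk; the capability value here is an assumption):

    request_gpus = 1
    require_gpus = (Capability >= 8.0)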

Third, the researcher has to pick a CUDA runtime library. This library handles communication between the GPU and the application space, allowing the ML code to run its computations.

+ +

For an ML job to complete successfully, these components (ML framework, GPU type, CUDA runtime) must be compatible.

+ +

Some issues come into play with this setup. The first issue is a lack of documentation. “There’s no central resource we can go to to look at and see if different versions of deep learning frameworks, GPUs, and capabilities are compatible with each other,” Hiemstra notes.

+ +

The second issue is that as these frameworks and GPU hardware evolve, Hiemstra and his team have noticed that newer releases have started to drop support for older frameworks and compute capabilities.

+ +

The third issue is that “whenever you have computing resources made up of discrete servers in a computing pool, you run into the issue of heterogeneous server configurations.” This heterogeneity adds to the confusion of trying to pick compatible versions.

+ +

Hiemstra has put together a suite of test jobs to explore this compatibility issue. The jobs test whether a single tuple of CUDA runtime, framework, and compute capability versions is compatible on a CHTC resource. He looks at three things to do so:

+ +

First, did the job match, meaning, was HTCondor able to find the resources it requested? Second, did the Conda environment resolve, meaning, could all of the versions be matched without any conflicting dependencies? Finally, was the framework able to communicate with the GPU? The job should run as expected if all three of these things happen. The test job will print out an error file if any of them fail.

+ +
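A minimal sketch of how the second and third checks could be scripted inside one test job (the environment name and version numbers are illustrative assumptions, not Hiemstra’s actual configuration):

    # Check 2: does Conda resolve this (framework, CUDA runtime) tuple?
    conda create -y -n test-tuple -c conda-forge python=3.9 pytorch=1.12 cudatoolkit=11.3 || exit 1

    # Check 3: can the framework communicate with the allocated GPU?
    conda run -n test-tuple python -c "import torch; assert torch.cuda.is_available(); print(torch.cuda.get_device_name(0))"

If either command fails, the resulting error message is what lands in the job’s error file for later review.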

When a job fails, it’ll give some indication as to where the error occurred. These messages get recorded and later reviewed by Hiemstra so he can try to understand better what’s happening on the GPU Servers in the CHTC GPU Lab.

+ +

The goal now for Hiemstra and his team is to “look at all of the different versions we might be interested in combining to see which ranges of compute capabilities, frameworks, and CUDA runtime libraries might work with each other.”

+ +

Issues arise when trying to analyze the entire version space. First, the version space to test grows combinatorially. “On an active system where research is being done, that’ll start gobbling up capacity and taking it away from the researchers,” Hiemstra remarks.

+ +

To prune the version space so they’re not testing tens of thousands of different combinations, Hiemstra and his team limit testing to the compute capabilities that the CHTC actually has available. In addition, they assume that researchers use tools, such as Conda, to install their software, so they focus on framework and CUDA runtime versions that are available through Conda.

+ +

The second issue is that the team needs some way of automatically collecting the different test parameters, so that no one at CHTC has to update them by hand continuously. Each job needs several files to run, and the team uses Python string formatting to generate these files dynamically.

+ +
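As a sketch of that idea (the file and variable names here are hypothetical), Python string formatting can stamp out one submit file per version tuple:

    # Generate an HTCondor submit file for one (framework, cuda, capability) tuple.
    template = (
        "executable = test_tuple.sh\n"
        "arguments = {framework} {cuda}\n"
        "request_gpus = 1\n"
        "require_gpus = (Capability == {capability})\n"
        "queue\n"
    )

    framework, cuda, capability = "pytorch", "11.3", "8.0"
    with open(f"test_{framework}_{cuda}_{capability}.sub", "w") as out:
        out.write(template.format(framework=framework, cuda=cuda, capability=capability))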

Finally, they’d like to find a way to manage all the jobs since they will continue to “fire off” hundreds of jobs during this process. To do this, they decided on a “timeout” period of 24 hours so that they don’t have scripts running on the CHTC Access Point indefinitely.

+ +

Hiemstra and his team use DAGMan, a tool for Directed Acyclic Graph (DAG) workflows, to first spawn a parent process.

+ +

That parent process will do all the version space pruning and file generation. It’ll then submit all the jobs for testing, wait 24 hours for that timeout, and “run a postscript to interpret the output of those jobs.”

+ +
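In DAGMan syntax, that structure might look roughly like this (the file names are invented for illustration):

    # Parent job prunes the version space and generates the test submit files.
    JOB generate generate_tests.sub
    # Child job(s) run the generated compatibility tests.
    JOB tests run_tests.sub
    PARENT generate CHILD tests
    # Post-script interprets the output once the tests finish.
    SCRIPT POST tests interpret_results.py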

Next, they process all the output files to gain further insight into how the system works.

+ +

Currently, they’re running it quarterly, looking at the output table, and seeing if anything unexpected pops up. Hiemstra explains that going through this process “should give us some tools to debug the system if something suddenly crashes or if different versions a researcher is using are not compatible with each other.”

+ +

Looking ahead, Hiemstra is curious to examine how the versions a researcher picks affect the runtime of their ML model, and whether they affect the model’s outcome or performance.

+ +

“Everything about machine learning approaches is diverse and changing quickly,” Koch remarks, “having information about compatible frameworks and GPUs allows us to be more responsive and helpful to researchers who may be new to the field.”

+ +

The implementation of this tool that Hiemstra and his team have developed can be found on the CHTC GitHub Repository.

+ +

+ +

Watch a video recording of Justin Hiemstra’s talk at HTCondor Week 2022, and browse his slides.

+ + + +
+ + + + + + + + + diff --git a/preview-fall2024-info/Joe-B-Profile.html b/preview-fall2024-info/Joe-B-Profile.html new file mode 100644 index 000000000..889d6597b --- /dev/null +++ b/preview-fall2024-info/Joe-B-Profile.html @@ -0,0 +1,363 @@ + + + + + + +Meet Joe B. from the CHTC + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ Meet Joe B. from the CHTC +

+

What do you do at the CHTC?

+

As an infrastructure administrator, I operate the various computers that provide the services required by the CHTC. The CHTC Infrastructure Services team handles the behind-the-scenes technology so that researchers can focus on what matters: their research! And, of course, leveraging various technologies to meet their research computing needs. For high throughput computing (HTC), we run HTCondor, developed right here at UW-Madison. For high-performance computing (HPC) needs, we offer a Slurm cluster. It is a great privilege to work down the hall from the development team of HTCondor.

+ +

Can you talk about the new hardware refresh that just occurred?

+

As you might imagine, part of being responsible for running an HTCondor pool is providing a place for the research computing to happen – we call such computers “execute points.” Our newest and most powerful execute points came from the recent “Technology Refresh,” an effort made possible through the generous support of the Office of the Vice Chancellor for Research and Graduate Education with funding from the Wisconsin Alumni Research Foundation. These 207 new computers provide substantially more capacity for researchers across campus to do science with the CHTC. Recently, much of my time and effort has gone into taking these devices from new-in-box machines and turning them into fully functioning execute points. It has been quite a challenge, but it also has been very rewarding.

+ +

What’s been your favorite part about working at the CHTC?

+

I really like the people I work with! Everyone is very friendly and helpful; one can cry for help in the hallway, and team members will almost certainly stop by to lend a hand. Don’t get me wrong – the hardware, the technology, and supporting research are all highlights of being a part of the CHTC, but it is the people around me that I appreciate the most.

+ +

What challenges do you face in your position, and how do you overcome them?

+

Research computing, despite its name, lends itself to a fast-paced environment. It is engaging (sometimes even fun!) but also quite the challenge. Priorities change rapidly, and it takes a good deal of flexibility to keep up. Most often, my days do not go as I plan – and that’s okay! Keeping an eye on the big picture, going with the “flow” of each new day, and working closely with my colleagues is how I overcome the many challenges of being a SysAdmin in the research computing world.

+ +

What’s been one of the most exciting changes that have happened recently at the CHTC?

+

I don’t mean to bang on the Tech Refresh drum, but then, I absolutely do – the tech refresh is an exciting and “refreshing” change. It’s a huge deal to us. The quantity and quality of the new hardware really make a massive difference from my perspective, and I hope that the researchers leveraging CHTC will notice it too. Even more exciting is the hope that the CHTC and research computing are becoming more well-known on campus. For me, the Tech Refresh is evidence that we are moving in the right direction toward that goal.

+ +

What’s your favorite flavor of Babcock ice cream?

+

Blue Moon is always my go-to flavor. Nostalgia may influence my choice, as that’s the flavor we would have while visiting the beach when I was very young.

+ +

What’s your favorite fall activity to do in Madison?

+

My favorite fall activity is going apple picking; the sheer number of apple varieties always impresses me. There are a few local orchards that I particularly enjoy.

+ +

You famously came up with “Caturday,” where people post pictures of their cats every Saturday in our CHTC social chat; can you tell us a little about yours?

+

I’m not sure about “famously,” but who doesn’t like cat pictures? CHTC, as it turns out, is made possible by the many cats that allow their humans to work here. I have two cats named Lilac and Peony. They’re both female orange tabbies, which is interesting because most orange tabbies are males. I adopted them upon moving to Madison. They are a bonded pair, meaning they had to be adopted together, and I am so glad to have two! They keep each other company, play together, and cause trouble together. I wouldn’t have it any other way! I often joke that I work to put food on their plates.

+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/Johri.html b/preview-fall2024-info/Johri.html new file mode 100644 index 000000000..9e01e7886 --- /dev/null +++ b/preview-fall2024-info/Johri.html @@ -0,0 +1,371 @@ + + + + + + +The role of HTC in advancing population genetics research + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ The role of HTC in advancing population genetics research +

+

Postdoctoral researcher Parul Johri uses OSG services, the HTCondor Software Suite, and the population genetics simulation program SLiM to investigate historical patterns of genetic variation.

+ +
+ Computer rendering of DNA. +
Computer rendering of DNA. Image credit: Sangharsh Lohakare (@sangharsh_l) on Unsplash.
+
+ +

Running hundreds of thousands of simulations is no easy task for just any researcher. When Parul Johri was faced with this particular problem, she knew she needed more computational power, which is where the OSG came into play.

+ +
+ Parul Johri, postdoctoral researcher with Jeffrey Jensen at Arizona State University +
Parul Johri, postdoctoral researcher with Jeffrey Jensen at ASU
+
+ +

Johri is a postdoctoral researcher with the Jensen Lab at Arizona State University who recently spoke about using high throughput computing (HTC) in her population genetics work at the recent OSG All-Hands Meeting 2022. Running hundreds of thousands of jobs that harnessed more than nine million computing hours on OSG’s Open Science Pool (OSPool), she shared that OSG services and the HTCondor Software Suite (HTCSS) were essential capabilities: “Without these HTC services and technologies, it would not have been possible to complete any of this work.”

+ +

Population genetics research focuses on understanding the impact of processes like selection and mutation that affect genetic variation within natural populations. However, there are no mathematical expressions to describe patterns of genetic variation in populations with complex histories and selection. Instead, hundreds of thousands of simulations are required to model these complicated evolutionary scenarios, with HTCSS playing a critical role.

+ +

Several HTCSS features and HTC services proved especially helpful for Johri’s work. First, high-throughput simulations are easy to set up and execute via an HTCSS Access Point operated as part of the OSG Connect service. Beginning with population parameters that describe the entire population, Johri can create a single HTCSS submit file to simulate hundreds of thousands of gene samples across the genomes for each of these parameters (see the sketch below). She then creates hundreds of thousands of evolutionary replicates for each simulation to make inferences about the parameters from a natural population. Each simulation is managed as a single job by HTCSS.

+ +
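A sketch of how one submit file can fan out replicates this way (the file names and parameter-file layout are assumptions for illustration, not Johri’s actual setup):

    executable = run_slim.sh
    arguments = $(params)
    log = slim_$(Cluster).log
    output = slim_$(Cluster)_$(Process).out
    error = slim_$(Cluster)_$(Process).err
    # one job per line of parameter_sets.txt
    queue params from parameter_sets.txt

Inside run_slim.sh, each parameter can then be handed to SLiM on the command line, e.g. with its -d flag (slim -d N=1000 model.slim).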

Additionally, because the OSPool supports the execution of user software within containers, Johri can easily run this work using SLiM, a population-genetic simulator. She and other population genetics researchers use these parameters to create simulations that imitate realistic data, making SLiM a beneficial and convenient program. Christina Koch, a Research Computing Facilitator at the CHTC, helped Johri create a SLiM container, making it easy to run on the OSPool.

+ +

The SLiM software doesn’t require input files, just the parameters Johri passes as commands to SLiM in the HTCSS submit file. HTCSS capabilities are available via the Access Points operated by OSG as part of the OSG Connect service for US-based research projects. After she submits the jobs through an HTCSS Access Point, SLiM performs simulations for each input parameter. It sends back an output file – anything from a simple summary statistic to entire genome samples of individuals from the simulated population.

+ +

Through an HTCSS Access Point, Johri ran three million jobs examining genetic variation in Drosophila (fruit flies common in genetics research), 50,000 jobs for influenza, and one and a half million jobs for humans. Using over nine and a half million wall hours in the last three years, Johri has published three manuscripts rich with genetic patterns and findings.

+ +

Looking towards the horizon, Johri views HTC services as a vital resource: “I’m hoping that HTC services and technologies will continue to play a central role in performing evolutionary inferences in the future.” This hope doesn’t only apply to Johri’s research –– it’s reflective of the entire field of population genetics. With dHTC services and technologies like the OSPool and HTCSS at their fingertips, population genetics researchers everywhere can push the field’s boundaries.

+ +

+ +

Watch a video recording of Parul Johri’s talk at the OSG All-Hands Meeting 2022, and browse her slides.

+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/Lightning-Talks.html b/preview-fall2024-info/Lightning-Talks.html new file mode 100644 index 000000000..50eabb133 --- /dev/null +++ b/preview-fall2024-info/Lightning-Talks.html @@ -0,0 +1,484 @@ + + + + + + +OSG User School 2022 Researchers Present Inspirational Lightning Talks + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ OSG User School 2022 Researchers Present Inspirational Lightning Talks +

+

The OSG User School student lightning talks showcased the students’ research, inspiring all the event participants.

+ +
+ Staff and attendees from the OSG User School 2022. +
Staff and attendees from the OSG User School 2022.
+
+ +

Each summer, the OSG Consortium offers a week-long summer school for researchers who want to learn how to use high-throughput computing (HTC) methods and services to handle large-scale computing applications at the heart of today’s cutting-edge science. This past summer the school was back in-person on the University of Wisconsin–Madison campus, attended by 57 students and over a dozen staff.

+ +

Participants from Mali and Uganda, Africa, to campuses across the United States learned through lectures, discussions, and hands-on activities how to apply HTC approaches to handle large ensembles of jobs and large datasets in support of their research work. “It’s truly humbling to see how much cool work is being done with computing on @CHTC_UW and @opensciencegrid!!” research facilitator Christina Koch tweeted regarding the School.

+ +

One highlight of the School is the closing participants’ lightning talks, where the researchers present their work and their plans to integrate HTC, expanding the scope and goals of their research. The lightning talks given at this year’s OSG User School illustrate the diversity of the students’ research and the expanded scope enabled by the power of HTC and the School.

+ +

Note: Applications to attend the School typically open in March. Check the OSG website for this announcement.

+ +
+ Devin Bayly +
Devin Bayly
+
+ +

Devin Bayly, a data and visualization consultant at the University of Arizona’s Research Technologies department, presented “OSG for Vulkan StarForge Renders.” Devin has been working on a multimedia project called Stellarscape, which combines astronomy data with the fine arts. The project aims to pair the human’s journey with a star’s journey from birth to death.

+ +

His goal has been to find a way to support connections with the fine arts, a rarity in the HTC community. After attending the User School, Devin intends to use the techniques he learned to break up his data and entire simulation into tiles and use a low-level graphics API called Vulkan to target and render the data on CPU/GPU capacity. He then intends to combine the tiles into individual frames and assemble them into a video.

+ +
+ 4x5 summary of 500+ time steps of simulation data of ~24e6 gas particles: Batch headless rendering of the Starforge simulation gas position data. +
Rendering of the Starforge
simulation gas position data.
+
+ +

Starforge Anvil of Creation: Grudić, Michael Y. et al. “STARFORGE: Toward a comprehensive numerical model of star cluster formation and feedback.” arXiv: Instrumentation and Methods for Astrophysics (2020): n. pag. https://arxiv.org/abs/2010.11254

+ +
+ Mike Nsubuga +
Mike Nsubuga
+
+ +

Mike Nsubuga, a Bioinformatics Research fellow at the African Center of Excellence in Bioinformatics and Data-Intensive Sciences (ACE) within the Infectious Disease Institute (IDI) at Makerere University in Uganda, presented “End-to-End AI data systems for targeted surveillance and management of COVID-19 and future pandemics affecting Uganda.”

+ +

Nsubuga noted that in the United States, there are two physicians for every 1,000 people; in Uganda, there is only one physician per 25,000 people. Research shows that AI, automation, and data science can support overburdened health systems and health workers when deployed responsibly. Nsubuga and a team of researchers at ACE are working on creating AI chatbots for automated and personalized symptom assessments in English and Luganda, one of the major languages of Uganda. He’s training the AI models using data from the public and healthcare workers to communicate with COVID-19 patients and the general public.

+ +

While at the School, Nsubuga learned how to containerize his work into a Docker image, and from that, he built an Apptainer (formerly Singularity) container image. He then deployed this to the Open Science Pool (OSPool) to determine how to mimic the traditional conversation assistant workflow model in the context of COVID-19. The capacity offered by the OSPool reduced the time it takes to train the AI model by a factor of eight.

+ +
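The Docker-to-Apptainer conversion he describes is typically a single command of this shape (the image name is a placeholder, not Nsubuga’s actual image):

    apptainer build chatbot.sif docker://registry.example.org/covid-chatbot:latest

The resulting .sif file can then be transferred with OSPool jobs and run on execution points.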
+ Jem Guhit +
Jem Guhit
+
+ +

Jem Guhit, a Physics Ph.D. candidate from the University of Michigan, presented “Search for Di-Higgs production in the LHC with the ATLAS Experiment in the bbtautau Final State.” The Higgs boson was discovered in 2012 and is known for the Electroweak Symmetry Breaking (EWSB) phenomenon, which explains how other particles get mass. Since then, the focus of the LHC has been to investigate the properties of the Higgs boson, and one can get more insight into how the EWSB Mechanism works by searching for two Higgs bosons using the ATLAS Detector. The particle detectors capture the resultant particles from proton-proton collisions and use this as data to look for two Higgs bosons.

+ +

DiHiggs searches pose a challenge because the rate at which a particle process occurs for two Higgs bosons is 30x smaller than for a single Higgs boson. Furthermore, the particles the Higgs can decay to have similar particle trajectories to other particles produced in the collisions unrelated to the Higgs boson. Her strategy is to use a machine learning (ML) method powerful enough to handle complex patterns to determine whether the decay products come from a Higgs boson. She plans to use what she’s learned at the User School to show improvements in her machine-learning techniques and optimizations. With these new skills, she has been running jobs on the University of Michigan’s HTCondor system utilizing GPU and CPUs to run ML jobs efficiently and plans to use the OSPool computing cluster to run complex jobs.

+ +
+ Peder Engelstad +
Peder Engelstad
+
+ +

Peder Engelstad, a spatial ecologist and research associate in the Natural Resource Ecology Laboratory at Colorado State University (and a 2006 University of Wisconsin-Madison alumnus), presented a talk on “Spatial Ecology & Invasive Species.” Engelstad’s work focuses on the ecological importance of natural spatial patterns of invasive species.

+ +

He uses modeling and mapping techniques to explore the spatial distribution of suitable habitats for invasive species. The models he uses combine locations of species with remotely-sensed data, using ML and spatial libraries in R. Recently, he’s taken on the massive task of creating thousands of suitability maps. Doing this sequentially would take over three years, but he anticipates HTC methods can help drastically reduce this timeframe to a matter of days.

+ +

Engelstad said it’s been exciting to see the approaches he can use to tackle this problem using what he’s learned about HTC, including determining how to structure his data and break it into smaller chunks. He notes that the nice thing about using geospatial data is that they are often in a 2-D grid system, making it easy to index them spatially and designate georeferenced tiles to work on. Engelstad says that an additional benefit of incorporating HTC methods will be to free up time to work on other scientific questions.

+ +
+ Zachary Baldwin +
Zachary Baldwin
+
+ +

Zachary Baldwin, a Ph.D. candidate in Nuclear and Particle Physics at Carnegie Mellon University, works for the GlueX Collaboration, a particle physics experiment at the Thomas Jefferson National Lab that searches for and studies exotic hybrid mesons. Baldwin presented a talk on “Analyzing hadronic systems in the search for exotic hybrid mesons at GlueX.”

+ +

His thesis examines data collected from the GlueX experiment in search of forbidden quantum numbers within subatomic particle systems, to determine whether they exist in our universe. Baldwin’s experiment takes a beam of electrons, speeds them up to high energies, and then collides them with a thin diamond wafer. These electrons then slow down, producing linearly polarized photons. These photons then collide with a container of liquid hydrogen (protons) at the center of his experiment. Baldwin studies the resulting systems produced within these photon-proton collisions.

+ +

The collisions create billions of particles, leaving Baldwin with many petabytes of data. Baldwin remarks that too much time gets wasted looping through all the data points, and massive processes run out of memory before he can compute results, which is one aspect where HTC comes into play. Another major area he worked on through the User School is simulating Monte Carlo particle reactions in containers, which he pushes to the OSPool using HTCondor to simulate events he believes would happen in the real world.

+ +
+ Olaitan Awe +
Olaitan Awe
+
+ +

Olaitan Awe, a systems analyst in the Information Technology department at the Jackson Laboratory (JAX), presented “Newborn Screening (NBS) of Inborn Errors of Metabolism (IEM).” The goal of newborn screening is to detect, as soon as a baby is born, what diseases they might have.

+ +

Genomic Newborn Screenings (gNBS) are generally cheap, detect many diseases, and have a quick turnaround time. The gNBS takes a child’s genome and compares it to a reference genome to check for variations. The computing challenge lies in looking for all variations, determining which are pathogenic, and seeing which diseases they align with.

+ +

After attending the User School, Awe intends to tackle this problem by writing DAGMan scripts to implement parent-child relations in a pipeline he created. He then plans to build custom containers to run the pipeline on the OSPool and stage big data shared across parent-child processes. The long-term goal is to develop a validated, reproducible gNBS pipeline for routine clinical practice and apply it to African populations.

+ +
+ Max Bareiss +
Max Bareiss
+
+ +

Max Bareiss, a Ph.D. Candidate at the Virginia Tech Center for Injury Biomechanics, presented “Detection of Camera Movement in Virginia Traffic Camera Video on OSG.” Bareiss used a data set of 1,263 traffic cameras in Virginia for his project. His goal was to determine how to document the crashes, near-crashes, and normal driving recorded by traffic cameras using his video analysis pipeline. This work would ultimately allow him to detect vehicles and pedestrians and determine their trajectories.

+ +

The three areas he wanted to tackle and get help with at the User School were data movement, code movement, and using GPUs for other tasks. For data movement, he used MinIO, a high-performance object store, so that the execution points could directly copy the videos from Virginia Tech. For code movement, Bareiss used Alpine Linux and multi-stage builds, which he learned to implement throughout the week. He learned about using GPUs at the Center for High Throughput Computing (CHTC) and in the OSPool.

+ +

Additionally, he learned about DAGMan, which he noted was “very exciting” since his pipeline was already a directed acyclic graph (DAG).

+ +
+ Matthew Dorsey +
Matthew Dorsey
+
+ +

Matthew Dorsey, a Ph.D. candidate in the Chemical and Biomolecular Engineering Department at North Carolina State University, presented on “Computational Studies of the Structural Properties of Dipolar Square Colloids.”

+ +

Dorsey is studying a colloidal particle developed in a research lab at NC State University in the Biomolecular Engineering Department. His research focuses on using computer models to discover what these particles can do. The computer models he has developed explore how different parameters (like the system’s temperature, particle density, and the strength of an applied external field) affect the particle’s self-assembly.

+ +

Dorsey recently discovered how the magnetic dipoles embedded in the squares lead to structures with different material properties. He intends to use the HTCondor Software Suite (HTCSS) to investigate the applied external fields that change with respect to time. “The HTCondor system allows me to rapidly investigate how different combinations of many different parameters affect the colloids’ self-assembly,” Dorsey says.

+ +
+ Ananya Bandopadhyay +
Ananya Bandopadhyay
+
+ +

Ananya Bandopadhyay, a graduate student from the Physics Department at Syracuse University, presented “Using HTCondor to Study Gravitational Waves from Binary Neutron Star Mergers.”

+ +

Gravitational waves are created when black holes or neutron stars crash into each other. Analyzing these waves helps us to learn about the objects that created them and their properties.

+ +

Bandopadhyay’s project focuses on LIGO’s ability to detect gravitational wave signals coming from binary neutron star mergers involving sub-solar mass component stars, which she determines from a graph showing the detectability of the signals as a function of the component masses comprising the binary system.

+ +

The fitting factors for the signals would initially have taken her laptop a little less than a year to compute. At the School she learned how to use OSPool capacity, with which her jobs take only 2-3 days to run. Other lessons that Bandopadhyay hopes to apply are data organization and management as she scales up the number of jobs. Additionally, she intends to implement containers to help collaborate with and build upon the work of researchers in related areas.

+ +
+ Meng Luo +
Meng Luo
+
+ +

Meng Luo, a Ph.D. student from the Department of Forest and Wildlife Ecology at the University of Wisconsin–Madison, presented “Harnessing OSG to project the impact of future forest productivity change on land use change.” Luo is interested in learning how forest productivity increases or decreases over time.

+ +

Luo built a single forest productivity model using three sets of remote sensing data to predict this productivity, coupling it with a global change analysis model to project possible futures.

+ +

On her computer alone, it would take two years to finish this work. During the User School, Luo learned she could use Apptainer to run her model and multiple events simultaneously. She also learned to use DAGMan workflows to organize the process better. With all this knowledge, she ran a scenario that used to take a week to complete in only a couple of hours with the help of OSPool capacity.

+ +

Tinghua Chen from Wichita State University presented a talk on “Applying HTC to Higgs Boson Production Simulations.” Ten years ago, the ATLAS and CMS experiments at CERN announced the discovery of the Higgs boson. CERN is a research center that operates the world’s largest particle physics laboratory. The ATLAS and CMS experiments are general-purpose detectors at the Large Hadron Collider (LHC) that both study the Higgs boson.

+ +

For his work, Chen uses a Monte Carlo event generator, Herwig 7, to simulate the production of the Higgs boson in vector boson fusion (VBF). He uses the event generator to predict hadronic cross sections, which could be useful for the experimentalist to study the Standard Model Higgs boson. Based on the central limit theorem, the more events Chen can generate, the more accurate the prediction.

+ +

Chen can run ten thousand events on his laptop, but the predictions could be more accurate. Ideally, he’d like to run five billion events for more precision. Running all these events would be impossible on his laptop; his solution is to run the event generators using the HTC services provided by the OSG consortium.

+ +

Using a workflow he built, Chen sets up the event generator with parallel integration steps followed by event generation: Herwig 7 builds, integrates, and then runs the events (see the sketch below).

+ +
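That cycle usually maps onto Herwig 7’s subcommands roughly as follows (the input file name is assumed, and exact options vary between Herwig versions):

    Herwig build VBF-Higgs.in          # parse the input file and build the run file
    Herwig integrate VBF-Higgs.run     # integration, which can be split into parallel jobs
    Herwig run VBF-Higgs.run -N 10000  # generate 10,000 events

Splitting the integration and generation stages across many HTC jobs is what makes event counts in the billions feasible.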

+ +

Thank you to all the researchers who presented their work in the Student Lightning Talks portion of the OSG User School 2022!

+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/ML-Demo.html b/preview-fall2024-info/ML-Demo.html new file mode 100644 index 000000000..0dd0adbf2 --- /dev/null +++ b/preview-fall2024-info/ML-Demo.html @@ -0,0 +1,432 @@ + + + + + + +CHTC Hosts Machine Learning Demo and Q+A session + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ CHTC Hosts Machine Learning Demo and Q+A session +

+

Over 60 students and researchers attended the Center for High Throughput Computing (CHTC) machine learning and GPU demonstration on November 16th. UW Madison Associate Professor of Biostatistics and Medical Informatics Anthony Gitter and CHTC Lead Research Computing Facilitator Christina Koch led the demonstration and fielded many questions from the engaged audience.

+ +
+ Koch and Gitter presenting at the demo +
Koch and Gitter presenting at the demo.
+
+ +

CHTC offers free large-scale computing services for campus researchers who have encountered computing bottlenecks and outgrown their existing resources, often a laptop, Koch began. One of the services CHTC provides is the GPU Lab, a resource within CHTC’s HTC system.

+ +

The GPU Lab supports up to dozens of concurrent jobs per user, a variety of GPU types including 40GB and 80GB A100s, runtimes from a few hours up to seven days, significant RAM needs, and space for large data sets.

+ +

Researchers are not waiting to take advantage of these CHTC GPU resources. Over the past two months, 52 researchers ran over 17,000 jobs on GPU hardware. Additionally, the UW-Madison IceCube project alone ran over 70,000 jobs.

+ +

Even more capacity is available. The recent $4.3 million investment from the Wisconsin Alumni Research Foundation (WARF) in UW-Madison’s research computing hardware is a significant contributor to this abundance of resources, Gitter noted.

+ +

There are two main ways to know what GPUs are available and the number of GPUs users may request per job:

+
    +
  • +

    The first is through the CHTC website, which offers up-to-date information. To access this information, go to the CHTC website and enter ‘gpu’ in the search bar. The first result will be the ‘Jobs that Use GPU Overview’, which is the main guide on using GPUs in CHTC. At the very top of this guide is a table that contains information about the kinds of GPUs, the number of servers, and the number of GPUs per server, which limits how many GPUs can be requested per job. Also listed is the GPU memory, which shows the amount of GPU memory and the attribute you would use in the ‘require_gpus’ statement when submitting a job.

    +
  • +
  • +

    A second way is to use the ‘condor_status’ command. To use this command, set a constraint of ‘Gpus > 0’ to avoid printing out information on every single server in the system: condor_status -constraint ‘Gpus > 0’. This gives the names of servers in the pool and their availability status, idle or busy. Users may also add the autoformat flag ‘-af’ to print out any desired attribute of the machine. For instance, to access attributes like those listed in the table of the CHTC guide, users must include the GPUs prefix followed by an underscore and then the name of the column to access (see the sketch after this list).

    +
  • +
+ +
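A short sketch of both forms of the command (the attribute names follow the GPUs_ convention described above and may differ by pool):

    # list only servers that have GPUs, with their idle/busy state
    condor_status -constraint 'Gpus > 0'

    # print selected GPU attributes per machine using the autoformat flag
    condor_status -constraint 'Gpus > 0' -af Machine GPUs_DeviceName GPUs_Capability GPUs_GlobalMemoryMb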

The GPU Lab, due to its expansive potential, can be used in many scenarios. Koch explained this using real-world examples. Researchers might want to seek out the CHTC GPU Lab when:

  • Running into the time limit of an existing GPU while trying to develop and run a machine learning algorithm.
  • Working with models that require more memory than what is available with the GPU currently in use.
  • Trying to benchmark the performance of a new machine learning algorithm and realizing that the available computing resources are time-consuming and not equipped for multitasking.

+ +

While GPU Lab users routinely submit many jobs that need a single GPU without issue, users may need to work collaboratively with the CHTC team on extra testing and configuration when handling larger data sets and models or benchmarking precise timing. Koch presented a slide outlining what ranges from easy to more challenging on CHTC GPU resources, stressing that when in doubt about what is feasible, contact CHTC:

+ +
+ Slide showing what is possible with GPU Lab +
Slide showing what is possible with GPU Lab.
+
+ +

Work done in CHTC is run through a job submission. Koch presented a flowchart demonstrating how this works:

+
+ How to run work via job submission +
How to run work via job submission.
+
+ +

She demonstrated the three-step process of

  1. login and file upload,
  2. submission to the queue, and
  3. job-run execution by the HTCondor job scheduler.

This process, she showed, involves writing up a submit file and using command-line syntax to submit it to the queue. Below are some commands that can be used to submit a file:
+
+ Commands to use when submitting jobs +
Commands to use when submitting jobs.
+
+ +

The next part of the demo was led by Gitter. To demonstrate what commands would be needed for specific kinds of job submissions, he explained what a job submit file should look like, some necessary commands, and the importance of listing out commands sequentially.

+
+ How a job submit file should look +
How a job submit file should look.
+
+ +

Gitter also demonstrated how to run jobs using the example GitHub repository with the following steps (strung together in the shell sketch after this list):

+
    +
  • Connecting a personal user account to a submit server in CHTC
  • +
  • Utilizing the ‘ls’ command to inspect the home directory
  • +
  • Cloning the pre-existing template repository with runnable GPU examples
  • +
  • Including a ‘condor_submit file-name.sub’ command line to define and submit the job the user wants to run
  • +
  • Applying the ‘condor_q’ command to monitor the job that has been submitted
  • +
+ +
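Strung together, those steps look something like the following shell session (the access point hostname and repository URL are placeholders):

    ssh netid@ap2001.chtc.wisc.edu                     # connect to a CHTC submit server
    ls                                                  # inspect the home directory
    git clone https://github.com/CHTC/templates-GPUs   # clone the template repository
    cd templates-GPUs
    condor_submit file-name.sub                         # submit the job defined in the file
    condor_q                                            # monitor the submitted job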

Users are able to choose GPU-related submit file options. Gitter demonstrated the different options needed in the HTCondor submit file in order to access the GPUs in the CHTC GPU Lab and beyond (combined into the sketch after this list). These include:

+
    +
  • ‘Request_gpus’ to enable GPU use
  • +
  • ‘+WantGPULab’ to indicate whether or not to use CHTC’s shared use GPUs
  • +
  • ‘+GPUJobLength’ to indicate which job type the user would like to submit
  • +
  • ‘Require_gpus’ to request specific GPU attributes or CUDA functionality
  • +
+ +
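Combined, a minimal GPU Lab submit file using these options might look like this sketch (the executable name and resource values are illustrative):

    # request one shared-use GPU Lab GPU for a short job
    request_gpus = 1
    +WantGPULab = true
    +GPUJobLength = "short"
    require_gpus = (GlobalMemoryMb >= 40000)

    executable = run_model.sh
    request_cpus = 1
    request_memory = 16GB
    request_disk = 20GB

    log = job.log
    output = job.out
    error = job.err

    queue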

He outlined some other commands for running PyTorch jobs and for exploring available GPUs. All commands from the demo can be accessed here.

+ +

The event concluded with a Q&A session for audience members. Some of these questions prompted a discussion on the availability of default repositories and of tools that can track the resources a job is using. In addition to interactive monitoring, HTCondor has a log file that provides information about when a job was started and a summary of what was requested (disk, memory, GPUs, and CPUs), as well as what was allocated and estimated to be used.

+ +

Currently, there is a template GitHub repository that can be cloned and used as a starting point; its PyTorch and TensorFlow examples are a useful first step. However, nearly every user works with a slightly different combination of packages, so most users will need to make some manual modifications to adjust versions, change scripts, rename data files, and so on.

+ +

These resources will be helpful when getting started:

+ + +
+ + + + + + + + + diff --git a/preview-fall2024-info/ML-and-Image-Analyses-for-Livestock-Data.html b/preview-fall2024-info/ML-and-Image-Analyses-for-Livestock-Data.html new file mode 100644 index 000000000..22388db65 --- /dev/null +++ b/preview-fall2024-info/ML-and-Image-Analyses-for-Livestock-Data.html @@ -0,0 +1,369 @@ + + + + + + +Machine Learning and Image Analyses for Livestock Data + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ Machine Learning and Image Analyses for Livestock Data +

+ + +

The vision of the Digital Livestock Lab is to create state-of-the-art computer vision systems and the largest public database for livestock.

+ +

In this presentation from HTCondor Week 2021, Joao Dorea from the Digital Livestock Lab explains how high-throughput computing is used in the field of animal and dairy sciences. Computer vision systems and sensors collect animal-level phenotypic data on cows to make more optimized decisions about what to do with each animal in terms of health, nutrition, reproduction, and genetics. One challenge of doing this has to do with the sheer size of the data collected. Processing and storing tens of thousands of images of cows requires significant computational resources.

+ +

By utilizing HTCondor through a collaboration with the Center for High Throughput Computing, the Digital Livestock Lab has been able to focus their time and money on the livestock. Specialized to handle computational work that can be split into many pieces and run in parallel, image analysis aligns well with the ideal HTCSS workload. HTCondor allows them to run many jobs and experiments concurrently and faster, opening the door to larger and larger data sets. Being able to process numerous data sets in parallel has allowed the Digital Livestock Lab to gain significant insight into livestock systems, all thanks to HTCondor and collaborations with the faculty at the CHTC!

+ +

Read more about Joao Dorea and his research on the development of high-throughput phenotyping technologies on his homepage.

+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/Messick.html b/preview-fall2024-info/Messick.html new file mode 100644 index 000000000..ee491a9b8 --- /dev/null +++ b/preview-fall2024-info/Messick.html @@ -0,0 +1,377 @@ + + + + + + +LIGO's Search for Gravitational Waves Signals Using HTCondor + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ LIGO's Search for Gravitational Waves Signals Using HTCondor +

+

Cody Messick, a Postdoc at the Massachusetts Institute of Technology (MIT) working for the LIGO lab, describes LIGO’s use of HTCondor to search for new gravitational wave sources.

+ +
+ Image of two black holes. Photo credit: Cody Messick’s presentation slides. +
Image of two black holes. Photo credit: Cody Messick’s presentation slides.
+
+ +

High-throughput computing (HTC) is critical to astronomy, from black hole research to radio astronomy and beyond. At the 2022 HTCondor Week, another area of astronomy was put in the spotlight by Cody Messick, a researcher working for the LIGO lab and a Postdoc at the Massachusetts Institute of Technology (MIT). His work focuses on a gravitational-wave analysis that he’s been running with the help of HTCondor to search for new gravitational wave signals.

+ +

Starting with general relativity and why it’s crucial to his work, Messick explains that “it tells us two things; first, space and time are not separate entities but are instead part of a four-dimensional object called space-time. Second, space-time is warped by mass and energy, and it’s these changes to the geometry of space-time that we experience as gravity.”

+ +

Messick notes that general relativity is important to his work because it predicts the existence of gravitational waves. These waves are tiny ripples in the curvature of space-time that travel at the speed of light and stretch and compress space. Accelerating non-spherically symmetric masses generate these waves.

+ +

Generating ripples in the curvature of space-time large enough to be detectable using modern ground-based gravitational-wave observatories takes an enormous amount of energy; the observations made thus far have come from the mergers of compact binaries, pairs of extraordinarily dense yet relatively small astronomical objects that spiral into each other at speeds approaching the speed of light. Black holes and neutron stars are examples of these so-called compact objects, both of which are or almost are perfectly spherical.

+ +

Messick and his team first detected two black holes going two-thirds the speed of light right before they collided. “It’s these fantastic amounts of energy in a collision that moves our detectors by less than the radius of a proton, so we need extremely energetic explosions of collisions to detect these things.”

+ +

Messick looks for specific gravitational waveforms during the data analysis. “We don’t know which ones we’re going to look for or see in advance, so we look for about a million different ones.” They then use match filtering to find the probability that the random noise in the detectors would generate something that looks like a gravitational-wave; the first gravitational-wave observation had less than a 1 in 3.5 billion chance of coming from noise and matched theoretical predictions from general relativity extremely well.

+ +

Messick’s work with external collaborators outside the LIGO-Virgo-KAGRA collaboration looks for systems their normal analyses are not sensitive to. Scientists use the parameter kappa to characterize the ability of a nearly spherical object to distort when spinning rapidly or, in simple terms, how squished a sphere will become when spinning quickly.

+ +

LIGO searches are insensitive to any signal with a kappa greater than approximately ten. “There could be [signals] hiding in the data that we can’t see because we’re not looking with the right waveforms,” Messick explains. His analysis has been working on this problem.

+ +

Messick uses HTCondor DAGs to model his workflows, which he modified to make integration with the OSG easier. The first job checks the frequency spectrum of the noise. The workflow then aggregates the frequency spectrum, performs decomposition (labeled by color by detector type), and finally runs the filtering process.

+ +
+ A section of Messick’s DAG workflow. +
A section of Messick’s DAG workflow.
+
+ +

Although Messick’s work is more physics-heavy than computationally driven, he remarks that “HTCondor is extremely useful to us… it can fit the work we’ve been doing very, very naturally.”

+ +

+ +

Watch a video recording of Cody Messick’s talk at HTCondor Week 2022, and browse his slides.

+ + + +
+ + + + + + + + + diff --git a/preview-fall2024-info/NIAID-ACE-students-attend-OSG-User-School.html b/preview-fall2024-info/NIAID-ACE-students-attend-OSG-User-School.html new file mode 100644 index 000000000..faf6549e2 --- /dev/null +++ b/preview-fall2024-info/NIAID-ACE-students-attend-OSG-User-School.html @@ -0,0 +1,394 @@ + + + + + + +NIAID/ACE students attend this year’s OSG User School 2022 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ NIAID/ACE students attend this year’s OSG User School 2022 +

+
+ CHTC's Christina Koch served as one of the facilitators for OSG School 2022. +
CHTC's Christina Koch served as one of the facilitators for OSG School 2022.
+
+ +

This past July, the OSG User School 2022 welcomed students from across the globe to learn how to use high-throughput computing (HTC) in their scientific research.

+ +

The OSG User School has been an annual week-long event hosted at the University of Wisconsin-Madison for over a decade. The program uses lectures and hands-on exercises to introduce and inform students about HTC systems.

+ +

Five students from Makerere University in Uganda and the University Of Sciences, Techniques, and Technologies of Bamako in Mali, Africa, participated as a part of The U.S. National Institute of Allergy and Infectious Diseases (NIAID) and the African Centers for Excellence in Bioinformatics and Data-Intensive Science (ACE) partnership program.

+ +

This event was not the first time NIAID, ACE, and OSG partnered. Back in February, students and faculty in the ACE program engaged in a customized HTC training session over Zoom led by Christina Koch, a research computing facilitator with UW-Madison’s Center for High Throughput Computing.

+ +

HTC makes it easier for researchers with data-intensive or computationally heavy research to manage their work better and more efficiently. Using OSG high throughput computing services, researchers can tackle numerous tasks (like analyzing large amounts of data) that are too resource-intensive to run on just a laptop.

+ +

HTC uses parallel computing: when a researcher has a large data set they want to analyze, OSG high throughput computing services allow them to submit jobs in parallel and produce results more quickly.

+ +
+ The five ACE students with OSG’s Research Computing Facilitators. From left to right: Christina Koch, Mike Nsubuga, Aoua Coulibaly, Modibo Goita, Sitapha Coulibaly, Rachel Lombardi, Kangaye Amadou Diallo. +
The five ACE students with OSG’s Research Computing Facilitators. From left to right: Christina Koch, Mike Nsubuga, Aoua Coulibaly, Modibo Goita, Sitapha Coulibaly, Rachel Lombardi, Kangaye Amadou Diallo.
+
+ +

One OSG User School 2022 attendee, Mike Nsubuga, came from Makerere University in Uganda as an MS student in Bioinformatics. Nsubuga also participated in the virtual training session back in February, which he says was a good start for him to have some experience using HTC and to be able to see how he can apply it to his research. To gain more experience, he applied for the continuation of the OSG School this summer.

+ +

In addition to conducting his research on antimicrobial resistance, Nsubuga is a software developer responsible for creating a Covid-19 AI chatbot based in Uganda. And although Nsubuga came to the User School almost certain the application of HTC wouldn’t work within the scope of his research, he admits he was pleasantly proved wrong.

+ +

“I would definitely recommend the OSG User School to others, without a doubt, at least to try,” he says. “It’s just a process of understanding what someone is trying to solve, what challenges they are facing, how they want to be helped—and trying to fit that into the OSG and seeing what it has to offer and what it can’t.”

+ +

Aoua Coulibaly was another participant who had taken the February training. Coulibaly is a Bioinformatics consultant at ACE from the University of Sciences, Techniques, and Technologies of Bamako in Mali and a Ph.D. student in the same subdiscipline. Her research interest lies in studying the malaria parasite Plasmodium.

+ +

Coulibaly had prior working experience with high-performance computing (HPC) for evaluating systems. Through the User School, she discovered the benefits of incorporating HTC into her research.

+ +

“The fact that we can submit multiple jobs at once, I think that was really interesting,” she says. “I can apply that to my research so the analysis can go faster.”

+ +

Also continuing training was Modibo Goita, an MS student in Bioinformatics whose studies focus on Malian genetic neurological disorders. His thesis centers on genetics, with an emphasis on early breast cancer detection screening via germline mutations.

+ +

In genomics, the challenge is that the data size is often immense. Goita learned that with the help of OSG high throughput computing services, he could explore the possibility of scaling up and going beyond the limitations of a single computer cluster.

+ +
+
+

"It’s just a process of understanding what someone is trying to solve, what challenges they are facing, how they want to be helped—and trying to fit that into the OSG and seeing what it has to offer and what it can’t."

+
+ +
+ +

Other trainees in attendance included Sitapha Coulibaly and Kangaye Amadou Diallo, both ACE students who journeyed to the Midwest from Mali. Diallo is a Ph.D. student in Bioinformatics whose research concerns the potential for rice microbiomes to prevent damage to pesticide-free plants. Coulibaly is an MS student in Bioinformatics who concentrates on the genetics of crop-damaging soil bacteria.

+ +

As a collective, the ACE students credit the OSG staff’s willingness to help as a large part of why integrating HTC into their research went smoothly and why their experience was worthwhile. Their consensus is that they would recommend the OSG User School to other researchers dealing with computing-intensive science, noting that spreading the word and hosting more collaborations are essential means of doing so.

+ +

The OSG and ACE/NIAID teams are looking forward to continued collaboration. In September 2022, the OSG’s Director, Frank Wuerthwein, and Research Computing Facilitator, Rachel Lombardi, will be traveling to Makerere University in Kampala, Uganda to lead a workshop on using OSG resources at the 2022 ACE Global Consortium Meeting.

+ +

Through this continued partnership, NIAID/ACE, Morgridge, CHTC, and OSG hope to spread the word about HTC and use it to advance basic research, with continued support for local and global collaborators, ultimately helping bring computing resources to all.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/NIAID.html b/preview-fall2024-info/NIAID.html new file mode 100644 index 000000000..379f02665 --- /dev/null +++ b/preview-fall2024-info/NIAID.html @@ -0,0 +1,364 @@ + + + + + + +NIAID/ACE - OSG collaboration leads to a successful virtual training session + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ NIAID/ACE - OSG collaboration leads to a successful virtual training session +

+

The U.S. National Institute of Allergy and Infectious Diseases (NIAID) and the African Centers for Excellence in Bioinformatics and Data-Intensive Science (ACE) partnered with the OSG Consortium to host a virtual high throughput computing training session for graduate students from Makerere University and the University of Sciences, Techniques, and Technologies of Bamako (USTTB).

+ +
+ Map of Africa with Mali and Uganda Highlighted +
Map of Africa; Mali and Uganda are highlighted where their respective flags point. Image credit: © 2010 Roland Urbanek on Flickr; flags edited in and overlaid on the image.
+
+ +

Five thousand miles and seven time zones were no obstacle for forty-one dedicated researchers from Uganda and Mali participating in their first training session using OSG high throughput computing services. On February 15, bioinformatics graduate students and faculty members from Makerere University in Uganda and the University of Sciences, Techniques, and Technologies of Bamako in Mali engaged in a customized training session over Zoom led by Christina Koch, an OSG Research Computing Facilitator.

+ +

Dr. Mariam Quiñones, Dr. Darrell E. Hurt, Mr. Chris Whalen, and the ACE Global Operations Team within NIAID’s Office of Cyber Infrastructure and Computational Biology (OCICB) spearheaded this cross-continent collaboration between the OSG Consortium, the NIAID, and ACE, which supports bioinformatics training for graduate students and other researchers at Makerere University and USTTB. The ACE Global Operations Team works closely with the ACE Center Directors and instructors to identify gaps and provide supplemental hands-on training to the students. The NIAID ACE Global Operations Team recognized a need for additional computing resources to train graduate students and knew precisely where to turn.

+ +

Envisioning the power of a partnership between the OSG Consortium and the ACE community, Quiñones approached OSG Research Facilitation Lead Lauren Michael with the idea of a high throughput computing training session for the students and faculty within the ACE program.

+ +

NIAID’s previous success with running computational work on the Open Science Pool (OSPool) led Quiñones to think the impact might even reach beyond students trained by the ACE program. Predicting the spread of this adoption of OSG services, Quiñones remarks, “[w]e hope some of the faculty and associated staff actively generating data from data-intense research projects will begin to use the OSG services.”

+ +

In preparation for the training, OSG’s Research Facilitation Team planned to go beyond the usual introduction to the OSPool. This time around, the team designed a new tutorial that incorporated the BWA software, a tool commonly used in bioinformatics and familiar to the students. Koch, who led the training session, notes that the “goal of using the tutorial was to give the students hands-on experience using software that would be relevant to the kind of work they are already doing for their research.”

+ +

Building off Koch’s thoughts, Michael explains: “Given the shared bioinformatics needs of the students, we wanted to make sure the content went beyond our general New User Training format by encouraging conversation among training participants and using examples they’d connect with.” Reflecting, she adds: “It seemed to pay off, given the level of engagement.”

+ +

Through numerous public-private partnerships with the NIAID, African institutions, governments, and private-sector companies, ACE aims to enhance access to computational capabilities and infrastructure and provide training in data science and bioinformatics. This access will empower researchers and students to accelerate biomedical research and drive discoveries that could impact the treatment, prevention, and diagnosis of diseases in Africa and across the globe.

+ +

And while high throughput computing and the OSPool can play an essential role in advancing the bioinformatics behind some of these efforts, Michael emphasizes that the benefits are undoubtedly mutual for the OSG consortium:

+ +

“By working with ACE, engaging with participants, and adding documented bioinformatics examples to our resources –– we are better poised to support other researchers doing similar work and flexibly customize our training materials for other domains. We’re deeply grateful for this partnership.”

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/Natzke.html b/preview-fall2024-info/Natzke.html new file mode 100644 index 000000000..e4ba9b25b --- /dev/null +++ b/preview-fall2024-info/Natzke.html @@ -0,0 +1,400 @@ + + + + + + +Learning and adapting with OSG: Investigating the strong nuclear force + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Learning and adapting with OSG: Investigating the strong nuclear force +

+
+ The GRIFFIN Spectrometer +
The GRIFFIN Spectrometer. (Image credit: Kirk Chantraine, TRIUMF Photowalk 2018).
+
+ +

Connor Natzke’s journey with the OSG Consortium began in 2019 as a student of the OSG User School. Today, nearly three years later, Natzke has executed +600,000 simulations with the help of OSG staff and prior OSG programming. These simulations, each of them submitted as a job, logged over 135,000 core +hours provided by the Open Science Pool (OSPool). Natzke’s history with the OSG Consortium reflects a pattern of learning, adapting, and improving that +translates to the acceleration and expansion of scientific discovery. During the March OSG All-Hands Meeting 2022, +Natzke was presented the David Swanson Memorial Award, which recognized him for his dedication and tenacity since joining the OSG Community.

+ +
+ Connor Natzke +
Connor Natzke
+
+ +

Natzke is a Ph.D. student at the Colorado School of Mines and is currently located at TRIUMF, a particle +physics laboratory in Vancouver, British Columbia. Natzke’s research focuses on the strong nuclear force, a fundamental force in nature that keeps protons and neutrons bound together in a cohesive unit at +the center of an atom. This force exists at subatomic scales. Therefore, Natzke and his team require something quite large to observe it –– the GRIFFIN +spectrometer. Standing at over ten feet tall, GRIFFIN can measure the angle between photons emitted from an unstable atomic nucleus located at the center +of the instrument. This angle reveals important information about nuclear structure, but Natzke relies on numerous simulations to unveil the whole picture.

+ +

Because the gamma-ray detectors that make up GRIFFIN have limits to their measurement capabilities, Natzke and his team use a Monte Carlo simulation package called GEANT4 to reconstruct the angle between the emitted photons more precisely. This simulation involves mapping a large parameter space –– an energy surface –– of individual photon energies. Forty-one combinations of photon energies are needed to make one of these maps, and three simulations are run for each of these combinations, with each requiring one billion simulated events. The resulting time required to make just one energy surface map is fifty thousand core hours, or roughly five years and nine months if Natzke were relying solely on his laptop’s computational power.

+ +

“With standard computation, this quickly becomes an intractable project,” Natzke explains. “Luckily, I attended the OSG User School in 2019 and learned that +Monte Carlo simulations are essentially the poster child for distributed high-throughput computing.”

+ +

With Monte Carlo simulations, one simulation of one billion events produces results equivalent to one million simulations of one thousand events. +This unique quality transforms what would otherwise be a single lengthy and time-consuming job into many short and quick jobs that can be scaled out to +run in a high-throughput environment. As Natzke sums it up, “It’s frankly beautiful how easily this works.”

+ +
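A sketch of how such a split can be expressed in an HTCondor submit file follows; the job count, script name, and arguments are illustrative assumptions, not Natzke's actual configuration:

```
# geant4_split.sub -- illustrative: 10^9 events split into 1,000 jobs
executable = run_geant4.sh
# each job simulates 10^6 events with a distinct random seed,
# so the pieces are statistically independent and can run anywhere
arguments  = --events 1000000 --seed $(Process)
output     = logs/sim_$(Process).out
error      = logs/sim_$(Process).err
log        = sim.log
queue 1000
```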

With the help of OSG Research Computing Facilitation Lead Lauren Michael, Natzke used a personal meta-scheduler for HTCondor called +DAGMan (Directed Acyclic Graph Manager) to automate his workflow. He wrote python scripts that created and +submitted the DAG file to automate the process further. In total, this workflow took roughly 24 hours to produce one of 41 points on the energy surface +map. Before using DAGMan, each point took one week.

+ +
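DAGMan reads a plain-text description of a workflow's jobs and their dependencies. Here is a minimal sketch of the fan-out-then-combine pattern described above; node and file names are hypothetical:

```
# point.dag -- hypothetical DAGMan input for one energy-surface point
JOB  simulate  simulate.sub    # the Monte Carlo simulation jobs
JOB  combine   combine.sub    # merge the results for this point
PARENT simulate CHILD combine # combine runs only after simulate succeeds
RETRY  simulate 3             # resubmit a failed simulate node up to 3 times
```

Submitting the whole pipeline then reduces to a single condor_submit_dag command.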

But Natzke didn’t stop there. In 2021, he attended the OSG All-Hands Meeting and learned about Pegasus, an HTCondor-integrated workflow system that is offered by OSG’s Access Points. With support from OSG Facilitator and Pegasus developer Mats Rynge, Natzke remodeled his workflow using Pegasus to improve file management, transfers, and error handling. The additional automation that Natzke had written around his DAGMan workflow was already provided by Pegasus, in enhanced form. Natzke humbly jokes, “It’s written by computer scientists, rather than physicists masquerading as computer scientists.” His resulting workflow takes only three commands and finishes in merely four hours, a forty-fold speedup compared to Natzke’s capabilities before OSG services.

+ +
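The article doesn't spell out Natzke's three commands, but a typical Pegasus session has this shape; the directory arguments are placeholders, and exact flags vary across Pegasus versions:

```
# plan the abstract workflow and hand the concrete jobs to HTCondor
pegasus-plan --dir runs --sites condorpool --output-site local --submit workflow.yml

# check on the running workflow
pegasus-status runs/<run-dir>

# summarize successes and debug failures after (or during) the run
pegasus-analyzer runs/<run-dir>
```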

With this new workflow, Natzke can expand upon what’s possible in terms of his research: “Every time I run this, I’m amazed at how much time and effort +I’ve saved, and just the pure automation and capacity that I have access to with OSG. It’s just mind-blowing to me.”

+ +

+ +
+ David Swanson +
David Swanson
+
+ +

The OSG David Swanson Award was established to honor our late colleague and chair of the OSG Consortium, David Swanson. David contributed to campus +research across the country by advancing distributed high-throughput computing (dHTC) and the OSG. Learn more about David’s legacy and past recipients of his namesake award.

+ +

Watch a video recording of Connor Natzke’s presentation at the OSG All-Hands Meeting 2022, and browse his slides.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/OSG-School.html b/preview-fall2024-info/OSG-School.html new file mode 100644 index 000000000..f5f32a207 --- /dev/null +++ b/preview-fall2024-info/OSG-School.html @@ -0,0 +1,371 @@ + + + + + + +OSG School mission: Don’t let computing be a barrier to research + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ OSG School mission: Don’t let computing be a barrier to research +

+

+ +

Most applicants to the annual OSG School share a common challenge: obstacles within their research that they would like to overcome. Answering this need, the OSG Consortium holds an annual weeklong School each summer for researchers and facilitators to expand their adoption of high throughput computing (HTC) methodologies. Instructors teach students through a combination of lectures and hands-on activities, starting out with the basics to accommodate all experience levels.

+ +

This year the 11th OSG School took place in August, with over 50 participants from across the nation as well as 5 attendees from Uganda and Mali, representing over 30 campuses or institutions and 35 research domains.

+ +

Online applications to attend the School open in March. Applicants are considered based on how large-scale computing could benefit their research. Over 100 applications are submitted each year, with around 60 being admitted. All of the participants’ travel and accommodation expenses are covered with funding from the Partnership to Advance Throughput Computing (PATh) NSF award.

+ +

OSG School Director Tim Cartwright believes this year’s participants had computing experiences as diverse as their backgrounds. “Some had never heard about large-scale computing until they saw the School announcements,” he said, “and others had been using it and recognized they were not getting as much out of it as they could.”

+ +

The obstacles researchers encountered that motivated their application to the School varied. Saloni Bhogale, a Political Methodology Ph.D. candidate at the University of Wisconsin–Madison, attended this year’s School after applying HTC methods to her research for almost a year. Her research — which analyzes factors affecting access to justice in India — requires computation over millions of court cases and complaints. Bhogale found that her jobs kept abruptly halting throughout the year, and she was puzzled about how to resolve the problem and how the HTC services were operating. “There were too many hiccups I was constantly running into,” Bhogale said. “I felt like I was more confused than I should be.” When she saw a flier for the OSG School, she decided some extra help was in order.

+ +

Assistant Professor Xiaoyuan (Sue) Suo works in the Department of Math and Computer Science at Webster University and decided to attend the OSG School because she wanted to know more about HTC and its applications. “I never had systematic training,” she explained, “I felt training would be beneficial to me.”

+ +

Another participant at this year’s user school was Paulina Grekov, a doctoral student in Educational Psychology at the University of Wisconsin–Madison. She works in the quantitative methods program and runs complex statistical models of educational studies. Grekov originally tried to run computations without HTC, but it was taking a toll on her personal computer. “Some of the modeling I was doing, specifically statistical modeling, was just frying my computer. The battery was slowly breaking — it was a disaster — my computer was constantly on overdrive,” Grekov recalled.

+ +

During the School, participants were taught the basics of HTC. They were guided through step-by-step instructions and lectures, discussing everything from HTCondor job execution to troubleshooting. Each topic was accompanied by hands-on exercises that allowed attendees to experience the power of HTC. The School also delved into extra topics that could be useful to students, like workflows with DAGMan and GPUs.

+ +

Bhogale recalls that she appreciated the time participants were given to work on their own science applications and the ease of finding an expert to answer her questions. “I was running a pilot of the processes that I would want to do during the School — everyone was right there. So if I ran into an issue, I could just talk to someone,” she said.

+ +

On the last day of the School, the students had an opportunity to showcase what they learned during the week by presenting lightning talks on how they plan to apply HTC in their research. From tracing the evolution of binary black holes to estimating the effect of macroeconomic policies on the economy, ten participants presented ways in which their work could benefit from HTC.

+ +

Postdoctoral Ecologist Researcher Kristin Davis from New Mexico State University gave a lightning talk on how she would utilize HTC to run her large environmental datasets concerning the American Kestrel faster. Yujie Wan from the astronomy department at the University of Illinois Urbana-Champaign talked about how HTC could help her create astronomical maps using a submit file for each observation. Wan said she could then make a DAG file that combines her submit files and have all her maps in just two hours. Cyril Versoza, a graduate research assistant for the Pfeifer Lab at Arizona State University, discussed how the OSG would be a suitable system to implement a mutational spectrum pipeline for his work in evolutionary biology.

+ +
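A DAG file of the kind Wan describes can be as simple as one JOB line per observation's submit file; with no PARENT/CHILD lines, DAGMan submits everything and the jobs run in parallel. The file names below are hypothetical:

```
# maps.dag -- hypothetical DAG combining per-observation submit files
JOB obs001 obs001.sub
JOB obs002 obs002.sub
JOB obs003 obs003.sub
# ...one JOB line per observation; no dependencies, so all run concurrently
```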

Lightning presentations like these open the door for researchers to hear from those outside of their fields. Participants also had the opportunity to hear from researchers who have already made progress in their research applying HTC. “I remember coming back almost every day and talking to my friends and saying there’s fascinating research happening,” Bhogale said.

+ +

The 2023 OSG School also marked the second year that the School collaborated with the African Centers of Excellence in Bioinformatics and Data-Intensive Science (ACE) Program facilitated by the National Institute of Allergy and Infectious Diseases (NIAID). ACE aims to bring large-scale computing to Africa. Joint NIAID and PATh support enabled five ACE students from Mali and Uganda as well as two staff members from NIAID to come to the School. “To work with the students and work with the staff from NIAID, it makes things feel more complete,” Cartwright said.

+ +

After the School ended, some of this year’s attendees offered advice for prospective OSG School students. Grekov recommended that those who attend come in with a goal and a research question in mind; she believes doing so leads students to ask the right questions and focus on the aspects that matter to them. “Come with an idea you want to solve,” she said. Bhogale recommended that any potential student concerned about the difficulty of the School simply “go all in.” She hopes to see more of the social science crowd, like herself, incorporating HTC into their research.

+ +

The 2023 OSG School was one event among a variety of activities that have furthered the spread of large-scale computing in the research world. Tim Cartwright says the goal of the School goes beyond selective expansion, however. “The big picture is always focused on the democratization of access to computing for research,” he said. “We’re trying to make it available to everyone in higher education, regardless of the scale of their computational needs.”

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/OSG-User-School-Concludes.html b/preview-fall2024-info/OSG-User-School-Concludes.html new file mode 100644 index 000000000..c749fc4a7 --- /dev/null +++ b/preview-fall2024-info/OSG-User-School-Concludes.html @@ -0,0 +1,360 @@ + + + + + + +OSG User School Concludes + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ OSG User School Concludes +

+
+ Photo Collage of the User School +
Image highlights of the OSG User School provided by Jeff Peterson
+
+ +

Thank you to all those who attended the OSG User School 2022. Throughout the week, students +learned how to use HTC systems to run large-scale computing applications through lectures, +discussions, and hands-on activities.

+ +

All materials and lesson plans can be found on the +School’s website.

+ +
+
+

Thinking about applying next year?

+
+ We will begin taking applications near the beginning of 2023. + Please check back then on the OSG website for more details!
+
+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/OSPool-Cores.html b/preview-fall2024-info/OSPool-Cores.html new file mode 100644 index 000000000..a90b18c7d --- /dev/null +++ b/preview-fall2024-info/OSPool-Cores.html @@ -0,0 +1,356 @@ + + + + + + +OSPool's Growing Number of Cores Reaching New Levels + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ OSPool's Growing Number of Cores Reaching New Levels +

+

Campuses contributing capacity to the OSPool drove a record-breaking number of cores this December 2022. On December 9th, the OSPool, which provides computing resources to researchers across the country, crossed the 70,000-core line –– for the very first time.

+ +
+ Cores crossed the 70,000 line –– for the very first time. +
On December 9th, total cores crossed the 70,000 line. The chart shows total cores in the OSPool rising from September (right) to December (left), 2022. The blue trend line is the 7-day moving average. +
+
+


+ +

It is no small feat to top over 70,000 cores in a single day. Over 50 campuses and organizations freely contributed their resources to the OSPool in support of Open Science. These campuses and organizations are dedicated to their mission to support research computing on their own campus and across the country.

+ +

Each year additional campuses and organizations add their contributions to the OSPool. Campuses newly adding computing capacity to the OSPool this year come in all sizes and include Cardiff University, Kansas State, New Mexico State University, University of South Dakota, University of Maine and more.

+ +

The contributions to the OSPool this year supported the research of 180 science projects and over 75 million computing jobs.

+ +

Campuses interested in contributing to Open Science may submit a proposal to the NSF Campus Cyberinfrastructure program or contact us at support@opensciencegrid.org.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/Opotowsky.html b/preview-fall2024-info/Opotowsky.html new file mode 100644 index 000000000..4168113a1 --- /dev/null +++ b/preview-fall2024-info/Opotowsky.html @@ -0,0 +1,377 @@ + + + + + + +Expediting Nuclear Forensics and Security Using High Throughput Computing + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Expediting Nuclear Forensics and Security Using High Throughput Computing +

+

Arrielle C. Opotowsky, a 2021 Ph.D. graduate from the University of Wisconsin-Madison’s Department of Engineering Physics, describes how she utilized high throughput computing to expedite nuclear forensics investigations.

+ +
+ Computer rendering of DNA. +
Photo by Dan Myers on Unsplash.
+
+ +
+ Arrielle C. Opotowsky, 2021 Ph.D. graduate from the University of Wisconsin-Madison's Department of Engineering Physics +
Arrielle C. Opotowsky, 2021 Ph.D. graduate from the University of Wisconsin-Madison's Department of Engineering Physics
+
+ +

“Each year, there can be from two to twenty incidents related to the malicious use of nuclear materials,” including theft, sabotage, illegal transfer, and even terrorism, Arrielle C. Opotowsky direly warned. Opotowsky, a 2021 Ph.D. graduate from the University of Wisconsin-Madison’s Department of Engineering Physics, immediately grabbed the audience’s attention at HTCondor Week 2022.

+ +

Opotowsky’s work focuses on nuclear forensics. Preventing nuclear terrorism is the primary concern of nuclear security, and nuclear forensics is “the response side to a nuclear event occurring,” Opotowsky explains. Typically in a nuclear forensics investigation, specific measurements need to be processed; unfortunately, some of these measurements can take months to process. Opotowsky calls this “slow measurement” general mass spectrometry. Although it can help point investigators in the right direction, they wouldn’t be able to do so until long after the incident has occurred.

+ +

In trying to learn how she could expedite a nuclear forensics investigation, Opotowsky wanted to see if gamma spectroscopy, a “fast measurement,” could be the solution. This measurement can potentially point investigators in the right direction in days rather than months.

+ +

To test whether this “fast measurement” could expedite a nuclear forensics investigation compared to a “slow measurement”, Opotowsky created a workflow and compared the two measurements.

+ +

While Opotowsky was a graduate student working on this problem, the workflow she created was running on her personal computer and suddenly stopped working. In a panic, she went to her advisor, Paul Wilson, for help, and he pointed her to the UW-Madison Center for High Throughput Computing (CHTC).

+ +

CHTC Research Computing Facilitators came to her aid, and “the support was phenomenal – there was a one-on-one introduction and a tutorial and incredible help via emails and office hours…I had a ton of help along the way.”

+ +

She needed capacity from the CHTC because her machine-learning workflow spanned tens of case variations. Her training database was relatively large because she used several algorithms and hyperparameter variations and wanted to predict several labels. The sheer magnitude of these training databases was the leading reason Opotowsky needed the services of the CHTC.

+ +

She used two computation categories, the second of which required a specific capability offered by the CHTC –– the ability to scale out a large problem into an ensemble of smaller jobs running in parallel. With 500,000 total entries in the databases and a limit of 10,000 jobs per case submission, Opotowsky split the computations into fifty calculations per job. This method resulted in lower memory needs per job, with each job taking only a few minutes to run.

+ +
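A sketch of that fan-out in submit-file form; the script name and arguments are hypothetical, but the arithmetic matches the article: 10,000 jobs of 50 calculations each cover all 500,000 database entries:

```
# predict_chunk.sub -- hypothetical: 10,000 jobs, 50 calculations apiece
executable = predict.sh
# the script maps chunk i to database rows [i*50, i*50 + 50)
arguments  = --chunk $(Process) --chunk-size 50
output     = logs/chunk_$(Process).out
error      = logs/chunk_$(Process).err
log        = predict.log
# modest per-job memory is one benefit of the split (value illustrative)
request_memory = 1GB
queue 10000
```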

“I don’t think my research would have been possible” without High Throughput Computing (HTC), Opotowsky noted as she reflected on how the CHTC impacted her research. “The main component of my research driving my need [for the CHTC] was the size of my database. It would’ve had to be smaller, have fewer parameter variations, and that ‘fast’ measurement was like a ‘real-world’ scenario; I wouldn’t have been able to have that.”

+ +

Little did Opotowsky know that her experience using HTC would also benefit her professionally. Having HTC experience has helped Opotowsky in job interviews and securing her current position in nuclear security. As a nuclear methods software engineer, “knowledge of designing code and interacting with job submission systems is something I use all the time,” she comments, “[learning HTC] was a wonderful experience to gain” from both a researcher and professional point of view.

+ +

+ +

Watch a video recording of Arrielle C. Opotowsky’s talk at HTCondor Week 2022, and browse her slides.

+ + + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/PATh-Facility.html b/preview-fall2024-info/PATh-Facility.html new file mode 100644 index 000000000..58172adbc --- /dev/null +++ b/preview-fall2024-info/PATh-Facility.html @@ -0,0 +1,380 @@ + + + + + + +Introducing the PATh Facility: A Unique Distributed High Throughput Computing Service + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Introducing the PATh Facility: A Unique Distributed High Throughput Computing Service +

+

Researchers can now request credits on the PATh Facility, the PATh project’s new service intended for distributed high throughput computing workflows supporting NSF science.

+ +
+ Worker nodes +
PATh facility worker nodes destined for Syracuse University Research Computing
+
+ +

With the launch of the new PATh Facility, the PATh project will soon begin providing the partnership’s first dedicated High Throughput Computing (HTC) capacity directly to researchers with NSF-funded projects. This milestone opens the door to longer runtimes, larger jobs, and greater customization for researchers. PATh is a partnership between the OSG Consortium and the University of Wisconsin-Madison’s Center for High Throughput Computing (CHTC). Jointly, the two entities have provided distributed high-throughput computing services and technologies to the S&E community for several decades.

+ +
+ Map of PATh Facility sites +
The sites that make up the PATh Facility
+
+ +

The National Science Foundation (NSF) awards credits to access the PATh Facility, making it well integrated into the nation’s cyberinfrastructure. Researchers can request computing credits associated with their NSF award, which they ‘cash in’ when they run HTC workloads using the PATh Facility’s services. There are currently two mechanisms to request such credit: researchers can request PATh credits within new proposals, or principal investigators (PIs) with existing awards can email their program officer to add credits to their award. In both cases, researchers outline the kind of HTC capacity they need; PATh’s experts are available to help researchers estimate the different requirements of their HTC workloads.

+ +

Just like the partnership, the PATh Facility is distributed and will eventually include computational resources distributed over six different sites across the nation: the Center for High Throughput Computing at the University of Wisconsin-Madison, the Holland Computing Center at the University of Nebraska-Lincoln, Syracuse University’s Research Computing group, the San Diego Supercomputing Center at University of California San Diego, the Texas Advanced Computing Center at the University of Texas at Austin, and Florida International University’s AMPATH network in Miami. This uniquely distributed resource is intended to handle HTC workloads, all for the support and advancement of NSF-funded open science. With access to the PATh Facility, researchers will have approximately 35,000 modern cores and up to 44 A100 GPUs at their fingertips.

+ +

While the PATh credit ecosystem is still growing, any PATh Facility capacity not used for credit will be available to the Open Science Pool (OSPool) to benefit all open science under a Fair-Share allocation policy. In fact, for researchers familiar with the OSPool, running HTC workloads on the PATh Facility should feel second-nature. Like the OSPool, the PATh Facility is nationally-spanning, geographically distributed, and ideal for HTC workloads. But while resources on the OSPool belong to a diverse range of campuses and organizations that have generously donated their resources to open science, the allocation of capacity in the PATh Facility is managed by the PATh Project itself.

+ +

This distinction enables longer runtimes and larger jobs otherwise infeasible on the OSPool opportunistic resources. This higher degree of control also empowers the PATh team to provide researchers with a more customized level of support. Brian Bockelman, Co-PI of the PATh Project, notes: “With the PATh Facility, we can work with researchers to come up with more bespoke solutions. Whether it’s the configuration of the hardware, the runtime, IPv6 connectivity, or whatever it is that’s not working out –– we have far more ability to change it.”

+ +

Initial facility hardware is ready for immediate use by researchers, and the remainder of the hardware is en route to its future home. Wisconsin serves as a central hub for testing and development, and PATh Facility resources are tested there before being shipped off to their final destinations. For example, Nebraska’s share of the PATh Facility has already been shipped and is running opportunistic backfill jobs. The lights are beginning to turn on, and as Bockelman likes to say, “we’re turning electrons into science.”

+ +

However, the effort required to make the PATh Facility possible goes beyond shipping hardware and plugging in cables. To truly turn electrons into science, +creativity and problem-solving will be instrumental. While the NSF is trying out new, innovative ways to award credits, PATh is responsible for credit +management and tracking. This task has blossomed into an internal service development project –– the PATh development team is working on ensuring that +the HTCondor Software Suite (HTCSS) can effectively track credit usage across the facility. Additionally, containers are being used as an enabling technology to provide uniform software environments across PATh Facility resources. Kubernetes, an open-source system for automating management of containerized applications, will allow PATh staff to maintain containers not just individually, but site-wide.

+ +

Marking a monumental moment for the PATh Project, the PATh Facility provides dedicated resources directly to researchers for the first time ever. The project has always been focused on advancing and democratizing access to HTC computing at all scales, and the launch of the PATh Facility makes this goal more attainable than ever. Perhaps Bockelman characterizes the facility’s impact best: “I think the unique part is the distributed aspect and the focus on high throughput computing. It extends that vision of HTC as a mechanism that can make an outsized impact on how researchers leverage computing capacity to advance their science.”

+ +

To hear more about the PATh Facility, listen to Brian Bockelman’s talk from the 2022 OSG All-Hands Meeting in March:

+ + + +

+ +

Request credits for the PATh Facility by contacting NSF. PATh Research Computing Facilitators are here to help –– please reach out to credit-accounts@path-cc.io with questions about PATh resources, using HTC, or estimating credit needs. Learn more about the PATh Facility, credit accounts, and view the 2022 Charge Listing.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/README.md b/preview-fall2024-info/README.md new file mode 100644 index 000000000..7cef552f7 --- /dev/null +++ b/preview-fall2024-info/README.md @@ -0,0 +1,159 @@ +# CHTC Website + +Source repository for CHTC website + +![Build Status](https://github.com/CHTC/chtc-website-source/workflows/Build%2Fdeploy%20web%20pages/badge.svg) + +## Research Computing Guides Guide + +[View Research Computing Guides Guide Here](./_uw-research-computing/README.md) + +## How to Edit + +### Setup (one time, or anytime you want to start fresh) + +1. "Fork" the Github source repository (look for the fork button at the +top right of this page: https://github.com/CHTC/chtc-website-source). +1. Clone the source repository to your own computer. + + git clone https://github.com/CHTC/chtc-website-source +1. `cd` into the `chtc-website-source` folder and add your Github fork to the list of +remotes: + + git remote add mycopy https://github.com/myusername/chtc-website-source + +### Submit a Pull Request (each major change) + +1. Create a branch for new work and switch to it: + + git branch feature-name + git checkout feature-name + Your changes will now be saved in this branch. +1. Make changes to files and add/commit them, following the usual git add/commit workflow. You +can test your changes at any time by following the [instructions below](#testing-changes-locally). +1. Once you're satisfied with your changes and have committed them, push the branch +to **your fork**: + + git push mycopy feature-name +1. On Github, go to your fork of the repo. There will likely be a message prompting you +to open and submit a pull request. + +If you need to update the pull requests, make the necessary changes on your computer, +commit them, and then push the same branch to your fork. + +### Update your copy + +To update your local copy of the source repository, make sure that you're on the `master` +branch; then pull from the original CHTC Github repository: + + git checkout master + git pull origin master + +## Testing Changes on Remote + +:exclamation: This is a new feature! + +To test changes on a publicly viewable development location do the following steps. + +- Populate a branch with the changes you would like to preview and prepend the name of the branch with 'preview-' + - For this example we will call our branch 'preview-helloworld' +- Push the branch to the remote repository at 'https://github.com/CHTC/chtc-website-source.git' +- View the changes at: + - https://chtc.github.io/web-preview// + - In this demo we would look in https://chtc.github.io/web-preview/preview-helloworld/ + +**You can continue to push commits to this branch and have them populate on the preview at this point!** + +- When you are satisfied with these changes you can create a PR to merge into master +- Delete the preview branch and Github will take care of the garbage collection! + +## Testing Changes Locally + +### Quickstart (Unix Only) + +1. Install Docker if you don't already have it on your computer. +2. Open a terminal and `cd` to your local copy of the `chtc-website-source` repository +3. Run the `./edit.sh` script. +4. The website should appear at [http://localhost:8080](http://localhost:8080). Note that this system is missing the secret sauce of our setup that converts +the pages to an `.shtml` file ending, so links won't work but just typing in the name of a page into the address bar (with no +extension) will. 
+ +### Run via Ruby + +```shell +bundle install +bundle exec jekyll serve --watch -p +``` + +### Run Docker Manually + +At the website root: + +``` +docker run -it -p 8001:8000 -v $PWD:/app -w /app ruby:2.7 /bin/bash +``` + +This will utilize the latest Jekyll version and map port `8000` to your host. Within the container, a small HTTP server can be started with the following command: + +``` +bundle install +bundle exec jekyll serve --watch --config _config.yml -H 0.0.0.0 -P 8000 +``` + +## Formatting + +### Markdown Reference and Style + +This is a useful reference for most common markdown features: https://daringfireball.net/projects/markdown/ + +To format code blocks, we have the following special formatting tags: + + ``` + Pre-formatted text / code goes here + ``` + {:.sub} + +`.sub` will generate a "submit file" styled block; `.term` will create a terminal style, and `.file` can +be used for any generic text file. + +We will be using the pound sign for headers, not the `==` or `--` notation. + +For internal links (to a header inside the document), use this syntax: +* header is written as + ``` + ## A. Sample Header + ``` +* the internal link will look like this: + ``` + [link to header A](#a-sample-header) + ``` + +### Converting HTML to Markdown + +Right now, most of our pages are written in html and have a `.shtml` extension. We are +gradually converting them to be formatted with markdown. To easily convert a page, you +can install and use the `pandoc` converter: + + pandoc hello.shtml --from html --to markdown > hello.md + +You'll still want to go through and double check / clean up the text, but that's a good starting point. Once the +document is converted from markdown to html, the file extension should be `.md` instead. If you use the +command above, this means you can just delete the `.shtml` version of the file and commit the new `.md` one. + + +### Adding "Copy Code" Button to code blocks in guides + +Add .copy to the class and you will have a small button in the top right corner of your code blocks that +when clicked, will copy all of the code inside of the block. + +### Adding Software Overview Guide + +When creating a new Software Guide format the frontmatter like this: + +software_icon: /uw-research-computing/guide-icons/miniconda-icon.png +software: Miniconda +excerpt_separator: <!--more--> + +Software Icon and software are how the guides are connected to the Software Overview page. The +excerpt_seperator must be <!--more--> and can be placed anywhere in a document and all text +above it will be put in the excerpt. \ No newline at end of file diff --git a/preview-fall2024-info/Record.html b/preview-fall2024-info/Record.html new file mode 100644 index 000000000..6843c9727 --- /dev/null +++ b/preview-fall2024-info/Record.html @@ -0,0 +1,357 @@ + + + + + + +OSPool Hits Record Number of Jobs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ OSPool Hits Record Number of Jobs +

+

The OSPool processed over 2.6 million jobs during the week of April 14th - 17th this year and ran over half a million jobs on two separate days that week.

+ +

OSPool users and collaborators are smashing records. In April, researchers submitted a record-breaking number of jobs during the week of April 14th – 2.6 million, to be exact. The OSPool also processed over 500k jobs on two separate days during that same week, another record!

+ +

Nearly 60 projects from different fields contributed to the number of jobs processed during this record-breaking week, including these with substantial usage:

+
    +
  • BioMedInfo: University of Pittsburgh PI Erik Wright of the Wright Lab develops and applies software tools to perform large-scale biomedical informatics on microbial genome sequence data.
  • +
  • Michigan_Riles: University of Michigan PI Keith Riles leads the Michigan Gravitational Wave Group, researching continuous gravitational waves.
  • +
  • chemml: PI Olexandr Isayev from Carnegie-Mellon University, whose group develops machine learning (ML) models for molecular simulations.
  • +
  • CompBinFormMod: PI Geoffrey Hutchison from the University of Pittsburgh, investigating data-driven ML as a surrogate for quantum chemical methods to improve existing processes and next-generation atomistic force fields.
  • +
+ +

Any researcher tackling a problem that can be run as many self-contained jobs can harness the capacity of the OSPool. If you have any questions about the Open Science Pool or how to create an account, please visit the FAQ page on the OSG Help Desk website. Descriptions of active OSG projects can be found here.

+ +

+ +

Learn more about the Open Science Pool.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/Resilience.html b/preview-fall2024-info/Resilience.html new file mode 100644 index 000000000..b2a5e1de2 --- /dev/null +++ b/preview-fall2024-info/Resilience.html @@ -0,0 +1,348 @@ + + + + + + +Resilience: How COVID-19 challenged the scientific world + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Resilience: How COVID-19 challenged the scientific world +

+

In the face of the pandemic, scientists needed to adapt. +The article below by the Morgridge Institute for Research provides a thoughtful look into how researchers have pivoted in these challenging times to come together and contribute meaningfully in the global fight against COVID-19. +One of these pivots occurred in the spring of 2020, when Morgridge and the CHTC issued a call for projects investigating COVID-19, resulting in five major collaborations that leveraged the power of HTC.

+ +

For a closer look into how the CHTC and researchers have learned, grown, and adapted during the pandemic, read the full Morgridge article:

+ +

Resilience: How COVID-19 challenged the scientific world

+ + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/Science-Gateway.html b/preview-fall2024-info/Science-Gateway.html new file mode 100644 index 000000000..031282a14 --- /dev/null +++ b/preview-fall2024-info/Science-Gateway.html @@ -0,0 +1,385 @@ + + + + + + +OSG fuels a student-developed computing platform to advance RNA nanomachines + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ OSG fuels a student-developed computing platform to advance RNA nanomachines +

+

How undergraduates at the University of Nebraska-Lincoln developed a science gateway that enables researchers to build RNA nanomachines for therapeutic, engineering, and basic science applications.

+ +

UNL students on graduation

+ +

The UNL students involved in the capstone project, on graduation day. From left to right: Evan, Josh, Dan, Daniel, and Conner.

+ +

When a science gateway built by a group of undergraduate students is deployed this fall, it will open the door for researchers to leverage the capabilities of advanced software and the capacity of the Open Science Pool (OSPool). Working under the guidance of researcher Joe Yesselman and longtime OSG contributor Derek Weitzel, the students united advanced simulation technology and a national, open source of high throughput computing capacity –– all within an intuitive, web-accessible science gateway.

+ +

Joe, a biochemist, has been fascinated by computers and mathematical languages for as long as he can remember. Reminiscing to when he first adopted computer programming and coding as a hobby back in high school, he reflects: “English was difficult for me to learn, but for some reason mathematical languages make a lot of sense to me.”

+ +

Today, he is an Assistant Professor of Chemistry at the University of Nebraska-Lincoln (UNL), and his affinity for computer science hasn’t waned. Leading the Yesselman Lab, he relies on the interplay between computation and experimentation to study the unique structural properties of RNA.

+ +

In September of 2020, Joe began collaborating with UNL’s Holland Computing Center (HCC) and the OSG to accelerate RNA nanostructure research everywhere by making his lab’s RNAMake software suite accessible to other scientists through a web portal. RNAMake enables researchers to build nanomachines for therapeutic, engineering, and basic science applications by simulating the 3D design of RNA structures.

+ +

Five UNL undergraduate students undertook this project as part of a year-long computer science capstone experience. By the end of the academic year, the students developed a science gateway –– an intuitive web-accessible interface that makes RNAMake easier and faster to use. Once it’s deployed this fall, the science gateway will put the Yesselman Lab’s advanced software and the shared computing resources of the OSPool into the hands of researchers, all through a mouse and keyboard.

+ +

The gateway’s workflow is efficient and simple. Researchers upload their input files, set a few parameters, and click the submit button –– no command lines necessary. Short simulations will take merely a few seconds, while complex simulations can last up to an hour. Once the job is completed, an email appears in their inbox, prompting them to analyze and download the resulting RNA nanostructures through the gateway.

+ +

This was no small feat. Collaboration among several organizations brought this seemingly simple final product to fruition.

+ +

To begin the process, the students received a number of startup allocations from the Extreme Science and Engineering Discovery Environment (XSEDE). When it was time to build the application, they used Apache Airavata to power the science gateway and they extended this underlying software in some notable ways. In order to provide researchers with more intuitive results, they implemented a table viewer and a 3D molecule visualization tool. Additionally, they added the ability for Airavata to submit directly to HTCondor, making it possible for simulations to be distributed across the resources offered by the OSPool.

+ +

The simulations themselves are small, short, and can be run independently. Furthermore, many of these simulations are needed in order to discover the right RNA nanostructures for each researcher’s purpose. Combined, these qualities make the jobs a perfect candidate for the OSPool’s distributed high throughput computing capabilities, enabled by computing capacity from campuses across the country.

+ +

Commenting on the incorporation of OSG resources, project sponsor Derek Weitzel explains how the gateway “not only makes it easier to use RNAMake, but it also distributes the work on the OSPool so that researchers can run more RNAMake simulations at the same time.” If the scientific process is like a long road trip, using high throughput computing isn’t even like taking the highway –– it’s like skipping the road entirely and taking to the skies in a high-speed jet.

+ +

The science gateway has immense potential to transform the way in which RNA nanostructure research is conducted, and the collaboration required to build it has already made lasting impacts on those involved. The group of undergraduate students are, in fact, no longer undergraduates. The team’s student development manager, Daniel Shchur, is now a software design engineer at Communication System Solutions in Lincoln, Nebraska. Reflecting on the capstone project, he remarks, “I think the most useful thing that my teammates and I learned was just being able to collaborate with outside people. It was definitely something that wasn’t taught in any of our classes and I think that was the most invaluable thing we learned.”

+ +

But learning isn’t just exclusive to students. Joe notes that he gained some unexpected knowledge from the students and Derek. “I learned a ton about software development, which I’m actually using in my lab,” he explains. “It’s very interesting how people can be so siloed. Something that’s so obvious, almost trivial for Derek is something that I don’t even know about because I don’t have that expertise. I loved that collaboration and I loved hearing his advice.”

+ +

In the end, this collaboration vastly improved the accessibility of RNAMake, Joe’s software suite and the focus of the science gateway. Perhaps he explains it best with an analogy: “RNAMake is basically a set of 500 different LEGO® pieces.” Using enthusiastic gestures, Joe continues by offering an example: “Suppose you want to build something from this palm to this palm, in three-dimensional space. It [RNAMake] will find a set of LEGO® pieces that will fit there.”

+ +

Example of how RNAMake works

+ +

A demonstration of how RNAMake’s design algorithm works. Credit: Yesselman, J.D., Eiler, D., Carlson, E.D. et al. Computational design of three-dimensional RNA structure and function. Nat. Nanotechnol. 14, 866–873 (2019). https://doi.org/10.1038/s41565-019-0517-8

+ +

Since the possible combinations of these LEGO® pieces of RNA are endless, this tool saves users the painstaking work of predicting the structures manually. However, the installation and use of RNAMake requires researchers to have a large amount of command line knowledge –– something that the average biochemist might not have.

+ +

Ultimately, the science gateway makes this previously complicated software suddenly more accessible, allowing researchers to easily, quickly, and accurately design RNA nanostructures.

+ +

These structures are the basis for RNA nanomachines, which have a vast range of applications in society. Whether it be silencing RNAs that are used in clinical trials to cut cancer genes, or RNA biosensors that effectively bind to small molecules in order to detect contaminants even at low concentrations –– the RNAMake science gateway can help researchers design and build these structures.

+ +

Perhaps the most relevant and pressing applications are RNA-based vaccines like those from Moderna and Pfizer. These vaccines continue to be shipped across cities, countries, and continents to reach people in need, and it’s crucial that they remain in a stable form throughout their journey. Insight from RNA nanostructures can help ensure that these long strands of mRNA maintain stability so that they can eventually make their way into our cells.

+ +

Looking to the future, a second science gateway capstone project is already being planned for next year at UNL. Although it’s currently unclear what field of research it will serve, there’s no doubt that this project will foster collaboration, empower students and researchers, and impact society –– all through a few strokes on a keyboard.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/Showcase.html b/preview-fall2024-info/Showcase.html new file mode 100644 index 000000000..7c2547570 --- /dev/null +++ b/preview-fall2024-info/Showcase.html @@ -0,0 +1,360 @@ + + + + + + +Transforming research with high throughput computing + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Transforming research with high throughput computing +

+

During the OSG Virtual School Showcase, three different researchers shared how high throughput computing has made lasting impacts on their work.

+ +

OSG Virtual School 2021 Logo

+ +

Over 40 researchers and campus research computing staff were selected to attend this year’s OSG Virtual School, all united by a shared desire to learn how high throughput computing can advance their work. During the first two weeks of August, school participants were busy attending lectures, watching demonstrations, and completing hands-on exercises; but on Wednesday, August 11, participants had the chance to hear from researchers who have successfully used high throughput computing (HTC) to transform their work. Year after year, this event –– the HTC Showcase –– is one highlight of the experience for many User School participants. This year, three different researchers in the fields of structural biology, psychology, and particle physics shared how HTC impacted their work. Read the articles below to learn about their stories.

+ +

Scaling virtual screening to ultra-large virtual chemical libraries – Spencer Ericksen, Carbone Cancer Center, University of Wisconsin-Madison

+ +

Using HTC for a simulation study on cross-validation for model evaluation in psychological science – Hannah Moshontz, Department of Psychology, University of Wisconsin-Madison

+ +

Antimatter: Using HTC to study very rare processes – Anirvan Shukla, Department of Physics, University of Hawai’i Mānoa

+ +

Collectively, these testimonies demonstrate how high throughput computing can transform research. In a few years, the students of this year’s User School might be the next Spencer, Hannah, and Anirvan, representing the new generation of researchers empowered by high throughput computing.

+ +

+ +

Visit the materials page to browse slide decks, exercises, and recordings of public lectures from OSG Virtual School 2021.

+ +

Established in 2010, OSG School, typically held each summer at the University of Wisconsin–Madison, is an annual education event for researchers who want to learn how to use distributed high throughput computing methods and tools. We hope to return to an in-person User School in 2022.

+ + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/Spencer-Showcase.html b/preview-fall2024-info/Spencer-Showcase.html new file mode 100644 index 000000000..94541f151 --- /dev/null +++ b/preview-fall2024-info/Spencer-Showcase.html @@ -0,0 +1,360 @@ + + + + + + +Scaling virtual screening to ultra-large virtual chemical libraries + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Scaling virtual screening to ultra-large virtual chemical libraries +

+
+ Liquid Handler +
Image by the National Cancer Institute on Unsplash
+
+ +

Kicking off the OSG User School Showcase, Spencer Ericksen, a researcher at the University of Wisconsin-Madison’s Carbone Cancer Center, described how high throughput computing (HTC) has made his work in early-stage drug discovery infinitely more scalable. Spencer works within the Small Molecule Screening Facility, where he partners with researchers across campus to search for small molecules that might bind to and affect the behavior of proteins they study. By using a computational approach, Spencer can help a researcher inexpensively screen many more candidates than possible through traditional laboratory approaches. With as many as 10<sup>33</sup> possible molecules, the best binders from computational ‘docking’ might even be investigated as potential drug candidates.

+ +

With traditional laboratory approaches, researchers might test just 100,000 individual compounds using liquid handlers like the one pictured above. However, this approach is expensive, imposing limits both on the number of molecules tested and the number of researchers able to pursue potential binders of the proteins they study.

+ +

Spencer’s use of HTC allows him to take a different approach with virtual screening. By using computational models and machine learning techniques, he can inexpensively filter the masses of molecules and predict which ones will have the highest potential to interfere with a certain biological process. This reduces the time and money spent in the lab by selecting a subset of binding candidates that would be best to study experimentally.

+ +

“HTC is a fabulous resource for virtual screening,” Spencer attests. “We can now effectively validate, develop, and test virtual screening models, and scale to ever-increasing ultra-large virtual chemical libraries.” Today, Spencer is able to screen approximately 3.5 million molecules each day thanks to HTC.

+ +

There are a variety of virtual screening programs, but none of them are all that reliable individually. Instead of opting for a single program, Spencer runs several programs on the Open Science Pool (OSPool) and calculates a consensus score for each potential binder. “It’s a pretty old idea, basically like garnering wisdom from a council of fools,” Spencer explains. “Each program is a weak discriminator, but they do it in different ways. When we combine them, we get a positive effect that’s much better than the individual programs. Since we have the throughput, why not run them all?”

+ +
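To make the consensus idea concrete, here is one minimal way to combine per-molecule scores from several docking programs into a single averaged score. This is an illustrative sketch, not the facility’s actual pipeline: the files prog1.scores, prog2.scores, and prog3.scores are hypothetical, each assumed to hold one “molecule_id score” line per compound, with scores already normalized to a common scale.

  # paste the three hypothetical score files side by side, then average
  # the score columns (fields 2, 4, and 6) for each molecule
  paste prog1.scores prog2.scores prog3.scores \
    | awk '{ print $1, ($2 + $4 + $6) / 3 }' \
    > consensus.scores

A rank-based combination would work the same way, with per-program ranks substituted for raw scores before averaging.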

And there’s nothing stopping the Small Molecule Screening Facility from doing just that. Spencer’s jobs are independent from each other, making them “pleasantly parallelizable” on the OSPool’s distributed resources. To maximize throughput, Spencer splits the compound libraries that he’s analyzing into small increments that will run in approximately 2 hours, reducing the chances of a job being evicted and using the OSPool more efficiently.

+ +

+ +

This article is part of a series of articles from the 2021 OSG Virtual School Showcase. OSG School is an annual education event for researchers who want to learn how to use distributed high throughput computing methods and tools. The Showcase, which features researchers sharing how HTC has impacted their work, is a highlight of the school each year.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/Technology-Refresh.html b/preview-fall2024-info/Technology-Refresh.html new file mode 100644 index 000000000..f74df429c --- /dev/null +++ b/preview-fall2024-info/Technology-Refresh.html @@ -0,0 +1,385 @@ + + + + + + +Technology Refresh + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Technology Refresh +

+

Thanks to the generous support of the Office of the Vice Chancellor for Research and Graduate Education with funding from the Wisconsin Alumni Research Foundation, CHTC has been able to execute a major refresh of hardware. This provided 207 new servers for our systems, representing over 40,000 batch slots of computing capacity. Most of this hardware arrived over the summer, and we have started adding these servers to CHTC systems.

+ +

Continue reading to learn more about the types of servers we are adding and how to access them.

+ +

HTC System

+ +

On the HTC system, we are adding 167 new servers, representing 36,352 job slots and 40 high-end GPU cards.

+ +

The new servers will be running CentOS Stream 8 – CHTC users should see our website page about how to test your jobs and +take advantage of servers running CentOS Stream 8. Details on user actions needed for this change can be found on the +OS transition page.

+ +
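For users who want to steer jobs explicitly during a transition like this, a submit-file requirement on the operating system is one common approach. The line below is a hedged sketch using HTCondor’s standard OpSysAndVer machine attribute; the exact attribute value that matches the new servers should be confirmed on the OS transition page rather than taken from this example.

  # in the HTCondor submit file: only match machines reporting this OS version
  requirements = (OpSysAndVer == "CentOS8")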

New server specs:

+ +
PowerEdge R6525
+ +
    +
  • 157 servers with 128 cores / 256 job slots using the AMD Epyc 7763 processor
  • +
  • 512 GB RAM per server
  • +
+ +
PowerEdge XE8545
+ +
    +
  • 10 servers, each with four A100 SXM4 80GB GPU cards
  • +
  • 128 cores per server
  • +
  • 512GB RAM per server
  • +
+ +

HPC Cluster

+ +

For the HPC cluster, we are adding 40 servers representing 5,120 cores. These servers have arrived but have not yet been added to the HPC cluster. In most cases, when we add them, they will form a new partition and displace some of our oldest servers, currently in the “univ2” partition.

+ +

New server specs:

+ +
Dell PowerEdge R6525
+ +
    +
  • 128 cores using the AMD Epyc 7763 processor
  • +
  • 512GB of memory
  • +
+ +

Users interested in early access to AMD processors before all 40 servers are installed should contact CHTC at chtc@cs.wisc.edu.

+ +

We have also obtained hardware and network infrastructure to completely replace the HPC cluster’s underlying file system and InfiniBand network fabric. We will be sending more updates to the chtc-users mailing list as we schedule specific transition dates for these major cluster components.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/Throughput-Computing-2023.html b/preview-fall2024-info/Throughput-Computing-2023.html new file mode 100644 index 000000000..d2e3f8b65 --- /dev/null +++ b/preview-fall2024-info/Throughput-Computing-2023.html @@ -0,0 +1,370 @@ + + + + + + +Save the dates for Throughput Computing 2023 - a joint HTCondor/OSG event + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Save the dates for Throughput Computing 2023 - a joint HTCondor/OSG event +

+

Don't miss these in-person learning opportunities in beautiful Madison, Wisconsin!

+ +

Save the dates for Throughput Computing 2023! For the first time, HTCondor Week and the OSG All-Hands Meeting will join together as a single, integrated event from July 10–14 to be held at the University of Wisconsin–Madison’s Fluno Center. Throughput Computing 2023 is sponsored by the OSG Consortium, the HTCondor team, and the UW-Madison Center for High Throughput Computing.

+ +

This will primarily be an in-person event, but remote participation (via Zoom) for the many plenary events will also be offered. Required registration for both components will open in March 2023.

+ +

If you register for the in-person event at the University of Wisconsin–Madison, you can attend plenary and non-plenary sessions, mingle with colleagues, and have planned or ad hoc meetings. Evening events are also planned throughout the week.

+ +

All the topics typically covered by HTCondor Week and the OSG All-Hands Meeting will be included:

+ +
    +
  • Science Enabled by the OSPool and the HTCondor Software Suite (HTCSS)
  • +
  • OSG Technology
  • +
  • HTCSS Technology
  • +
  • HTCSS and OSG Tutorials
  • +
  • State of the OSG
  • +
  • Campus Services and Perspectives
  • +
+ +

The U.S. ATLAS and U.S. CMS high-energy physics projects are also planning parallel OSG-related topics during the event on Wednesday, July 12. (For other attendees, Wednesday’s schedule will also include parallel HTCondor and OSG tutorials and OSG Collaborations sessions.)

+ +

For questions, please contact us at events@osg-htc.org or htcondor-week@cs.wisc.edu.

+ +

View last year’s schedules for

+ + + + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/Using-HTCondor-For-Large-File-Transfer.html b/preview-fall2024-info/Using-HTCondor-For-Large-File-Transfer.html new file mode 100644 index 000000000..f4d8b6ffb --- /dev/null +++ b/preview-fall2024-info/Using-HTCondor-For-Large-File-Transfer.html @@ -0,0 +1,366 @@ + + + + + + +How to Transfer 460 Terabytes? A File Transfer Case Study + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ How to Transfer 460 Terabytes? A File Transfer Case Study +

+

When Greg Daues at the National Center for Supercomputing Applications (NCSA) needed to transfer 460 terabytes of NCSA files from the National Institute of Nuclear and Particle Physics (IN2P3) in Lyon, France to Urbana, Illinois, for a project with FNAL, CC-IN2P3, and the Rubin Data Production team, he turned to the HTCondor High Throughput Computing system – not to run computationally intensive jobs, as many do, but to manage the hundreds of thousands of I/O-bound transfers.

+ +

The Data

+ +

IN2P3 made the data available via HTTPS, but the number of files and their total size made managing the transfer an engineering challenge. There were two kinds of files to be transferred: 3.5 million files with a median size of roughly 100 megabytes, and another 3.5 million smaller files with a median size of about 10 megabytes. The total transfer size was roughly 460 terabytes.

+ +

The Requirements

+ +

The requirement for this transfer was to reliably transfer all the files in a reasonably performant way, minimizing the human time needed to set up, run, and manage the transfer. Note the non-goal of optimizing for the fastest possible transfer time – reliability and minimizing human effort take priority here. Reliability, in this context, implies:

+ +

  • Failed transfers are identified and re-run (with millions of files, a failed transfer is almost inevitable)
  • Every file will get transferred
  • The operation will not overload the sender, the receiver, or any network in between

+ +

The Inspiration

+ +

Daues had presented unrelated work at the 2017 HTCondor Week workshop, where he heard about the work of Philip Papadopoulos at UCSD and his international Data Placement Lab (iDPL). iDPL used HTCondor jobs solely for transferring data between international sites. Daues re-used and adapted some of these ideas for NCSA’s needs.

+ +

The Solution

+

First, Daues installed a “mini-condor”: an HTCondor pool that lives entirely on one machine, with an access point and eight execution slots on that same machine. Then, given a single large file containing the names of all the files to transfer, he ran the Unix split command to create separate list files, each naming either 50 of the larger files or 200 of the smaller files. Finally, using the HTCondor submit file command

+ +

Queue filename matching files *.txt

+ +

the condor_submit command creates one job per split file; each job runs the wget2 command, passing it the list of filenames to fetch. The HTCondor access point can handle tens of thousands of idle jobs, and will schedule these jobs on the eight execution slots. While more slots would yield more overlapped I/O, eight slots were chosen to throttle the total network bandwidth used. Over the course of days, this machine with eight slots maintained roughly 600 MB/second.

+ +
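As a concrete illustration of this setup, here is a minimal sketch of the split-then-submit pattern described above. The chunk sizes come from the article, but the file names, the wget2 options, and the submit-file details are illustrative assumptions rather than Daues’s actual configuration.

  # split the master list of large files into chunks of 50 names each
  # (the list of smaller files would use chunks of 200 instead)
  split --lines=50 --numeric-suffixes --additional-suffix=.txt large_files.txt chunk_

  # transfer.sub – one HTCondor job per chunk file
  executable            = /usr/bin/wget2
  arguments             = --input-file=$(filename) --continue --tries=3
  transfer_input_files  = $(filename)
  should_transfer_files = YES
  queue filename matching files chunk_*.txt

  # submitting creates one job per chunk_NN.txt; the pool's eight
  # execution slots are what throttle how many transfers run at once
  condor_submit transfer.sub

Because each job is simply “fetch the files named in one list,” a failed or evicted job can be rerun from its list without affecting any other job.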

(Note that the machine running HTCondor did not crash during this run, but if it had, all of the submitted jobs would have remained stored reliably on the local disk; when the crashed machine restarted and the init program restarted the HTCondor system, all interrupted jobs would have been restarted, and the process would have continued without human intervention.)

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/Vagrantfile b/preview-fall2024-info/Vagrantfile new file mode 100644 index 000000000..308f54f89 --- /dev/null +++ b/preview-fall2024-info/Vagrantfile @@ -0,0 +1,102 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# Comment this out if not using a host-only network +class VagrantPlugins::ProviderVirtualBox::Action::Network + def dhcp_server_matches_config?(dhcp_server, config) + true + end +end + +# All Vagrant configuration is done below. The "2" in Vagrant.configure +# configures the configuration version (we support older styles for +# backwards compatibility). Please don't change it unless you know what +# you're doing. +Vagrant.configure("2") do |config| + config.vm.define "chtcsite" + + config.vm.hostname = "chtcsite.vm" + # The most common configuration options are documented and commented below. + # For a complete reference, please see the online documentation at + # https://docs.vagrantup.com. + + # Every Vagrant development environment requires a box. You can search for + # boxes at https://vagrantcloud.com/search. + config.vm.box = "ubuntu/bionic64" + + # Disable automatic box update checking. If you disable this, then + # boxes will only be checked for updates when the user runs + # `vagrant box outdated`. This is not recommended. + # config.vm.box_check_update = false + + # Create a forwarded port mapping which allows access to a specific port + # within the machine from a port on the host machine. In the example below, + # accessing "localhost:8080" will access port 80 on the guest machine. + # NOTE: This will enable public access to the opened port + # config.vm.network "forwarded_port", guest: 80, host: 8080 + + # Create a forwarded port mapping which allows access to a specific port + # within the machine from a port on the host machine and only allow access + # via 127.0.0.1 to disable public access + # config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1" + + # Create a private network, which allows host-only access to the machine + # using a specific IP. + # config.vm.network "private_network", ip: "192.168.33.10" + + # Create a public network, which generally matched to bridged network. + # Bridged networks make the machine appear as another physical device on + # your network. + # config.vm.network "public_network" + + # Share an additional folder to the guest VM. The first argument is + # the path on the host to the actual folder. The second argument is + # the path on the guest to mount the folder. And the optional third + # argument is a set of non-required options. + config.vm.synced_folder ".", "/chtc-website-source", type: "rsync", + rsync__args: ["--verbose", "--archive", "--delete"] + + + # Provider-specific configuration so you can fine-tune various + # backing providers for Vagrant. These expose provider-specific options. + # Example for VirtualBox: + # + # config.vm.provider "virtualbox" do |vb| + # # Display the VirtualBox GUI when booting the machine + # vb.gui = true + # + # # Customize the amount of memory on the VM: + # vb.memory = "1024" + # end + # + # View the documentation for the provider you are using for more + # information on available options. 
+ + config.vm.provision "shell", inline: <<-SHELL + apt-get update + apt-get install -y make gcc g++ + echo 'export NO_PUSH=1' > /etc/profile.d/NO_PUSH.sh + snap install --classic ruby + gem install bundle + cd /chtc-website-source + runuser -u vagrant -- bundle install + runuser -u vagrant -- git config --global user.name "Vagrant" + runuser -u vagrant -- git config --global user.email "vagrant@chtcsite.vm" + echo + echo + echo =============================================================================== + echo "Setup complete!" + echo + echo "The repo checkout is in /chtc-website-source." + echo + echo "Run 'script/cibuild to build the pages, and script/cideploy to deploy them." + echo "(cideploy will run all deploy steps except for the actual push.)" + echo "" + echo "Set BRANCH and TARGET_REPO to test deploying to a different branch" + echo "or GitHub repo." + echo + echo "If you make changes to files outside of the image, run 'vagrant reload'" + echo "to restart the VM with these new changes." + SHELL + +end diff --git a/preview-fall2024-info/Wilcots.html b/preview-fall2024-info/Wilcots.html new file mode 100644 index 000000000..ec70af5e4 --- /dev/null +++ b/preview-fall2024-info/Wilcots.html @@ -0,0 +1,439 @@ + + + + + + +The Future of Radio Astronomy Using High Throughput Computing + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ The Future of Radio Astronomy Using High Throughput Computing +

+

Eric Wilcots, UW-Madison dean of the College of Letters & Science and the Mary C. Jacoby Professor of Astronomy, dazzles the HTCondor Week 2022 audience.

+ +
+ Image of the black hole in the center of our Milky Way galaxy. +
Image of the black hole in the center of our Milky Way galaxy.
+
+ +
+ Eric Wilcots +
Eric Wilcots
+
+ +

“My job here is to…inspire you all with a sense of the discoveries to come that will need to be enabled by” high throughput computing (HTC), Eric Wilcots opened his keynote for HTCondor Week 2022. Wilcots is the UW-Madison dean of the College of Letters & Science and the Mary C. Jacoby Professor of Astronomy.

+ +

Wilcots points out that the black hole image (shown above) is a remarkable feat in the world of astronomy. It is “only the third such black hole imaged in this way by the Event Horizon Telescope,” and it was made possible with the help of the HTCondor Software Suite (HTCSS).

+ +

Beginning to build the future

+ +

Wilcots described how in the 1940s, a group of universities recognized that no single university could build a radio telescope necessary to advance science. To access these kinds of telescopes, the universities would need to have the national government involved, as it was the only one with this capability at that time. In 1946, these universities created Associated Universities Incorporated (AUI), which eventually became the management agency for the National Radio Astronomy Observatory (NRAO).

+ +

Advances in radio astronomy rely on current technology available to experts in this field. Wilcots explained that “the science demands more sensitivity, more resolution, and the ability to map large chunks of the sky simultaneously.” New and emerging technologies must continue pushing forward to discover the next big thing in radio astronomy.

+ +

This next generation of science requires more sensitive technology with higher spectral resolution than the Karl G. Jansky Very Large Array (JVLA) can provide. It also requires sensitivity in a particular chunk of the spectrum that neither the JVLA nor the Atacama Large Millimeter/submillimeter Array (ALMA) can achieve. Wilcots described just what piece of technology astronomers and engineers need to create to reach this level of sensitivity. “We’re looking to build the Next Generation Very Large Array (ngVLA)…an instrument that will cover a huge chunk of spectrum from 1 GHz to 116 GHz.”

+ +

The fundamentals of the ngVLA

+ +

“The unique and wonderful thing about interferometry, or the basis of radio astronomy,” Wilcots discussed, “is the ability to have many individual detectors or dishes to form a telescope.” Each dish collects signals, creating an image or spectrum of the sky when combined. Because of this capability, engineers working on these detectors can begin to collect signals right away, and as more dishes get added, the telescope grows larger and larger.

+ +

Many individual detectors also mean lots of flexibility in the telescope arrays built, Wilcots explained. Here, the idea is to do several different arrays to make up one telescope. A particular scientific case drives each of these arrays:

+
    +
  • Main Array: a dish that you can control and point accurately but is also robust; it’ll be the workhorse of the ngVLA, simultaneously capable of high sensitivity and high-resolution observations.
  • +
  • Short Baseline Array: dishes that are very close together, which allows you to have a large field of view of the sky.
  • +
  • Long Baseline Array: spread out across the continental United States. The idea here is the longer the baseline, the higher the resolution. Dishes that are well separated allow the user to get spectacular spatial resolution of the sky. For example, the Event Horizon Telescope that took the image of the black hole is a telescope that spans the globe, which is the longest baseline we can get without putting it into orbit.
  • +
+ +
+ The ngVLA will be spread out over the southwest United States and Mexico. +
The ngVLA will be spread out over the southwest United States and Mexico.
+
+ +

A consensus study report called Pathways to Discovery in Astronomy and Astrophysics for the 2020s (Astro2020) identified the ngVLA as a high priority. The construction of this telescope should begin this decade and be completed by the middle of the 2030s.

+ +

Future of radio astronomy: planet formation

+ +

An area of research that radio astronomers are interested in examining in the future is imaging the formation of planets, Wilcots notes. Right now, astronomers can detect a planet’s presence and deduce specific characteristics, but being able to detect a planet directly is the next huge priority.

+ +
+ A planetary system forming +
A planetary system forming
+
+ +

One place astronomers might be able to do this with something like the ngVLA is in the early phases of planet formation within a planetary system. The thermal emissions from this process are bright enough to be detected by a telescope like the ngVLA. So the idea is to use this telescope to image nearby planetary systems and begin to capture the early stages of planet formation directly. A catalog of these forming planets will allow astronomers to understand what happens when planetary systems, like our own, form.

+ +

Future of radio astronomy: molecular systems

+ +

Wilcots explains that radio astronomers have discovered the spectral signature of innumerable molecules within the past fifty years. The ngVLA is being designed to probe, detect, catalog, and understand the origin of complex molecules and what they might tell us about star and planet formation. Wilcots comments in his talk that “this type of work is spawning a new type of science…a remarkable new discipline of astrobiology is emerging from our ability to identify and trace complex organic molecules.”

+ +

Future of radio astronomy: galaxy completion

+ +

Next, Wilcots discusses that radio astronomers want to understand how stars form in the first place and the processes that drive the collapse of clouds of gas into regions of star formations.

+ +
+ On the left, a VLA image of the gas in a nearby spiral galaxy (shown as a blue spiral); on the right, the optical extent of the galaxy. +
On the left, a VLA image of the gas in a nearby spiral galaxy (shown as a blue spiral); on the right, the optical extent of the galaxy.
+
+ +

The gas in a galaxy tends to extend well beyond the visible part of the galaxy, and this enormous gas reservoir is how the galaxy can make stars.

+ +

Astronomers like Wilcots want to know where the gas is, what drives that process of converting the gas into stars, what role the environment might play, and finally, what makes a galaxy stop creating stars.

+ +

ngVLA will be able to answer these questions as it combines the sensitivity and spatial resolution needed to take images of gas clouds in nearby galaxies while also capturing the full extent of that gas.

+ +

Future of radio astronomy: black holes

+ +

Wilcots’ look into the future of radio astronomy finishes with the idea and understanding of black holes.

+ +

Multi-messenger astrophysics helps experts recognize that information about the universe is not simply electromagnetic, the form we know best; there is more than one way astronomers can look at the universe.

+ +

More recently, astronomers have been looking at gravitational waves. In particular, they’ve been looking at how they can find a way to detect the gravitational waves produced by two black holes orbiting around one another to determine each black hole’s mass and learn something about them. As the recent EHT images show, we need radio telescopes’ high resolution and sensitivity to understand the nature of black holes fully.

+ +

A look toward the future

+ +

The next step is for the NRAO to create a prototype of the dishes they want to install for the telescope. Then, it’s just a question of whether or not they can build and install enough dishes to deliver this instrument to its full capacity. Wilcots elaborates, “we hope to transition to full scientific operations by the middle of next decade (the 2030s).”

+ +

The distinguished administrator expressed that “something that’s haunted radio astronomy for a while is that to do the imaging, you have to ‘be in the club,’ ” meaning that not just anyone can access the science coming out of these telescopes. The goal of the NRAO moving forward is to create science-ready data products so that this information can be more widely available to anyone, not just those with intimate knowledge of the subject.

+ +

This effort to make this science more accessible has been part of a budding collaboration between UW-Madison, the NRAO, and a consortium of Historically Black Colleges and Universities and other Minority Serving Institutions in what is called Project RADIAL.

+ +

“The idea behind RADIAL is to broaden the community; not just of individuals engaged in radio astronomy, but also of individuals engaged in the computing that goes into doing the great kind of science we have,” Wilcots explains.

+ +

In the summer of 2022, half a dozen undergraduate students from the RADIAL consortium will be on the UW-Madison campus doing summer research. The goal is to broaden awareness of, and increase participation in, radio astronomy research among communities not typically involved in these discussions.

+ +

“We laid the groundwork for a partnership with a number of these institutions, and that partnership is alive and well,” Wilcots remarks, “so stay tuned for more of that, and we will be advancing that in the upcoming years.”

+ +

+ +

Watch a video recording of Eric Wilcots’ talk at HTCondor Week 2022.

+ + + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/about.html b/preview-fall2024-info/about.html new file mode 100644 index 000000000..f8082a7ab --- /dev/null +++ b/preview-fall2024-info/about.html @@ -0,0 +1,359 @@ + + + + + + +About + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+

+ About +

+ +
+
+
+ +
+
+
+ + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/abstracts-open-european-htcondor-workshop.html b/preview-fall2024-info/abstracts-open-european-htcondor-workshop.html new file mode 100644 index 000000000..88a7ff417 --- /dev/null +++ b/preview-fall2024-info/abstracts-open-european-htcondor-workshop.html @@ -0,0 +1,343 @@ + + + + + + +European HTCondor Workshop: Abstract Submission Open + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ European HTCondor Workshop: Abstract Submission Open +

+

Share your experiences with HTCSS at the European HTCondor Workshop in Amsterdam!

+ +

See this recent post in htcondor-users for details.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/adjacent-tissues-paper.html b/preview-fall2024-info/adjacent-tissues-paper.html new file mode 100644 index 000000000..e022b2bd0 --- /dev/null +++ b/preview-fall2024-info/adjacent-tissues-paper.html @@ -0,0 +1,393 @@ + + + + + + +Using HTC expanded scale of research using noninvasive measurements of tendons and ligaments + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Using HTC expanded scale of research using noninvasive measurements of tendons and ligaments +

+

With this technique and the computing power of high throughput computing (HTC) combined, researchers can run thousands of simulations to study the pathology of tendons +and ligaments.

+ +

A recent paper published in the Journal of the Mechanical Behavior of Biomedical Materials by former Ph.D. +student in the Department of Mechanical Engineering (and current post-doctoral researcher at the University of Pennsylvania) +Jonathon Blank and John Bollinger Chair of Mechanical Engineering +Darryl Thelen used the Center for High Throughput Computing (CHTC) to obtain their results – +results that, Blank says, would not have been obtained at the same scale without HTC. “[This project], and a number of other projects, would have had a very small snapshot of the +problem at hand, which would not have allowed me to obtain the understanding of shear waves that I did. Throughout my time at UW, I ran tens of thousands of simulations — probably +even hundreds of thousands.”

+ +
+ Post-doctoral researcher at the University of Pennsylvania Jonathon Blank. +
Post-doctoral researcher at the University of Pennsylvania Jonathon Blank.
+
+ +

Using noninvasive sensors called shear wave tensiometers, researchers on this project applied HTC to study tendon structure and function. Currently, research in this field is hard +to translate because most assessments of tendon and ligament structure-function relationships are performed on the benchtop in a lab, Blank explains. To translate the benchtop +experiments into studying tendons in humans, the researchers use tensiometers as a measurement tool, and this study developed from trying to better understand these measurements +and how they can be applied to humans. “Tendons are very complex materials from an engineering perspective. When stretched, they can bear loads far exceeding your body weight, and +interestingly, even though they serve their roles in transmitting force from muscle to bone really well, the mechanisms that give rise to injury and pathology in these tissues aren’t +well understood.”

+ +
+ John Bollinger Chair of Mechanical Engineering Darryl Thelen. +
John Bollinger Chair of Mechanical Engineering Darryl Thelen.
+
+ +

In living organisms, researchers have used tensiometers to study the loading of muscles and tendons, including the triceps surae, which connects to the Achilles tendon, Blank notes. +Since humans are variable regarding the size, stiffness, composition, and length of their tendons or ligaments, it’s “challenging to use a model to accurately represent a parameter +space of human biomechanics in the real world. High throughput computing is particularly useful for our field just because we can readily express that variability at a large scale” +through HTC. With Thelen and Orthopedics and Rehabilitation assistant professor Josh Roth, Blank developed a pipeline for +simulating shear wave propagation in tendons and ligaments with HTC, which Blank and Thelen used in the paper.

+ +

With HTC, the researchers of this paper were able to further explore the mechanistic causes of changes in wave speed. “The advantage of this technique is being able to fully explore +an input space of different stiffnesses, geometries, microstructures, and applied forces. The advantage of the capabilities offered by the CHTC is that we can fill the entire input +space, not just between two data points, and thereby study changes in shear wave speed due to physiological factors and the mechanical underpinning driving those changes,” Blank +elaborates.

+ +

It wasn’t challenging to implement, Blank states, since facilitators were readily available to help and meet with him. When he first started using HTC, Blank attended the CHTC +office hours to get answers to his questions, even during COVID-19; during this time, there were also numerous one-on-one meetings. Having this backbone of support from the CHTC +research facilitators propelled Blank’s research and made it much easier. “For a lot of modeling studies, you’ll have this sparse input space where you change a couple of parameters +and investigate the sensitivity of your model that way. But it’s hard to interpret what goes on in between, so the CHTC quite literally saved me a lot of time. There were some +1,000 simulations in the paper, and HTC, by scaling out the workload, turned a couple thousand hours of simulation time into two or three hours of wall clock time. It’s a unique tool +for this kind of research.”

+ +

The next step from this paper’s findings, Blank describes, is providing subject-specific measurements of wave speeds. This involves “understanding if when we use a tensiometer on +someone’s Achilles tendon, for example, can we account for the tendon’s shape, size, injury status, etcetera — all of these variables matter when measuring shear wave speeds.” +Researchers from the lab can then use wearable tensiometers to measure tension in the Achilles and other tendons to study human movement in the real world.

+ +

From his CHTC-supported studies, Blank learned how to design computational research, diagnose different parameter spaces, and manage data. “For my field, it [HTC] is very important +because people are extremely variable — so our models should be too. The automation and capacity enabled by HTC makes it easy to understand whether our models are useful, and if +they are, how best to tune them to inform human biomechanics,” Blank says.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/approach.html b/preview-fall2024-info/approach.html new file mode 100644 index 000000000..651e20e68 --- /dev/null +++ b/preview-fall2024-info/approach.html @@ -0,0 +1,401 @@ + + + + + + +About Our Approach + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ About Our Approach +

+

CHTC’s specialty is High +Throughput Computing (HTC), which involves breaking up a single large +computational task into many smaller tasks for the fastest overall +turnaround. Most of our users find HTC to be invaluable +in accelerating their computational work and thus their research. +We support thousands of multi-core computers and use the task +scheduling software called HTCondor, developed right here in Madison, to +run thousands of independent jobs on as many total processors as +possible. These computers, or “machines”, are distributed across several +collections that we call pools (similar to “clusters”). Because machines are +assigned to individual jobs, many users can be running jobs on a pool at any +given time, all managed by HTCondor.

+ +

The diagram below shows some of the largest pools on campus and also +shows our connection to the US-wide OSPool where UW computing +work can “backfill” available computers all over the country. The number +under each resource name shows an approximate number of computing hours +available to campus researchers for a typical week in Fall 2013. As +demonstrated in the diagram, we help users to submit their work not only +to our CHTC-owned machines, but to improve their throughput even further +by seamlessly accessing as many available computers as possible, all +over campus AND all over the country.

+ +

The vast majority of the computational work that campus researchers have +is HTC, though we are happy to support researchers with a variety of +beyond-the-desktop needs, including tightly-coupled computations (e.g. +MPI), high-memory work (e.g. metagenomics), and specialized +hardware like GPUs.

+ +

chtc-pools

+ +

What kinds of applications run best in the CHTC?

+ +

“Pleasantly parallel” tasks, where many jobs can run independently, +are what works best in the CHTC, and what we can offer the greatest +computational capacity for. +Analyzing thousands of images, inferring statistical significance of hundreds of +thousands of samples, optimizing an electric motor design with millions +of constraints, aligning genomes, and performing deep linguistic search +on a 30 TB sample of the internet are a few of the applications that +campus researchers run every day in the CHTC. If you are not sure if +your application is a good fit for CHTC resources, get in +touch and we will be happy to help you figure it out.

+ +
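To illustrate what “pleasantly parallel” looks like in practice, here is a minimal, generic HTCondor submit file that queues many independent copies of one program, each with its own input. The executable and file names are placeholders for illustration, not a specific CHTC workload.

  # analyze.sub – run the same program over 1000 independent inputs
  executable              = analyze_image
  arguments               = input_$(Process).png output_$(Process).csv
  transfer_input_files    = input_$(Process).png
  should_transfer_files   = YES
  when_to_transfer_output = ON_EXIT
  request_cpus            = 1
  request_memory          = 1GB
  request_disk            = 1GB
  log                     = analyze.log
  output                  = analyze_$(Process).out
  error                   = analyze_$(Process).err
  queue 1000

HTCondor expands $(Process) to 0 through 999, so each job independently fetches its own input and writes its own output – exactly the independence that lets HTCondor spread such work across as many machines as are available.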

Within a single compute system, we also support GPUs, high-memory +servers, and specialized hardware owned by individual research groups. +For tightly-coupled computations (e.g. MPI and similar programmed +parallelization), our resources include an HPC Cluster, with faster +inter-node networking.

+ +

How to Get Access

+ +

While you may be excited at the prospect of harnessing 100,000 compute +hours a day for your research, the most valuable thing we offer is, +well, us. We have a small, yet dedicated team of professionals who eat, +breathe and sleep distributed computing. If you are a UW-Madison Researcher, you can request an +account, and one of our dedicated Research Computing +Facilitators will follow up to provide specific recommendations to +accelerate YOUR science.

+ +

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/assets/bibtex/chtc.bib b/preview-fall2024-info/assets/bibtex/chtc.bib new file mode 100644 index 000000000..e8242154e --- /dev/null +++ b/preview-fall2024-info/assets/bibtex/chtc.bib @@ -0,0 +1,8 @@ +@misc{https://doi.org/10.21231/gnt1-hw21, + doi = {10.21231/GNT1-HW21}, + url = {https://chtc.cs.wisc.edu/}, + author = {{Center for High Throughput Computing}}, + title = {Center for High Throughput Computing}, + publisher = {Center for High Throughput Computing}, + year = {2006} +} \ No newline at end of file diff --git a/preview-fall2024-info/assets/css/bootstrap-v2.css b/preview-fall2024-info/assets/css/bootstrap-v2.css new file mode 100644 index 000000000..e79b15c1d --- /dev/null +++ b/preview-fall2024-info/assets/css/bootstrap-v2.css @@ -0,0 +1,9819 @@ +/*! + * Bootstrap v4.3.1 (https://getbootstrap.com/) + * Copyright 2011-2019 The Bootstrap Authors + * Copyright 2011-2019 Twitter, Inc. + * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) + */ + + +a { + color: #007bff; + text-decoration: none; + background-color: transparent; +} + +a:hover { + color: #0056b3; + text-decoration: underline; +} + +a:not([href]):not([tabindex]) { + color: inherit; + text-decoration: none; +} + +a:not([href]):not([tabindex]):hover, a:not([href]):not([tabindex]):focus { + color: inherit; + text-decoration: none; +} + +a:not([href]):not([tabindex]):focus { + outline: 0; +} + +pre, +code, +kbd, +samp { + font-family: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; + font-size: 1em; +} + +pre { + margin-top: 0; + margin-bottom: 1rem; + overflow: auto; +} + +figure { + margin: 0 0 1rem; +} + +img { + vertical-align: middle; + border-style: none; +} + +svg { + overflow: hidden; + vertical-align: middle; +} + +table { + border-collapse: collapse; +} + +caption { + padding-top: 0.75rem; + padding-bottom: 0.75rem; + color: #6c757d; + text-align: left; + caption-side: bottom; +} + +th { + text-align: inherit; +} + +label { + display: inline-block; + margin-bottom: 0.5rem; +} + +button { + border-radius: 0; +} + +button:focus { + outline: 1px dotted; + outline: 5px auto -webkit-focus-ring-color; +} + +input, +button, +select, +optgroup, +textarea { + margin: 0; + font-family: inherit; + font-size: inherit; + line-height: inherit; +} + +button, +input { + overflow: visible; +} + +button, +select { + text-transform: none; +} + +select { + word-wrap: normal; +} + +button, +[type="button"], +[type="reset"], +[type="submit"] { + -webkit-appearance: button; +} + +button:not(:disabled), +[type="button"]:not(:disabled), +[type="reset"]:not(:disabled), +[type="submit"]:not(:disabled) { + cursor: pointer; +} + +button::-moz-focus-inner, +[type="button"]::-moz-focus-inner, +[type="reset"]::-moz-focus-inner, +[type="submit"]::-moz-focus-inner { + padding: 0; + border-style: none; +} + +input[type="radio"], +input[type="checkbox"] { + box-sizing: border-box; + padding: 0; +} + +input[type="date"], +input[type="time"], +input[type="datetime-local"], +input[type="month"] { + -webkit-appearance: listbox; +} + +textarea { + overflow: auto; + resize: vertical; +} + +fieldset { + min-width: 0; + padding: 0; + margin: 0; + border: 0; +} + +legend { + display: block; + width: 100%; + max-width: 100%; + padding: 0; + margin-bottom: .5rem; + font-size: 1.5rem; + line-height: inherit; + color: inherit; + white-space: normal; +} + +progress { + vertical-align: baseline; +} + +[type="number"]::-webkit-inner-spin-button, 
+[type="number"]::-webkit-outer-spin-button { + height: auto; +} + +[type="search"] { + outline-offset: -2px; + -webkit-appearance: none; +} + +[type="search"]::-webkit-search-decoration { + -webkit-appearance: none; +} + +::-webkit-file-upload-button { + font: inherit; + -webkit-appearance: button; +} + +output { + display: inline-block; +} + +summary { + display: list-item; + cursor: pointer; +} + +template { + display: none; +} + +[hidden] { + display: none !important; +} + + +hr { + margin-top: 1rem; + margin-bottom: 1rem; + border: 0; + border-top: 1px solid rgba(0, 0, 0, 0.1); +} + +small, +.small { + font-size: 80%; + font-weight: 400; +} + +mark, +.mark { + padding: 0.2em; + background-color: #fcf8e3; +} + +.list-unstyled { + padding-left: 0; + list-style: none; +} + +.list-inline { + padding-left: 0; + list-style: none; +} + +.list-inline-item { + display: inline-block; +} + +.list-inline-item:not(:last-child) { + margin-right: 0.5rem; +} + +.initialism { + font-size: 90%; + text-transform: uppercase; +} + +.blockquote { + margin-bottom: 1rem; + font-size: 1.25rem; +} + +.blockquote-footer { + display: block; + font-size: 80%; + color: #6c757d; +} + +.blockquote-footer::before { + content: "\2014\00A0"; +} + +.img-fluid { + max-width: 100%; + height: auto; +} + +.img-thumbnail { + padding: 0.25rem; + background-color: #fff; + border: 1px solid #dee2e6; + border-radius: 0.25rem; + max-width: 100%; + height: auto; +} + +.figure { + display: inline-block; +} + +.figure-img { + margin-bottom: 0.5rem; + line-height: 1; +} + +.figure-caption { + font-size: 90%; + color: #6c757d; +} + +code { + font-size: 1rem; + color: #e83e8c; + word-break: break-word; +} + +a > code { + color: inherit; +} + +kbd { + padding: 0.2rem 0.4rem; + font-size: 87.5%; + color: #fff; + background-color: #212529; + border-radius: 0.2rem; +} + +kbd kbd { + padding: 0; + font-size: 100%; + font-weight: 700; +} + +pre { + display: block; + font-size: 87.5%; + color: #212529; +} + +pre code { + font-size: inherit; + color: inherit; + word-break: normal; +} + +.pre-scrollable { + max-height: 340px; + overflow-y: scroll; +} + +.container { + width: 100%; + padding-right: 15px; + padding-left: 15px; + margin-right: auto; + margin-left: auto; +} + +@media (min-width: 576px) { + .container { + max-width: 540px; + } +} + +@media (min-width: 768px) { + .container { + max-width: 720px; + } +} + +@media (min-width: 992px) { + .container { + max-width: 960px; + } +} + +@media (min-width: 1200px) { + .container { + max-width: 1140px; + } +} + +.container-fluid { + width: 100%; + padding-right: 15px; + padding-left: 15px; + margin-right: auto; + margin-left: auto; +} + +.row { + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + margin-right: -15px; + margin-left: -15px; +} + +.no-gutters { + margin-right: 0; + margin-left: 0; +} + +.no-gutters > .col, +.no-gutters > [class*="col-"] { + padding-right: 0; + padding-left: 0; +} + +.col-1, .col-2, .col-3, .col-4, .col-5, .col-6, .col-7, .col-8, .col-9, .col-10, .col-11, .col-12, .col, +.col-auto, .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12, .col-sm, +.col-sm-auto, .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12, .col-md, +.col-md-auto, .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12, .col-lg, 
+.col-lg-auto, .col-xl-1, .col-xl-2, .col-xl-3, .col-xl-4, .col-xl-5, .col-xl-6, .col-xl-7, .col-xl-8, .col-xl-9, .col-xl-10, .col-xl-11, .col-xl-12, .col-xl, +.col-xl-auto { + position: relative; + width: 100%; + padding-right: 15px; + padding-left: 15px; +} + +.col { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -ms-flex-positive: 1; + flex-grow: 1; + max-width: 100%; +} + +.col-auto { + -ms-flex: 0 0 auto; + flex: 0 0 auto; + width: auto; + max-width: 100%; +} + +.col-1 { + -ms-flex: 0 0 8.333333%; + flex: 0 0 8.333333%; + max-width: 8.333333%; +} + +.col-2 { + -ms-flex: 0 0 16.666667%; + flex: 0 0 16.666667%; + max-width: 16.666667%; +} + +.col-3 { + -ms-flex: 0 0 25%; + flex: 0 0 25%; + max-width: 25%; +} + +.col-4 { + -ms-flex: 0 0 33.333333%; + flex: 0 0 33.333333%; + max-width: 33.333333%; +} + +.col-5 { + -ms-flex: 0 0 41.666667%; + flex: 0 0 41.666667%; + max-width: 41.666667%; +} + +.col-6 { + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; +} + +.col-7 { + -ms-flex: 0 0 58.333333%; + flex: 0 0 58.333333%; + max-width: 58.333333%; +} + +.col-8 { + -ms-flex: 0 0 66.666667%; + flex: 0 0 66.666667%; + max-width: 66.666667%; +} + +.col-9 { + -ms-flex: 0 0 75%; + flex: 0 0 75%; + max-width: 75%; +} + +.col-10 { + -ms-flex: 0 0 83.333333%; + flex: 0 0 83.333333%; + max-width: 83.333333%; +} + +.col-11 { + -ms-flex: 0 0 91.666667%; + flex: 0 0 91.666667%; + max-width: 91.666667%; +} + +.col-12 { + -ms-flex: 0 0 100%; + flex: 0 0 100%; + max-width: 100%; +} + +.order-first { + -ms-flex-order: -1; + order: -1; +} + +.order-last { + -ms-flex-order: 13; + order: 13; +} + +.order-0 { + -ms-flex-order: 0; + order: 0; +} + +.order-1 { + -ms-flex-order: 1; + order: 1; +} + +.order-2 { + -ms-flex-order: 2; + order: 2; +} + +.order-3 { + -ms-flex-order: 3; + order: 3; +} + +.order-4 { + -ms-flex-order: 4; + order: 4; +} + +.order-5 { + -ms-flex-order: 5; + order: 5; +} + +.order-6 { + -ms-flex-order: 6; + order: 6; +} + +.order-7 { + -ms-flex-order: 7; + order: 7; +} + +.order-8 { + -ms-flex-order: 8; + order: 8; +} + +.order-9 { + -ms-flex-order: 9; + order: 9; +} + +.order-10 { + -ms-flex-order: 10; + order: 10; +} + +.order-11 { + -ms-flex-order: 11; + order: 11; +} + +.order-12 { + -ms-flex-order: 12; + order: 12; +} + +.offset-1 { + margin-left: 8.333333%; +} + +.offset-2 { + margin-left: 16.666667%; +} + +.offset-3 { + margin-left: 25%; +} + +.offset-4 { + margin-left: 33.333333%; +} + +.offset-5 { + margin-left: 41.666667%; +} + +.offset-6 { + margin-left: 50%; +} + +.offset-7 { + margin-left: 58.333333%; +} + +.offset-8 { + margin-left: 66.666667%; +} + +.offset-9 { + margin-left: 75%; +} + +.offset-10 { + margin-left: 83.333333%; +} + +.offset-11 { + margin-left: 91.666667%; +} + +@media (min-width: 576px) { + .col-sm { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -ms-flex-positive: 1; + flex-grow: 1; + max-width: 100%; + } + .col-sm-auto { + -ms-flex: 0 0 auto; + flex: 0 0 auto; + width: auto; + max-width: 100%; + } + .col-sm-1 { + -ms-flex: 0 0 8.333333%; + flex: 0 0 8.333333%; + max-width: 8.333333%; + } + .col-sm-2 { + -ms-flex: 0 0 16.666667%; + flex: 0 0 16.666667%; + max-width: 16.666667%; + } + .col-sm-3 { + -ms-flex: 0 0 25%; + flex: 0 0 25%; + max-width: 25%; + } + .col-sm-4 { + -ms-flex: 0 0 33.333333%; + flex: 0 0 33.333333%; + max-width: 33.333333%; + } + .col-sm-5 { + -ms-flex: 0 0 41.666667%; + flex: 0 0 41.666667%; + max-width: 41.666667%; + } + .col-sm-6 { + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; + } + .col-sm-7 { + -ms-flex: 0 0 58.333333%; 
+ flex: 0 0 58.333333%; + max-width: 58.333333%; + } + .col-sm-8 { + -ms-flex: 0 0 66.666667%; + flex: 0 0 66.666667%; + max-width: 66.666667%; + } + .col-sm-9 { + -ms-flex: 0 0 75%; + flex: 0 0 75%; + max-width: 75%; + } + .col-sm-10 { + -ms-flex: 0 0 83.333333%; + flex: 0 0 83.333333%; + max-width: 83.333333%; + } + .col-sm-11 { + -ms-flex: 0 0 91.666667%; + flex: 0 0 91.666667%; + max-width: 91.666667%; + } + .col-sm-12 { + -ms-flex: 0 0 100%; + flex: 0 0 100%; + max-width: 100%; + } + .order-sm-first { + -ms-flex-order: -1; + order: -1; + } + .order-sm-last { + -ms-flex-order: 13; + order: 13; + } + .order-sm-0 { + -ms-flex-order: 0; + order: 0; + } + .order-sm-1 { + -ms-flex-order: 1; + order: 1; + } + .order-sm-2 { + -ms-flex-order: 2; + order: 2; + } + .order-sm-3 { + -ms-flex-order: 3; + order: 3; + } + .order-sm-4 { + -ms-flex-order: 4; + order: 4; + } + .order-sm-5 { + -ms-flex-order: 5; + order: 5; + } + .order-sm-6 { + -ms-flex-order: 6; + order: 6; + } + .order-sm-7 { + -ms-flex-order: 7; + order: 7; + } + .order-sm-8 { + -ms-flex-order: 8; + order: 8; + } + .order-sm-9 { + -ms-flex-order: 9; + order: 9; + } + .order-sm-10 { + -ms-flex-order: 10; + order: 10; + } + .order-sm-11 { + -ms-flex-order: 11; + order: 11; + } + .order-sm-12 { + -ms-flex-order: 12; + order: 12; + } + .offset-sm-0 { + margin-left: 0; + } + .offset-sm-1 { + margin-left: 8.333333%; + } + .offset-sm-2 { + margin-left: 16.666667%; + } + .offset-sm-3 { + margin-left: 25%; + } + .offset-sm-4 { + margin-left: 33.333333%; + } + .offset-sm-5 { + margin-left: 41.666667%; + } + .offset-sm-6 { + margin-left: 50%; + } + .offset-sm-7 { + margin-left: 58.333333%; + } + .offset-sm-8 { + margin-left: 66.666667%; + } + .offset-sm-9 { + margin-left: 75%; + } + .offset-sm-10 { + margin-left: 83.333333%; + } + .offset-sm-11 { + margin-left: 91.666667%; + } +} + +@media (min-width: 768px) { + .col-md { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -ms-flex-positive: 1; + flex-grow: 1; + max-width: 100%; + } + .col-md-auto { + -ms-flex: 0 0 auto; + flex: 0 0 auto; + width: auto; + max-width: 100%; + } + .col-md-1 { + -ms-flex: 0 0 8.333333%; + flex: 0 0 8.333333%; + max-width: 8.333333%; + } + .col-md-2 { + -ms-flex: 0 0 16.666667%; + flex: 0 0 16.666667%; + max-width: 16.666667%; + } + .col-md-3 { + -ms-flex: 0 0 25%; + flex: 0 0 25%; + max-width: 25%; + } + .col-md-4 { + -ms-flex: 0 0 33.333333%; + flex: 0 0 33.333333%; + max-width: 33.333333%; + } + .col-md-5 { + -ms-flex: 0 0 41.666667%; + flex: 0 0 41.666667%; + max-width: 41.666667%; + } + .col-md-6 { + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; + } + .col-md-7 { + -ms-flex: 0 0 58.333333%; + flex: 0 0 58.333333%; + max-width: 58.333333%; + } + .col-md-8 { + -ms-flex: 0 0 66.666667%; + flex: 0 0 66.666667%; + max-width: 66.666667%; + } + .col-md-9 { + -ms-flex: 0 0 75%; + flex: 0 0 75%; + max-width: 75%; + } + .col-md-10 { + -ms-flex: 0 0 83.333333%; + flex: 0 0 83.333333%; + max-width: 83.333333%; + } + .col-md-11 { + -ms-flex: 0 0 91.666667%; + flex: 0 0 91.666667%; + max-width: 91.666667%; + } + .col-md-12 { + -ms-flex: 0 0 100%; + flex: 0 0 100%; + max-width: 100%; + } + .order-md-first { + -ms-flex-order: -1; + order: -1; + } + .order-md-last { + -ms-flex-order: 13; + order: 13; + } + .order-md-0 { + -ms-flex-order: 0; + order: 0; + } + .order-md-1 { + -ms-flex-order: 1; + order: 1; + } + .order-md-2 { + -ms-flex-order: 2; + order: 2; + } + .order-md-3 { + -ms-flex-order: 3; + order: 3; + } + .order-md-4 { + -ms-flex-order: 4; + order: 4; + } + 
.order-md-5 { + -ms-flex-order: 5; + order: 5; + } + .order-md-6 { + -ms-flex-order: 6; + order: 6; + } + .order-md-7 { + -ms-flex-order: 7; + order: 7; + } + .order-md-8 { + -ms-flex-order: 8; + order: 8; + } + .order-md-9 { + -ms-flex-order: 9; + order: 9; + } + .order-md-10 { + -ms-flex-order: 10; + order: 10; + } + .order-md-11 { + -ms-flex-order: 11; + order: 11; + } + .order-md-12 { + -ms-flex-order: 12; + order: 12; + } + .offset-md-0 { + margin-left: 0; + } + .offset-md-1 { + margin-left: 8.333333%; + } + .offset-md-2 { + margin-left: 16.666667%; + } + .offset-md-3 { + margin-left: 25%; + } + .offset-md-4 { + margin-left: 33.333333%; + } + .offset-md-5 { + margin-left: 41.666667%; + } + .offset-md-6 { + margin-left: 50%; + } + .offset-md-7 { + margin-left: 58.333333%; + } + .offset-md-8 { + margin-left: 66.666667%; + } + .offset-md-9 { + margin-left: 75%; + } + .offset-md-10 { + margin-left: 83.333333%; + } + .offset-md-11 { + margin-left: 91.666667%; + } +} + +@media (min-width: 992px) { + .col-lg { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -ms-flex-positive: 1; + flex-grow: 1; + max-width: 100%; + } + .col-lg-auto { + -ms-flex: 0 0 auto; + flex: 0 0 auto; + width: auto; + max-width: 100%; + } + .col-lg-1 { + -ms-flex: 0 0 8.333333%; + flex: 0 0 8.333333%; + max-width: 8.333333%; + } + .col-lg-2 { + -ms-flex: 0 0 16.666667%; + flex: 0 0 16.666667%; + max-width: 16.666667%; + } + .col-lg-3 { + -ms-flex: 0 0 25%; + flex: 0 0 25%; + max-width: 25%; + } + .col-lg-4 { + -ms-flex: 0 0 33.333333%; + flex: 0 0 33.333333%; + max-width: 33.333333%; + } + .col-lg-5 { + -ms-flex: 0 0 41.666667%; + flex: 0 0 41.666667%; + max-width: 41.666667%; + } + .col-lg-6 { + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; + } + .col-lg-7 { + -ms-flex: 0 0 58.333333%; + flex: 0 0 58.333333%; + max-width: 58.333333%; + } + .col-lg-8 { + -ms-flex: 0 0 66.666667%; + flex: 0 0 66.666667%; + max-width: 66.666667%; + } + .col-lg-9 { + -ms-flex: 0 0 75%; + flex: 0 0 75%; + max-width: 75%; + } + .col-lg-10 { + -ms-flex: 0 0 83.333333%; + flex: 0 0 83.333333%; + max-width: 83.333333%; + } + .col-lg-11 { + -ms-flex: 0 0 91.666667%; + flex: 0 0 91.666667%; + max-width: 91.666667%; + } + .col-lg-12 { + -ms-flex: 0 0 100%; + flex: 0 0 100%; + max-width: 100%; + } + .order-lg-first { + -ms-flex-order: -1; + order: -1; + } + .order-lg-last { + -ms-flex-order: 13; + order: 13; + } + .order-lg-0 { + -ms-flex-order: 0; + order: 0; + } + .order-lg-1 { + -ms-flex-order: 1; + order: 1; + } + .order-lg-2 { + -ms-flex-order: 2; + order: 2; + } + .order-lg-3 { + -ms-flex-order: 3; + order: 3; + } + .order-lg-4 { + -ms-flex-order: 4; + order: 4; + } + .order-lg-5 { + -ms-flex-order: 5; + order: 5; + } + .order-lg-6 { + -ms-flex-order: 6; + order: 6; + } + .order-lg-7 { + -ms-flex-order: 7; + order: 7; + } + .order-lg-8 { + -ms-flex-order: 8; + order: 8; + } + .order-lg-9 { + -ms-flex-order: 9; + order: 9; + } + .order-lg-10 { + -ms-flex-order: 10; + order: 10; + } + .order-lg-11 { + -ms-flex-order: 11; + order: 11; + } + .order-lg-12 { + -ms-flex-order: 12; + order: 12; + } + .offset-lg-0 { + margin-left: 0; + } + .offset-lg-1 { + margin-left: 8.333333%; + } + .offset-lg-2 { + margin-left: 16.666667%; + } + .offset-lg-3 { + margin-left: 25%; + } + .offset-lg-4 { + margin-left: 33.333333%; + } + .offset-lg-5 { + margin-left: 41.666667%; + } + .offset-lg-6 { + margin-left: 50%; + } + .offset-lg-7 { + margin-left: 58.333333%; + } + .offset-lg-8 { + margin-left: 66.666667%; + } + .offset-lg-9 { + margin-left: 75%; + } + 
.offset-lg-10 { + margin-left: 83.333333%; + } + .offset-lg-11 { + margin-left: 91.666667%; + } +} + +@media (min-width: 1200px) { + .col-xl { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -ms-flex-positive: 1; + flex-grow: 1; + max-width: 100%; + } + .col-xl-auto { + -ms-flex: 0 0 auto; + flex: 0 0 auto; + width: auto; + max-width: 100%; + } + .col-xl-1 { + -ms-flex: 0 0 8.333333%; + flex: 0 0 8.333333%; + max-width: 8.333333%; + } + .col-xl-2 { + -ms-flex: 0 0 16.666667%; + flex: 0 0 16.666667%; + max-width: 16.666667%; + } + .col-xl-3 { + -ms-flex: 0 0 25%; + flex: 0 0 25%; + max-width: 25%; + } + .col-xl-4 { + -ms-flex: 0 0 33.333333%; + flex: 0 0 33.333333%; + max-width: 33.333333%; + } + .col-xl-5 { + -ms-flex: 0 0 41.666667%; + flex: 0 0 41.666667%; + max-width: 41.666667%; + } + .col-xl-6 { + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; + } + .col-xl-7 { + -ms-flex: 0 0 58.333333%; + flex: 0 0 58.333333%; + max-width: 58.333333%; + } + .col-xl-8 { + -ms-flex: 0 0 66.666667%; + flex: 0 0 66.666667%; + max-width: 66.666667%; + } + .col-xl-9 { + -ms-flex: 0 0 75%; + flex: 0 0 75%; + max-width: 75%; + } + .col-xl-10 { + -ms-flex: 0 0 83.333333%; + flex: 0 0 83.333333%; + max-width: 83.333333%; + } + .col-xl-11 { + -ms-flex: 0 0 91.666667%; + flex: 0 0 91.666667%; + max-width: 91.666667%; + } + .col-xl-12 { + -ms-flex: 0 0 100%; + flex: 0 0 100%; + max-width: 100%; + } + .order-xl-first { + -ms-flex-order: -1; + order: -1; + } + .order-xl-last { + -ms-flex-order: 13; + order: 13; + } + .order-xl-0 { + -ms-flex-order: 0; + order: 0; + } + .order-xl-1 { + -ms-flex-order: 1; + order: 1; + } + .order-xl-2 { + -ms-flex-order: 2; + order: 2; + } + .order-xl-3 { + -ms-flex-order: 3; + order: 3; + } + .order-xl-4 { + -ms-flex-order: 4; + order: 4; + } + .order-xl-5 { + -ms-flex-order: 5; + order: 5; + } + .order-xl-6 { + -ms-flex-order: 6; + order: 6; + } + .order-xl-7 { + -ms-flex-order: 7; + order: 7; + } + .order-xl-8 { + -ms-flex-order: 8; + order: 8; + } + .order-xl-9 { + -ms-flex-order: 9; + order: 9; + } + .order-xl-10 { + -ms-flex-order: 10; + order: 10; + } + .order-xl-11 { + -ms-flex-order: 11; + order: 11; + } + .order-xl-12 { + -ms-flex-order: 12; + order: 12; + } + .offset-xl-0 { + margin-left: 0; + } + .offset-xl-1 { + margin-left: 8.333333%; + } + .offset-xl-2 { + margin-left: 16.666667%; + } + .offset-xl-3 { + margin-left: 25%; + } + .offset-xl-4 { + margin-left: 33.333333%; + } + .offset-xl-5 { + margin-left: 41.666667%; + } + .offset-xl-6 { + margin-left: 50%; + } + .offset-xl-7 { + margin-left: 58.333333%; + } + .offset-xl-8 { + margin-left: 66.666667%; + } + .offset-xl-9 { + margin-left: 75%; + } + .offset-xl-10 { + margin-left: 83.333333%; + } + .offset-xl-11 { + margin-left: 91.666667%; + } +} + +.table { + width: 100%; + margin-bottom: 1rem; + color: #212529; +} + +.table th, +.table td { + padding: 0.75rem; + vertical-align: top; + border-top: 1px solid #dee2e6; +} + +.table thead th { + vertical-align: bottom; + border-bottom: 2px solid #dee2e6; +} + +.table tbody + tbody { + border-top: 2px solid #dee2e6; +} + +.table-sm th, +.table-sm td { + padding: 0.3rem; +} + +.table-bordered { + border: 1px solid #dee2e6; +} + +.table-bordered th, +.table-bordered td { + border: 1px solid #dee2e6; +} + +.table-bordered thead th, +.table-bordered thead td { + border-bottom-width: 2px; +} + +.table-borderless th, +.table-borderless td, +.table-borderless thead th, +.table-borderless tbody + tbody { + border: 0; +} + +.table-striped tbody tr:nth-of-type(odd) { + 
background-color: rgba(0, 0, 0, 0.05); +} + +.table-hover tbody tr:hover { + color: #212529; + background-color: rgba(0, 0, 0, 0.075); +} + +.table-primary, +.table-primary > th, +.table-primary > td { + background-color: #b8daff; +} + +.table-primary th, +.table-primary td, +.table-primary thead th, +.table-primary tbody + tbody { + border-color: #7abaff; +} + +.table-hover .table-primary:hover { + background-color: #9fcdff; +} + +.table-hover .table-primary:hover > td, +.table-hover .table-primary:hover > th { + background-color: #9fcdff; +} + +.table-secondary, +.table-secondary > th, +.table-secondary > td { + background-color: #d6d8db; +} + +.table-secondary th, +.table-secondary td, +.table-secondary thead th, +.table-secondary tbody + tbody { + border-color: #b3b7bb; +} + +.table-hover .table-secondary:hover { + background-color: #c8cbcf; +} + +.table-hover .table-secondary:hover > td, +.table-hover .table-secondary:hover > th { + background-color: #c8cbcf; +} + +.table-success, +.table-success > th, +.table-success > td { + background-color: #c3e6cb; +} + +.table-success th, +.table-success td, +.table-success thead th, +.table-success tbody + tbody { + border-color: #8fd19e; +} + +.table-hover .table-success:hover { + background-color: #b1dfbb; +} + +.table-hover .table-success:hover > td, +.table-hover .table-success:hover > th { + background-color: #b1dfbb; +} + +.table-info, +.table-info > th, +.table-info > td { + background-color: #bee5eb; +} + +.table-info th, +.table-info td, +.table-info thead th, +.table-info tbody + tbody { + border-color: #86cfda; +} + +.table-hover .table-info:hover { + background-color: #abdde5; +} + +.table-hover .table-info:hover > td, +.table-hover .table-info:hover > th { + background-color: #abdde5; +} + +.table-warning, +.table-warning > th, +.table-warning > td { + background-color: #ffeeba; +} + +.table-warning th, +.table-warning td, +.table-warning thead th, +.table-warning tbody + tbody { + border-color: #ffdf7e; +} + +.table-hover .table-warning:hover { + background-color: #ffe8a1; +} + +.table-hover .table-warning:hover > td, +.table-hover .table-warning:hover > th { + background-color: #ffe8a1; +} + +.table-danger, +.table-danger > th, +.table-danger > td { + background-color: #f5c6cb; +} + +.table-danger th, +.table-danger td, +.table-danger thead th, +.table-danger tbody + tbody { + border-color: #ed969e; +} + +.table-hover .table-danger:hover { + background-color: #f1b0b7; +} + +.table-hover .table-danger:hover > td, +.table-hover .table-danger:hover > th { + background-color: #f1b0b7; +} + +.table-light, +.table-light > th, +.table-light > td { + background-color: #fdfdfe; +} + +.table-light th, +.table-light td, +.table-light thead th, +.table-light tbody + tbody { + border-color: #fbfcfc; +} + +.table-hover .table-light:hover { + background-color: #ececf6; +} + +.table-hover .table-light:hover > td, +.table-hover .table-light:hover > th { + background-color: #ececf6; +} + +.table-dark, +.table-dark > th, +.table-dark > td { + background-color: #c6c8ca; +} + +.table-dark th, +.table-dark td, +.table-dark thead th, +.table-dark tbody + tbody { + border-color: #95999c; +} + +.table-hover .table-dark:hover { + background-color: #b9bbbe; +} + +.table-hover .table-dark:hover > td, +.table-hover .table-dark:hover > th { + background-color: #b9bbbe; +} + +.table-active, +.table-active > th, +.table-active > td { + background-color: rgba(0, 0, 0, 0.075); +} + +.table-hover .table-active:hover { + background-color: rgba(0, 0, 0, 0.075); +} + 
+.table-hover .table-active:hover > td, +.table-hover .table-active:hover > th { + background-color: rgba(0, 0, 0, 0.075); +} + +.table .thead-dark th { + color: #fff; + background-color: #343a40; + border-color: #454d55; +} + +.table .thead-light th { + color: #495057; + background-color: #e9ecef; + border-color: #dee2e6; +} + +.table-dark { + color: #fff; + background-color: #343a40; +} + +.table-dark th, +.table-dark td, +.table-dark thead th { + border-color: #454d55; +} + +.table-dark.table-bordered { + border: 0; +} + +.table-dark.table-striped tbody tr:nth-of-type(odd) { + background-color: rgba(255, 255, 255, 0.05); +} + +.table-dark.table-hover tbody tr:hover { + color: #fff; + background-color: rgba(255, 255, 255, 0.075); +} + +@media (max-width: 575.98px) { + .table-responsive-sm { + display: block; + width: 100%; + overflow-x: auto; + -webkit-overflow-scrolling: touch; + } + .table-responsive-sm > .table-bordered { + border: 0; + } +} + +@media (max-width: 767.98px) { + .table-responsive-md { + display: block; + width: 100%; + overflow-x: auto; + -webkit-overflow-scrolling: touch; + } + .table-responsive-md > .table-bordered { + border: 0; + } +} + +@media (max-width: 991.98px) { + .table-responsive-lg { + display: block; + width: 100%; + overflow-x: auto; + -webkit-overflow-scrolling: touch; + } + .table-responsive-lg > .table-bordered { + border: 0; + } +} + +@media (max-width: 1199.98px) { + .table-responsive-xl { + display: block; + width: 100%; + overflow-x: auto; + -webkit-overflow-scrolling: touch; + } + .table-responsive-xl > .table-bordered { + border: 0; + } +} + +.table-responsive { + display: block; + width: 100%; + overflow-x: auto; + -webkit-overflow-scrolling: touch; +} + +.table-responsive > .table-bordered { + border: 0; +} + +.form-control { + display: block; + width: 100%; + height: calc(1.5em + 0.75rem + 2px); + padding: 0.375rem 0.75rem; + font-size: 1rem; + font-weight: 400; + line-height: 1.5; + color: #495057; + background-color: #fff; + background-clip: padding-box; + border: 1px solid #ced4da; + border-radius: 0.25rem; + transition: border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; +} + +@media (prefers-reduced-motion: reduce) { + .form-control { + transition: none; + } +} + +.form-control::-ms-expand { + background-color: transparent; + border: 0; +} + +.form-control:focus { + color: #495057; + background-color: #fff; + border-color: #80bdff; + outline: 0; + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} + +.form-control::-webkit-input-placeholder { + color: #6c757d; + opacity: 1; +} + +.form-control::-moz-placeholder { + color: #6c757d; + opacity: 1; +} + +.form-control:-ms-input-placeholder { + color: #6c757d; + opacity: 1; +} + +.form-control::-ms-input-placeholder { + color: #6c757d; + opacity: 1; +} + +.form-control::placeholder { + color: #6c757d; + opacity: 1; +} + +.form-control:disabled, .form-control[readonly] { + background-color: #e9ecef; + opacity: 1; +} + +select.form-control:focus::-ms-value { + color: #495057; + background-color: #fff; +} + +.form-control-file, +.form-control-range { + display: block; + width: 100%; +} + +.col-form-label { + padding-top: calc(0.375rem + 1px); + padding-bottom: calc(0.375rem + 1px); + margin-bottom: 0; + font-size: inherit; + line-height: 1.5; +} + +.col-form-label-lg { + padding-top: calc(0.5rem + 1px); + padding-bottom: calc(0.5rem + 1px); + font-size: 1.25rem; + line-height: 1.5; +} + +.col-form-label-sm { + padding-top: calc(0.25rem + 1px); + padding-bottom: calc(0.25rem + 1px); + 
font-size: 0.875rem; + line-height: 1.5; +} + +.form-control-plaintext { + display: block; + width: 100%; + padding-top: 0.375rem; + padding-bottom: 0.375rem; + margin-bottom: 0; + line-height: 1.5; + color: #212529; + background-color: transparent; + border: solid transparent; + border-width: 1px 0; +} + +.form-control-plaintext.form-control-sm, .form-control-plaintext.form-control-lg { + padding-right: 0; + padding-left: 0; +} + +.form-control-sm { + height: calc(1.5em + 0.5rem + 2px); + padding: 0.25rem 0.5rem; + font-size: 0.875rem; + line-height: 1.5; + border-radius: 0.2rem; +} + +.form-control-lg { + height: calc(1.5em + 1rem + 2px); + padding: 0.5rem 1rem; + font-size: 1.25rem; + line-height: 1.5; + border-radius: 0.3rem; +} + +select.form-control[size], select.form-control[multiple] { + height: auto; +} + +textarea.form-control { + height: auto; +} + +.form-group { + margin-bottom: 1rem; +} + +.form-text { + display: block; + margin-top: 0.25rem; +} + +.form-row { + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + margin-right: -5px; + margin-left: -5px; +} + +.form-row > .col, +.form-row > [class*="col-"] { + padding-right: 5px; + padding-left: 5px; +} + +.form-check { + position: relative; + display: block; + padding-left: 1.25rem; +} + +.form-check-input { + position: absolute; + margin-top: 0.3rem; + margin-left: -1.25rem; +} + +.form-check-input:disabled ~ .form-check-label { + color: #6c757d; +} + +.form-check-label { + margin-bottom: 0; +} + +.form-check-inline { + display: -ms-inline-flexbox; + display: inline-flex; + -ms-flex-align: center; + align-items: center; + padding-left: 0; + margin-right: 0.75rem; +} + +.form-check-inline .form-check-input { + position: static; + margin-top: 0; + margin-right: 0.3125rem; + margin-left: 0; +} + +.valid-feedback { + display: none; + width: 100%; + margin-top: 0.25rem; + font-size: 80%; + color: #28a745; +} + +.valid-tooltip { + position: absolute; + top: 100%; + z-index: 5; + display: none; + max-width: 100%; + padding: 0.25rem 0.5rem; + margin-top: .1rem; + font-size: 0.875rem; + line-height: 1.5; + color: #fff; + background-color: rgba(40, 167, 69, 0.9); + border-radius: 0.25rem; +} + +.was-validated .form-control:valid, .form-control.is-valid { + border-color: #28a745; + padding-right: calc(1.5em + 0.75rem); + background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%2328a745' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e"); + background-repeat: no-repeat; + background-position: center right calc(0.375em + 0.1875rem); + background-size: calc(0.75em + 0.375rem) calc(0.75em + 0.375rem); +} + +.was-validated .form-control:valid:focus, .form-control.is-valid:focus { + border-color: #28a745; + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.25); +} + +.was-validated .form-control:valid ~ .valid-feedback, +.was-validated .form-control:valid ~ .valid-tooltip, .form-control.is-valid ~ .valid-feedback, +.form-control.is-valid ~ .valid-tooltip { + display: block; +} + +.was-validated textarea.form-control:valid, textarea.form-control.is-valid { + padding-right: calc(1.5em + 0.75rem); + background-position: top calc(0.375em + 0.1875rem) right calc(0.375em + 0.1875rem); +} + +.was-validated .custom-select:valid, .custom-select.is-valid { + border-color: #28a745; + padding-right: calc((1em + 0.75rem) * 3 / 4 + 1.75rem); + background: url("data:image/svg+xml,%3csvg 
xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 5'%3e%3cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3e%3c/svg%3e") no-repeat right 0.75rem center/8px 10px, url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%2328a745' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e") #fff no-repeat center right 1.75rem/calc(0.75em + 0.375rem) calc(0.75em + 0.375rem); +} + +.was-validated .custom-select:valid:focus, .custom-select.is-valid:focus { + border-color: #28a745; + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.25); +} + +.was-validated .custom-select:valid ~ .valid-feedback, +.was-validated .custom-select:valid ~ .valid-tooltip, .custom-select.is-valid ~ .valid-feedback, +.custom-select.is-valid ~ .valid-tooltip { + display: block; +} + +.was-validated .form-control-file:valid ~ .valid-feedback, +.was-validated .form-control-file:valid ~ .valid-tooltip, .form-control-file.is-valid ~ .valid-feedback, +.form-control-file.is-valid ~ .valid-tooltip { + display: block; +} + +.was-validated .form-check-input:valid ~ .form-check-label, .form-check-input.is-valid ~ .form-check-label { + color: #28a745; +} + +.was-validated .form-check-input:valid ~ .valid-feedback, +.was-validated .form-check-input:valid ~ .valid-tooltip, .form-check-input.is-valid ~ .valid-feedback, +.form-check-input.is-valid ~ .valid-tooltip { + display: block; +} + +.was-validated .custom-control-input:valid ~ .custom-control-label, .custom-control-input.is-valid ~ .custom-control-label { + color: #28a745; +} + +.was-validated .custom-control-input:valid ~ .custom-control-label::before, .custom-control-input.is-valid ~ .custom-control-label::before { + border-color: #28a745; +} + +.was-validated .custom-control-input:valid ~ .valid-feedback, +.was-validated .custom-control-input:valid ~ .valid-tooltip, .custom-control-input.is-valid ~ .valid-feedback, +.custom-control-input.is-valid ~ .valid-tooltip { + display: block; +} + +.was-validated .custom-control-input:valid:checked ~ .custom-control-label::before, .custom-control-input.is-valid:checked ~ .custom-control-label::before { + border-color: #34ce57; + background-color: #34ce57; +} + +.was-validated .custom-control-input:valid:focus ~ .custom-control-label::before, .custom-control-input.is-valid:focus ~ .custom-control-label::before { + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.25); +} + +.was-validated .custom-control-input:valid:focus:not(:checked) ~ .custom-control-label::before, .custom-control-input.is-valid:focus:not(:checked) ~ .custom-control-label::before { + border-color: #28a745; +} + +.was-validated .custom-file-input:valid ~ .custom-file-label, .custom-file-input.is-valid ~ .custom-file-label { + border-color: #28a745; +} + +.was-validated .custom-file-input:valid ~ .valid-feedback, +.was-validated .custom-file-input:valid ~ .valid-tooltip, .custom-file-input.is-valid ~ .valid-feedback, +.custom-file-input.is-valid ~ .valid-tooltip { + display: block; +} + +.was-validated .custom-file-input:valid:focus ~ .custom-file-label, .custom-file-input.is-valid:focus ~ .custom-file-label { + border-color: #28a745; + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.25); +} + +.invalid-feedback { + display: none; + width: 100%; + margin-top: 0.25rem; + font-size: 80%; + color: #dc3545; +} + +.invalid-tooltip { + position: absolute; + top: 100%; + z-index: 5; + display: none; + max-width: 100%; + padding: 0.25rem 0.5rem; + margin-top: .1rem; + font-size: 
0.875rem; + line-height: 1.5; + color: #fff; + background-color: rgba(220, 53, 69, 0.9); + border-radius: 0.25rem; +} + +.was-validated .form-control:invalid, .form-control.is-invalid { + border-color: #dc3545; + padding-right: calc(1.5em + 0.75rem); + background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='%23dc3545' viewBox='-2 -2 7 7'%3e%3cpath stroke='%23dc3545' d='M0 0l3 3m0-3L0 3'/%3e%3ccircle r='.5'/%3e%3ccircle cx='3' r='.5'/%3e%3ccircle cy='3' r='.5'/%3e%3ccircle cx='3' cy='3' r='.5'/%3e%3c/svg%3E"); + background-repeat: no-repeat; + background-position: center right calc(0.375em + 0.1875rem); + background-size: calc(0.75em + 0.375rem) calc(0.75em + 0.375rem); +} + +.was-validated .form-control:invalid:focus, .form-control.is-invalid:focus { + border-color: #dc3545; + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.25); +} + +.was-validated .form-control:invalid ~ .invalid-feedback, +.was-validated .form-control:invalid ~ .invalid-tooltip, .form-control.is-invalid ~ .invalid-feedback, +.form-control.is-invalid ~ .invalid-tooltip { + display: block; +} + +.was-validated textarea.form-control:invalid, textarea.form-control.is-invalid { + padding-right: calc(1.5em + 0.75rem); + background-position: top calc(0.375em + 0.1875rem) right calc(0.375em + 0.1875rem); +} + +.was-validated .custom-select:invalid, .custom-select.is-invalid { + border-color: #dc3545; + padding-right: calc((1em + 0.75rem) * 3 / 4 + 1.75rem); + background: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 5'%3e%3cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3e%3c/svg%3e") no-repeat right 0.75rem center/8px 10px, url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='%23dc3545' viewBox='-2 -2 7 7'%3e%3cpath stroke='%23dc3545' d='M0 0l3 3m0-3L0 3'/%3e%3ccircle r='.5'/%3e%3ccircle cx='3' r='.5'/%3e%3ccircle cy='3' r='.5'/%3e%3ccircle cx='3' cy='3' r='.5'/%3e%3c/svg%3E") #fff no-repeat center right 1.75rem/calc(0.75em + 0.375rem) calc(0.75em + 0.375rem); +} + +.was-validated .custom-select:invalid:focus, .custom-select.is-invalid:focus { + border-color: #dc3545; + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.25); +} + +.was-validated .custom-select:invalid ~ .invalid-feedback, +.was-validated .custom-select:invalid ~ .invalid-tooltip, .custom-select.is-invalid ~ .invalid-feedback, +.custom-select.is-invalid ~ .invalid-tooltip { + display: block; +} + +.was-validated .form-control-file:invalid ~ .invalid-feedback, +.was-validated .form-control-file:invalid ~ .invalid-tooltip, .form-control-file.is-invalid ~ .invalid-feedback, +.form-control-file.is-invalid ~ .invalid-tooltip { + display: block; +} + +.was-validated .form-check-input:invalid ~ .form-check-label, .form-check-input.is-invalid ~ .form-check-label { + color: #dc3545; +} + +.was-validated .form-check-input:invalid ~ .invalid-feedback, +.was-validated .form-check-input:invalid ~ .invalid-tooltip, .form-check-input.is-invalid ~ .invalid-feedback, +.form-check-input.is-invalid ~ .invalid-tooltip { + display: block; +} + +.was-validated .custom-control-input:invalid ~ .custom-control-label, .custom-control-input.is-invalid ~ .custom-control-label { + color: #dc3545; +} + +.was-validated .custom-control-input:invalid ~ .custom-control-label::before, .custom-control-input.is-invalid ~ .custom-control-label::before { + border-color: #dc3545; +} + +.was-validated .custom-control-input:invalid ~ .invalid-feedback, +.was-validated .custom-control-input:invalid ~ .invalid-tooltip, 
.custom-control-input.is-invalid ~ .invalid-feedback, +.custom-control-input.is-invalid ~ .invalid-tooltip { + display: block; +} + +.was-validated .custom-control-input:invalid:checked ~ .custom-control-label::before, .custom-control-input.is-invalid:checked ~ .custom-control-label::before { + border-color: #e4606d; + background-color: #e4606d; +} + +.was-validated .custom-control-input:invalid:focus ~ .custom-control-label::before, .custom-control-input.is-invalid:focus ~ .custom-control-label::before { + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.25); +} + +.was-validated .custom-control-input:invalid:focus:not(:checked) ~ .custom-control-label::before, .custom-control-input.is-invalid:focus:not(:checked) ~ .custom-control-label::before { + border-color: #dc3545; +} + +.was-validated .custom-file-input:invalid ~ .custom-file-label, .custom-file-input.is-invalid ~ .custom-file-label { + border-color: #dc3545; +} + +.was-validated .custom-file-input:invalid ~ .invalid-feedback, +.was-validated .custom-file-input:invalid ~ .invalid-tooltip, .custom-file-input.is-invalid ~ .invalid-feedback, +.custom-file-input.is-invalid ~ .invalid-tooltip { + display: block; +} + +.was-validated .custom-file-input:invalid:focus ~ .custom-file-label, .custom-file-input.is-invalid:focus ~ .custom-file-label { + border-color: #dc3545; + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.25); +} + +.form-inline { + display: -ms-flexbox; + display: flex; + -ms-flex-flow: row wrap; + flex-flow: row wrap; + -ms-flex-align: center; + align-items: center; +} + +.form-inline .form-check { + width: 100%; +} + +@media (min-width: 576px) { + .form-inline label { + display: -ms-flexbox; + display: flex; + -ms-flex-align: center; + align-items: center; + -ms-flex-pack: center; + justify-content: center; + margin-bottom: 0; + } + .form-inline .form-group { + display: -ms-flexbox; + display: flex; + -ms-flex: 0 0 auto; + flex: 0 0 auto; + -ms-flex-flow: row wrap; + flex-flow: row wrap; + -ms-flex-align: center; + align-items: center; + margin-bottom: 0; + } + .form-inline .form-control { + display: inline-block; + width: auto; + vertical-align: middle; + } + .form-inline .form-control-plaintext { + display: inline-block; + } + .form-inline .input-group, + .form-inline .custom-select { + width: auto; + } + .form-inline .form-check { + display: -ms-flexbox; + display: flex; + -ms-flex-align: center; + align-items: center; + -ms-flex-pack: center; + justify-content: center; + width: auto; + padding-left: 0; + } + .form-inline .form-check-input { + position: relative; + -ms-flex-negative: 0; + flex-shrink: 0; + margin-top: 0; + margin-right: 0.25rem; + margin-left: 0; + } + .form-inline .custom-control { + -ms-flex-align: center; + align-items: center; + -ms-flex-pack: center; + justify-content: center; + } + .form-inline .custom-control-label { + margin-bottom: 0; + } +} + +.btn { + display: inline-block; + font-weight: 400; + color: #212529; + text-align: center; + vertical-align: middle; + -webkit-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; + background-color: transparent; + border: 1px solid transparent; + padding: 0.375rem 0.75rem; + font-size: 1rem; + line-height: 1.5; + border-radius: 0.25rem; + transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; +} + +@media (prefers-reduced-motion: reduce) { + .btn { + transition: none; + } +} + +.btn:hover { + color: #212529; + text-decoration: none; +} + 
+.btn:focus, .btn.focus { + outline: 0; + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} + +.btn.disabled, .btn:disabled { + opacity: 0.65; +} + +a.btn.disabled, +fieldset:disabled a.btn { + pointer-events: none; +} + +.btn-primary { + color: #fff; + background-color: #007bff; + border-color: #007bff; +} + +.btn-primary:hover { + color: #fff; + background-color: #0069d9; + border-color: #0062cc; +} + +.btn-primary:focus, .btn-primary.focus { + box-shadow: 0 0 0 0.2rem rgba(38, 143, 255, 0.5); +} + +.btn-primary.disabled, .btn-primary:disabled { + color: #fff; + background-color: #007bff; + border-color: #007bff; +} + +.btn-primary:not(:disabled):not(.disabled):active, .btn-primary:not(:disabled):not(.disabled).active, +.show > .btn-primary.dropdown-toggle { + color: #fff; + background-color: #0062cc; + border-color: #005cbf; +} + +.btn-primary:not(:disabled):not(.disabled):active:focus, .btn-primary:not(:disabled):not(.disabled).active:focus, +.show > .btn-primary.dropdown-toggle:focus { + box-shadow: 0 0 0 0.2rem rgba(38, 143, 255, 0.5); +} + +.btn-secondary { + color: #fff; + background-color: #6c757d; + border-color: #6c757d; +} + +.btn-secondary:hover { + color: #fff; + background-color: #5a6268; + border-color: #545b62; +} + +.btn-secondary:focus, .btn-secondary.focus { + box-shadow: 0 0 0 0.2rem rgba(130, 138, 145, 0.5); +} + +.btn-secondary.disabled, .btn-secondary:disabled { + color: #fff; + background-color: #6c757d; + border-color: #6c757d; +} + +.btn-secondary:not(:disabled):not(.disabled):active, .btn-secondary:not(:disabled):not(.disabled).active, +.show > .btn-secondary.dropdown-toggle { + color: #fff; + background-color: #545b62; + border-color: #4e555b; +} + +.btn-secondary:not(:disabled):not(.disabled):active:focus, .btn-secondary:not(:disabled):not(.disabled).active:focus, +.show > .btn-secondary.dropdown-toggle:focus { + box-shadow: 0 0 0 0.2rem rgba(130, 138, 145, 0.5); +} + +.btn-success { + color: #fff; + background-color: #28a745; + border-color: #28a745; +} + +.btn-success:hover { + color: #fff; + background-color: #218838; + border-color: #1e7e34; +} + +.btn-success:focus, .btn-success.focus { + box-shadow: 0 0 0 0.2rem rgba(72, 180, 97, 0.5); +} + +.btn-success.disabled, .btn-success:disabled { + color: #fff; + background-color: #28a745; + border-color: #28a745; +} + +.btn-success:not(:disabled):not(.disabled):active, .btn-success:not(:disabled):not(.disabled).active, +.show > .btn-success.dropdown-toggle { + color: #fff; + background-color: #1e7e34; + border-color: #1c7430; +} + +.btn-success:not(:disabled):not(.disabled):active:focus, .btn-success:not(:disabled):not(.disabled).active:focus, +.show > .btn-success.dropdown-toggle:focus { + box-shadow: 0 0 0 0.2rem rgba(72, 180, 97, 0.5); +} + +.btn-info { + color: #fff; + background-color: #17a2b8; + border-color: #17a2b8; +} + +.btn-info:hover { + color: #fff; + background-color: #138496; + border-color: #117a8b; +} + +.btn-info:focus, .btn-info.focus { + box-shadow: 0 0 0 0.2rem rgba(58, 176, 195, 0.5); +} + +.btn-info.disabled, .btn-info:disabled { + color: #fff; + background-color: #17a2b8; + border-color: #17a2b8; +} + +.btn-info:not(:disabled):not(.disabled):active, .btn-info:not(:disabled):not(.disabled).active, +.show > .btn-info.dropdown-toggle { + color: #fff; + background-color: #117a8b; + border-color: #10707f; +} + +.btn-info:not(:disabled):not(.disabled):active:focus, .btn-info:not(:disabled):not(.disabled).active:focus, +.show > .btn-info.dropdown-toggle:focus { + box-shadow: 0 0 0 0.2rem 
rgba(58, 176, 195, 0.5); +} + +.btn-warning { + color: #212529; + background-color: #ffc107; + border-color: #ffc107; +} + +.btn-warning:hover { + color: #212529; + background-color: #e0a800; + border-color: #d39e00; +} + +.btn-warning:focus, .btn-warning.focus { + box-shadow: 0 0 0 0.2rem rgba(222, 170, 12, 0.5); +} + +.btn-warning.disabled, .btn-warning:disabled { + color: #212529; + background-color: #ffc107; + border-color: #ffc107; +} + +.btn-warning:not(:disabled):not(.disabled):active, .btn-warning:not(:disabled):not(.disabled).active, +.show > .btn-warning.dropdown-toggle { + color: #212529; + background-color: #d39e00; + border-color: #c69500; +} + +.btn-warning:not(:disabled):not(.disabled):active:focus, .btn-warning:not(:disabled):not(.disabled).active:focus, +.show > .btn-warning.dropdown-toggle:focus { + box-shadow: 0 0 0 0.2rem rgba(222, 170, 12, 0.5); +} + +.btn-danger { + color: #fff; + background-color: #dc3545; + border-color: #dc3545; +} + +.btn-danger:hover { + color: #fff; + background-color: #c82333; + border-color: #bd2130; +} + +.btn-danger:focus, .btn-danger.focus { + box-shadow: 0 0 0 0.2rem rgba(225, 83, 97, 0.5); +} + +.btn-danger.disabled, .btn-danger:disabled { + color: #fff; + background-color: #dc3545; + border-color: #dc3545; +} + +.btn-danger:not(:disabled):not(.disabled):active, .btn-danger:not(:disabled):not(.disabled).active, +.show > .btn-danger.dropdown-toggle { + color: #fff; + background-color: #bd2130; + border-color: #b21f2d; +} + +.btn-danger:not(:disabled):not(.disabled):active:focus, .btn-danger:not(:disabled):not(.disabled).active:focus, +.show > .btn-danger.dropdown-toggle:focus { + box-shadow: 0 0 0 0.2rem rgba(225, 83, 97, 0.5); +} + +.btn-light { + color: #212529; + background-color: #f8f9fa; + border-color: #f8f9fa; +} + +.btn-light:hover { + color: #212529; + background-color: #e2e6ea; + border-color: #dae0e5; +} + +.btn-light:focus, .btn-light.focus { + box-shadow: 0 0 0 0.2rem rgba(216, 217, 219, 0.5); +} + +.btn-light.disabled, .btn-light:disabled { + color: #212529; + background-color: #f8f9fa; + border-color: #f8f9fa; +} + +.btn-light:not(:disabled):not(.disabled):active, .btn-light:not(:disabled):not(.disabled).active, +.show > .btn-light.dropdown-toggle { + color: #212529; + background-color: #dae0e5; + border-color: #d3d9df; +} + +.btn-light:not(:disabled):not(.disabled):active:focus, .btn-light:not(:disabled):not(.disabled).active:focus, +.show > .btn-light.dropdown-toggle:focus { + box-shadow: 0 0 0 0.2rem rgba(216, 217, 219, 0.5); +} + +.btn-dark { + color: #fff; + background-color: #343a40; + border-color: #343a40; +} + +.btn-dark:hover { + color: #fff; + background-color: #23272b; + border-color: #1d2124; +} + +.btn-dark:focus, .btn-dark.focus { + box-shadow: 0 0 0 0.2rem rgba(82, 88, 93, 0.5); +} + +.btn-dark.disabled, .btn-dark:disabled { + color: #fff; + background-color: #343a40; + border-color: #343a40; +} + +.btn-dark:not(:disabled):not(.disabled):active, .btn-dark:not(:disabled):not(.disabled).active, +.show > .btn-dark.dropdown-toggle { + color: #fff; + background-color: #1d2124; + border-color: #171a1d; +} + +.btn-dark:not(:disabled):not(.disabled):active:focus, .btn-dark:not(:disabled):not(.disabled).active:focus, +.show > .btn-dark.dropdown-toggle:focus { + box-shadow: 0 0 0 0.2rem rgba(82, 88, 93, 0.5); +} + +.btn-outline-primary { + color: #007bff; + border-color: #007bff; +} + +.btn-outline-primary:hover { + color: #fff; + background-color: #007bff; + border-color: #007bff; +} + +.btn-outline-primary:focus, 
.btn-outline-primary.focus { + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); +} + +.btn-outline-primary.disabled, .btn-outline-primary:disabled { + color: #007bff; + background-color: transparent; +} + +.btn-outline-primary:not(:disabled):not(.disabled):active, .btn-outline-primary:not(:disabled):not(.disabled).active, +.show > .btn-outline-primary.dropdown-toggle { + color: #fff; + background-color: #007bff; + border-color: #007bff; +} + +.btn-outline-primary:not(:disabled):not(.disabled):active:focus, .btn-outline-primary:not(:disabled):not(.disabled).active:focus, +.show > .btn-outline-primary.dropdown-toggle:focus { + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); +} + +.btn-outline-secondary { + color: #6c757d; + border-color: #6c757d; +} + +.btn-outline-secondary:hover { + color: #fff; + background-color: #6c757d; + border-color: #6c757d; +} + +.btn-outline-secondary:focus, .btn-outline-secondary.focus { + box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); +} + +.btn-outline-secondary.disabled, .btn-outline-secondary:disabled { + color: #6c757d; + background-color: transparent; +} + +.btn-outline-secondary:not(:disabled):not(.disabled):active, .btn-outline-secondary:not(:disabled):not(.disabled).active, +.show > .btn-outline-secondary.dropdown-toggle { + color: #fff; + background-color: #6c757d; + border-color: #6c757d; +} + +.btn-outline-secondary:not(:disabled):not(.disabled):active:focus, .btn-outline-secondary:not(:disabled):not(.disabled).active:focus, +.show > .btn-outline-secondary.dropdown-toggle:focus { + box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); +} + +.btn-outline-success { + color: #28a745; + border-color: #28a745; +} + +.btn-outline-success:hover { + color: #fff; + background-color: #28a745; + border-color: #28a745; +} + +.btn-outline-success:focus, .btn-outline-success.focus { + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); +} + +.btn-outline-success.disabled, .btn-outline-success:disabled { + color: #28a745; + background-color: transparent; +} + +.btn-outline-success:not(:disabled):not(.disabled):active, .btn-outline-success:not(:disabled):not(.disabled).active, +.show > .btn-outline-success.dropdown-toggle { + color: #fff; + background-color: #28a745; + border-color: #28a745; +} + +.btn-outline-success:not(:disabled):not(.disabled):active:focus, .btn-outline-success:not(:disabled):not(.disabled).active:focus, +.show > .btn-outline-success.dropdown-toggle:focus { + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); +} + +.btn-outline-info { + color: #17a2b8; + border-color: #17a2b8; +} + +.btn-outline-info:hover { + color: #fff; + background-color: #17a2b8; + border-color: #17a2b8; +} + +.btn-outline-info:focus, .btn-outline-info.focus { + box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); +} + +.btn-outline-info.disabled, .btn-outline-info:disabled { + color: #17a2b8; + background-color: transparent; +} + +.btn-outline-info:not(:disabled):not(.disabled):active, .btn-outline-info:not(:disabled):not(.disabled).active, +.show > .btn-outline-info.dropdown-toggle { + color: #fff; + background-color: #17a2b8; + border-color: #17a2b8; +} + +.btn-outline-info:not(:disabled):not(.disabled):active:focus, .btn-outline-info:not(:disabled):not(.disabled).active:focus, +.show > .btn-outline-info.dropdown-toggle:focus { + box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); +} + +.btn-outline-warning { + color: #ffc107; + border-color: #ffc107; +} + +.btn-outline-warning:hover { + color: #212529; + background-color: #ffc107; + border-color: #ffc107; +} + 
+.btn-outline-warning:focus, .btn-outline-warning.focus { + box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); +} + +.btn-outline-warning.disabled, .btn-outline-warning:disabled { + color: #ffc107; + background-color: transparent; +} + +.btn-outline-warning:not(:disabled):not(.disabled):active, .btn-outline-warning:not(:disabled):not(.disabled).active, +.show > .btn-outline-warning.dropdown-toggle { + color: #212529; + background-color: #ffc107; + border-color: #ffc107; +} + +.btn-outline-warning:not(:disabled):not(.disabled):active:focus, .btn-outline-warning:not(:disabled):not(.disabled).active:focus, +.show > .btn-outline-warning.dropdown-toggle:focus { + box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); +} + +.btn-outline-danger { + color: #dc3545; + border-color: #dc3545; +} + +.btn-outline-danger:hover { + color: #fff; + background-color: #dc3545; + border-color: #dc3545; +} + +.btn-outline-danger:focus, .btn-outline-danger.focus { + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); +} + +.btn-outline-danger.disabled, .btn-outline-danger:disabled { + color: #dc3545; + background-color: transparent; +} + +.btn-outline-danger:not(:disabled):not(.disabled):active, .btn-outline-danger:not(:disabled):not(.disabled).active, +.show > .btn-outline-danger.dropdown-toggle { + color: #fff; + background-color: #dc3545; + border-color: #dc3545; +} + +.btn-outline-danger:not(:disabled):not(.disabled):active:focus, .btn-outline-danger:not(:disabled):not(.disabled).active:focus, +.show > .btn-outline-danger.dropdown-toggle:focus { + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); +} + +.btn-outline-light { + color: #f8f9fa; + border-color: #f8f9fa; +} + +.btn-outline-light:hover { + color: #212529; + background-color: #f8f9fa; + border-color: #f8f9fa; +} + +.btn-outline-light:focus, .btn-outline-light.focus { + box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); +} + +.btn-outline-light.disabled, .btn-outline-light:disabled { + color: #f8f9fa; + background-color: transparent; +} + +.btn-outline-light:not(:disabled):not(.disabled):active, .btn-outline-light:not(:disabled):not(.disabled).active, +.show > .btn-outline-light.dropdown-toggle { + color: #212529; + background-color: #f8f9fa; + border-color: #f8f9fa; +} + +.btn-outline-light:not(:disabled):not(.disabled):active:focus, .btn-outline-light:not(:disabled):not(.disabled).active:focus, +.show > .btn-outline-light.dropdown-toggle:focus { + box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); +} + +.btn-outline-dark { + color: #343a40; + border-color: #343a40; +} + +.btn-outline-dark:hover { + color: #fff; + background-color: #343a40; + border-color: #343a40; +} + +.btn-outline-dark:focus, .btn-outline-dark.focus { + box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); +} + +.btn-outline-dark.disabled, .btn-outline-dark:disabled { + color: #343a40; + background-color: transparent; +} + +.btn-outline-dark:not(:disabled):not(.disabled):active, .btn-outline-dark:not(:disabled):not(.disabled).active, +.show > .btn-outline-dark.dropdown-toggle { + color: #fff; + background-color: #343a40; + border-color: #343a40; +} + +.btn-outline-dark:not(:disabled):not(.disabled):active:focus, .btn-outline-dark:not(:disabled):not(.disabled).active:focus, +.show > .btn-outline-dark.dropdown-toggle:focus { + box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); +} + +.btn-link { + font-weight: 400; + color: #007bff; + text-decoration: none; +} + +.btn-link:hover { + color: #0056b3; + text-decoration: underline; +} + +.btn-link:focus, .btn-link.focus { + text-decoration: underline; + 
box-shadow: none; +} + +.btn-link:disabled, .btn-link.disabled { + color: #6c757d; + pointer-events: none; +} + +.btn-lg, .btn-group-lg > .btn { + padding: 0.5rem 1rem; + font-size: 1.25rem; + line-height: 1.5; + border-radius: 0.3rem; +} + +.btn-sm, .btn-group-sm > .btn { + padding: 0.25rem 0.5rem; + font-size: 0.875rem; + line-height: 1.5; + border-radius: 0.2rem; +} + +.btn-block { + display: block; + width: 100%; +} + +.btn-block + .btn-block { + margin-top: 0.5rem; +} + +input[type="submit"].btn-block, +input[type="reset"].btn-block, +input[type="button"].btn-block { + width: 100%; +} + +.fade { + transition: opacity 0.15s linear; +} + +@media (prefers-reduced-motion: reduce) { + .fade { + transition: none; + } +} + +.fade:not(.show) { + opacity: 0; +} + +.collapse:not(.show) { + display: none; +} + +.collapsing { + position: relative; + height: 0; + overflow: hidden; + transition: height 0.35s ease; +} + +@media (prefers-reduced-motion: reduce) { + .collapsing { + transition: none; + } +} + +.dropup, +.dropright, +.dropdown, +.dropleft { + position: relative; +} + +.dropdown-toggle { + white-space: nowrap; +} + +.dropdown-toggle::after { + display: inline-block; + margin-left: 0.255em; + vertical-align: 0.255em; + content: ""; + border-top: 0.3em solid; + border-right: 0.3em solid transparent; + border-bottom: 0; + border-left: 0.3em solid transparent; +} + +.dropdown-toggle:empty::after { + margin-left: 0; +} + +.dropdown-menu { + position: absolute; + top: 100%; + left: 0; + z-index: 1000; + display: none; + float: left; + min-width: 10rem; + padding: 0.5rem 0; + margin: 0.125rem 0 0; + font-size: 1rem; + color: #212529; + text-align: left; + list-style: none; + background-color: #fff; + background-clip: padding-box; + border: 1px solid rgba(0, 0, 0, 0.15); + border-radius: 0.25rem; +} + +.dropdown-menu-left { + right: auto; + left: 0; +} + +.dropdown-menu-right { + right: 0; + left: auto; +} + +@media (min-width: 576px) { + .dropdown-menu-sm-left { + right: auto; + left: 0; + } + .dropdown-menu-sm-right { + right: 0; + left: auto; + } +} + +@media (min-width: 768px) { + .dropdown-menu-md-left { + right: auto; + left: 0; + } + .dropdown-menu-md-right { + right: 0; + left: auto; + } +} + +@media (min-width: 992px) { + .dropdown-menu-lg-left { + right: auto; + left: 0; + } + .dropdown-menu-lg-right { + right: 0; + left: auto; + } +} + +@media (min-width: 1200px) { + .dropdown-menu-xl-left { + right: auto; + left: 0; + } + .dropdown-menu-xl-right { + right: 0; + left: auto; + } +} + +.dropup .dropdown-menu { + top: auto; + bottom: 100%; + margin-top: 0; + margin-bottom: 0.125rem; +} + +.dropup .dropdown-toggle::after { + display: inline-block; + margin-left: 0.255em; + vertical-align: 0.255em; + content: ""; + border-top: 0; + border-right: 0.3em solid transparent; + border-bottom: 0.3em solid; + border-left: 0.3em solid transparent; +} + +.dropup .dropdown-toggle:empty::after { + margin-left: 0; +} + +.dropright .dropdown-menu { + top: 0; + right: auto; + left: 100%; + margin-top: 0; + margin-left: 0.125rem; +} + +.dropright .dropdown-toggle::after { + display: inline-block; + margin-left: 0.255em; + vertical-align: 0.255em; + content: ""; + border-top: 0.3em solid transparent; + border-right: 0; + border-bottom: 0.3em solid transparent; + border-left: 0.3em solid; +} + +.dropright .dropdown-toggle:empty::after { + margin-left: 0; +} + +.dropright .dropdown-toggle::after { + vertical-align: 0; +} + +.dropleft .dropdown-menu { + top: 0; + right: 100%; + left: auto; + margin-top: 0; + 
margin-right: 0.125rem; +} + +.dropleft .dropdown-toggle::after { + display: inline-block; + margin-left: 0.255em; + vertical-align: 0.255em; + content: ""; +} + +.dropleft .dropdown-toggle::after { + display: none; +} + +.dropleft .dropdown-toggle::before { + display: inline-block; + margin-right: 0.255em; + vertical-align: 0.255em; + content: ""; + border-top: 0.3em solid transparent; + border-right: 0.3em solid; + border-bottom: 0.3em solid transparent; +} + +.dropleft .dropdown-toggle:empty::after { + margin-left: 0; +} + +.dropleft .dropdown-toggle::before { + vertical-align: 0; +} + +.dropdown-menu[x-placement^="top"], .dropdown-menu[x-placement^="right"], .dropdown-menu[x-placement^="bottom"], .dropdown-menu[x-placement^="left"] { + right: auto; + bottom: auto; +} + +.dropdown-divider { + height: 0; + margin: 0.5rem 0; + overflow: hidden; + border-top: 1px solid #e9ecef; +} + +.dropdown-item { + display: block; + width: 100%; + padding: 0.25rem 1.5rem; + clear: both; + font-weight: 400; + color: #212529; + text-align: inherit; + white-space: nowrap; + background-color: transparent; + border: 0; +} + +.dropdown-item:hover, .dropdown-item:focus { + color: #16181b; + text-decoration: none; + background-color: #f8f9fa; +} + +.dropdown-item.active, .dropdown-item:active { + color: #fff; + text-decoration: none; + background-color: #007bff; +} + +.dropdown-item.disabled, .dropdown-item:disabled { + color: #6c757d; + pointer-events: none; + background-color: transparent; +} + +.dropdown-menu.show { + display: block; +} + +.dropdown-header { + display: block; + padding: 0.5rem 1.5rem; + margin-bottom: 0; + font-size: 0.875rem; + color: #6c757d; + white-space: nowrap; +} + +.dropdown-item-text { + display: block; + padding: 0.25rem 1.5rem; + color: #212529; +} + +.btn-group, +.btn-group-vertical { + position: relative; + display: -ms-inline-flexbox; + display: inline-flex; + vertical-align: middle; +} + +.btn-group > .btn, +.btn-group-vertical > .btn { + position: relative; + -ms-flex: 1 1 auto; + flex: 1 1 auto; +} + +.btn-group > .btn:hover, +.btn-group-vertical > .btn:hover { + z-index: 1; +} + +.btn-group > .btn:focus, .btn-group > .btn:active, .btn-group > .btn.active, +.btn-group-vertical > .btn:focus, +.btn-group-vertical > .btn:active, +.btn-group-vertical > .btn.active { + z-index: 1; +} + +.btn-toolbar { + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + -ms-flex-pack: start; + justify-content: flex-start; +} + +.btn-toolbar .input-group { + width: auto; +} + +.btn-group > .btn:not(:first-child), +.btn-group > .btn-group:not(:first-child) { + margin-left: -1px; +} + +.btn-group > .btn:not(:last-child):not(.dropdown-toggle), +.btn-group > .btn-group:not(:last-child) > .btn { + border-top-right-radius: 0; + border-bottom-right-radius: 0; +} + +.btn-group > .btn:not(:first-child), +.btn-group > .btn-group:not(:first-child) > .btn { + border-top-left-radius: 0; + border-bottom-left-radius: 0; +} + +.dropdown-toggle-split { + padding-right: 0.5625rem; + padding-left: 0.5625rem; +} + +.dropdown-toggle-split::after, +.dropup .dropdown-toggle-split::after, +.dropright .dropdown-toggle-split::after { + margin-left: 0; +} + +.dropleft .dropdown-toggle-split::before { + margin-right: 0; +} + +.btn-sm + .dropdown-toggle-split, .btn-group-sm > .btn + .dropdown-toggle-split { + padding-right: 0.375rem; + padding-left: 0.375rem; +} + +.btn-lg + .dropdown-toggle-split, .btn-group-lg > .btn + .dropdown-toggle-split { + padding-right: 0.75rem; + padding-left: 
0.75rem; +} + +.btn-group-vertical { + -ms-flex-direction: column; + flex-direction: column; + -ms-flex-align: start; + align-items: flex-start; + -ms-flex-pack: center; + justify-content: center; +} + +.btn-group-vertical > .btn, +.btn-group-vertical > .btn-group { + width: 100%; +} + +.btn-group-vertical > .btn:not(:first-child), +.btn-group-vertical > .btn-group:not(:first-child) { + margin-top: -1px; +} + +.btn-group-vertical > .btn:not(:last-child):not(.dropdown-toggle), +.btn-group-vertical > .btn-group:not(:last-child) > .btn { + border-bottom-right-radius: 0; + border-bottom-left-radius: 0; +} + +.btn-group-vertical > .btn:not(:first-child), +.btn-group-vertical > .btn-group:not(:first-child) > .btn { + border-top-left-radius: 0; + border-top-right-radius: 0; +} + +.btn-group-toggle > .btn, +.btn-group-toggle > .btn-group > .btn { + margin-bottom: 0; +} + +.btn-group-toggle > .btn input[type="radio"], +.btn-group-toggle > .btn input[type="checkbox"], +.btn-group-toggle > .btn-group > .btn input[type="radio"], +.btn-group-toggle > .btn-group > .btn input[type="checkbox"] { + position: absolute; + clip: rect(0, 0, 0, 0); + pointer-events: none; +} + +.input-group { + position: relative; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + -ms-flex-align: stretch; + align-items: stretch; + width: 100%; +} + +.input-group > .form-control, +.input-group > .form-control-plaintext, +.input-group > .custom-select, +.input-group > .custom-file { + position: relative; + -ms-flex: 1 1 auto; + flex: 1 1 auto; + width: 1%; + margin-bottom: 0; +} + +.input-group > .form-control + .form-control, +.input-group > .form-control + .custom-select, +.input-group > .form-control + .custom-file, +.input-group > .form-control-plaintext + .form-control, +.input-group > .form-control-plaintext + .custom-select, +.input-group > .form-control-plaintext + .custom-file, +.input-group > .custom-select + .form-control, +.input-group > .custom-select + .custom-select, +.input-group > .custom-select + .custom-file, +.input-group > .custom-file + .form-control, +.input-group > .custom-file + .custom-select, +.input-group > .custom-file + .custom-file { + margin-left: -1px; +} + +.input-group > .form-control:focus, +.input-group > .custom-select:focus, +.input-group > .custom-file .custom-file-input:focus ~ .custom-file-label { + z-index: 3; +} + +.input-group > .custom-file .custom-file-input:focus { + z-index: 4; +} + +.input-group > .form-control:not(:last-child), +.input-group > .custom-select:not(:last-child) { + border-top-right-radius: 0; + border-bottom-right-radius: 0; +} + +.input-group > .form-control:not(:first-child), +.input-group > .custom-select:not(:first-child) { + border-top-left-radius: 0; + border-bottom-left-radius: 0; +} + +.input-group > .custom-file { + display: -ms-flexbox; + display: flex; + -ms-flex-align: center; + align-items: center; +} + +.input-group > .custom-file:not(:last-child) .custom-file-label, +.input-group > .custom-file:not(:last-child) .custom-file-label::after { + border-top-right-radius: 0; + border-bottom-right-radius: 0; +} + +.input-group > .custom-file:not(:first-child) .custom-file-label { + border-top-left-radius: 0; + border-bottom-left-radius: 0; +} + +.input-group-prepend, +.input-group-append { + display: -ms-flexbox; + display: flex; +} + +.input-group-prepend .btn, +.input-group-append .btn { + position: relative; + z-index: 2; +} + +.input-group-prepend .btn:focus, +.input-group-append .btn:focus { + z-index: 3; +} + 
+.input-group-prepend .btn + .btn, +.input-group-prepend .btn + .input-group-text, +.input-group-prepend .input-group-text + .input-group-text, +.input-group-prepend .input-group-text + .btn, +.input-group-append .btn + .btn, +.input-group-append .btn + .input-group-text, +.input-group-append .input-group-text + .input-group-text, +.input-group-append .input-group-text + .btn { + margin-left: -1px; +} + +.input-group-prepend { + margin-right: -1px; +} + +.input-group-append { + margin-left: -1px; +} + +.input-group-text { + display: -ms-flexbox; + display: flex; + -ms-flex-align: center; + align-items: center; + padding: 0.375rem 0.75rem; + margin-bottom: 0; + font-size: 1rem; + font-weight: 400; + line-height: 1.5; + color: #495057; + text-align: center; + white-space: nowrap; + background-color: #e9ecef; + border: 1px solid #ced4da; + border-radius: 0.25rem; +} + +.input-group-text input[type="radio"], +.input-group-text input[type="checkbox"] { + margin-top: 0; +} + +.input-group-lg > .form-control:not(textarea), +.input-group-lg > .custom-select { + height: calc(1.5em + 1rem + 2px); +} + +.input-group-lg > .form-control, +.input-group-lg > .custom-select, +.input-group-lg > .input-group-prepend > .input-group-text, +.input-group-lg > .input-group-append > .input-group-text, +.input-group-lg > .input-group-prepend > .btn, +.input-group-lg > .input-group-append > .btn { + padding: 0.5rem 1rem; + font-size: 1.25rem; + line-height: 1.5; + border-radius: 0.3rem; +} + +.input-group-sm > .form-control:not(textarea), +.input-group-sm > .custom-select { + height: calc(1.5em + 0.5rem + 2px); +} + +.input-group-sm > .form-control, +.input-group-sm > .custom-select, +.input-group-sm > .input-group-prepend > .input-group-text, +.input-group-sm > .input-group-append > .input-group-text, +.input-group-sm > .input-group-prepend > .btn, +.input-group-sm > .input-group-append > .btn { + padding: 0.25rem 0.5rem; + font-size: 0.875rem; + line-height: 1.5; + border-radius: 0.2rem; +} + +.input-group-lg > .custom-select, +.input-group-sm > .custom-select { + padding-right: 1.75rem; +} + +.input-group > .input-group-prepend > .btn, +.input-group > .input-group-prepend > .input-group-text, +.input-group > .input-group-append:not(:last-child) > .btn, +.input-group > .input-group-append:not(:last-child) > .input-group-text, +.input-group > .input-group-append:last-child > .btn:not(:last-child):not(.dropdown-toggle), +.input-group > .input-group-append:last-child > .input-group-text:not(:last-child) { + border-top-right-radius: 0; + border-bottom-right-radius: 0; +} + +.input-group > .input-group-append > .btn, +.input-group > .input-group-append > .input-group-text, +.input-group > .input-group-prepend:not(:first-child) > .btn, +.input-group > .input-group-prepend:not(:first-child) > .input-group-text, +.input-group > .input-group-prepend:first-child > .btn:not(:first-child), +.input-group > .input-group-prepend:first-child > .input-group-text:not(:first-child) { + border-top-left-radius: 0; + border-bottom-left-radius: 0; +} + +.custom-control { + position: relative; + display: block; + min-height: 1.5rem; + padding-left: 1.5rem; +} + +.custom-control-inline { + display: -ms-inline-flexbox; + display: inline-flex; + margin-right: 1rem; +} + +.custom-control-input { + position: absolute; + z-index: -1; + opacity: 0; +} + +.custom-control-input:checked ~ .custom-control-label::before { + color: #fff; + border-color: #007bff; + background-color: #007bff; +} + +.custom-control-input:focus ~ 
.custom-control-label::before { + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} + +.custom-control-input:focus:not(:checked) ~ .custom-control-label::before { + border-color: #80bdff; +} + +.custom-control-input:not(:disabled):active ~ .custom-control-label::before { + color: #fff; + background-color: #b3d7ff; + border-color: #b3d7ff; +} + +.custom-control-input:disabled ~ .custom-control-label { + color: #6c757d; +} + +.custom-control-input:disabled ~ .custom-control-label::before { + background-color: #e9ecef; +} + +.custom-control-label { + position: relative; + margin-bottom: 0; + vertical-align: top; +} + +.custom-control-label::before { + position: absolute; + top: 0.25rem; + left: -1.5rem; + display: block; + width: 1rem; + height: 1rem; + pointer-events: none; + content: ""; + background-color: #fff; + border: #adb5bd solid 1px; +} + +.custom-control-label::after { + position: absolute; + top: 0.25rem; + left: -1.5rem; + display: block; + width: 1rem; + height: 1rem; + content: ""; + background: no-repeat 50% / 50% 50%; +} + +.custom-checkbox .custom-control-label::before { + border-radius: 0.25rem; +} + +.custom-checkbox .custom-control-input:checked ~ .custom-control-label::after { + background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%23fff' d='M6.564.75l-3.59 3.612-1.538-1.55L0 4.26 2.974 7.25 8 2.193z'/%3e%3c/svg%3e"); +} + +.custom-checkbox .custom-control-input:indeterminate ~ .custom-control-label::before { + border-color: #007bff; + background-color: #007bff; +} + +.custom-checkbox .custom-control-input:indeterminate ~ .custom-control-label::after { + background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 4'%3e%3cpath stroke='%23fff' d='M0 2h4'/%3e%3c/svg%3e"); +} + +.custom-checkbox .custom-control-input:disabled:checked ~ .custom-control-label::before { + background-color: rgba(0, 123, 255, 0.5); +} + +.custom-checkbox .custom-control-input:disabled:indeterminate ~ .custom-control-label::before { + background-color: rgba(0, 123, 255, 0.5); +} + +.custom-radio .custom-control-label::before { + border-radius: 50%; +} + +.custom-radio .custom-control-input:checked ~ .custom-control-label::after { + background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='%23fff'/%3e%3c/svg%3e"); +} + +.custom-radio .custom-control-input:disabled:checked ~ .custom-control-label::before { + background-color: rgba(0, 123, 255, 0.5); +} + +.custom-switch { + padding-left: 2.25rem; +} + +.custom-switch .custom-control-label::before { + left: -2.25rem; + width: 1.75rem; + pointer-events: all; + border-radius: 0.5rem; +} + +.custom-switch .custom-control-label::after { + top: calc(0.25rem + 2px); + left: calc(-2.25rem + 2px); + width: calc(1rem - 4px); + height: calc(1rem - 4px); + background-color: #adb5bd; + border-radius: 0.5rem; + transition: background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, -webkit-transform 0.15s ease-in-out; + transition: transform 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; + transition: transform 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, -webkit-transform 0.15s ease-in-out; +} + +@media (prefers-reduced-motion: reduce) { + .custom-switch .custom-control-label::after { + transition: none; + } +} 
+ +.custom-switch .custom-control-input:checked ~ .custom-control-label::after { + background-color: #fff; + -webkit-transform: translateX(0.75rem); + transform: translateX(0.75rem); +} + +.custom-switch .custom-control-input:disabled:checked ~ .custom-control-label::before { + background-color: rgba(0, 123, 255, 0.5); +} + +.custom-select { + display: inline-block; + width: 100%; + height: calc(1.5em + 0.75rem + 2px); + padding: 0.375rem 1.75rem 0.375rem 0.75rem; + font-size: 1rem; + font-weight: 400; + line-height: 1.5; + color: #495057; + vertical-align: middle; + background: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 5'%3e%3cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3e%3c/svg%3e") no-repeat right 0.75rem center/8px 10px; + background-color: #fff; + border: 1px solid #ced4da; + border-radius: 0.25rem; + -webkit-appearance: none; + -moz-appearance: none; + appearance: none; +} + +.custom-select:focus { + border-color: #80bdff; + outline: 0; + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} + +.custom-select:focus::-ms-value { + color: #495057; + background-color: #fff; +} + +.custom-select[multiple], .custom-select[size]:not([size="1"]) { + height: auto; + padding-right: 0.75rem; + background-image: none; +} + +.custom-select:disabled { + color: #6c757d; + background-color: #e9ecef; +} + +.custom-select::-ms-expand { + display: none; +} + +.custom-select-sm { + height: calc(1.5em + 0.5rem + 2px); + padding-top: 0.25rem; + padding-bottom: 0.25rem; + padding-left: 0.5rem; + font-size: 0.875rem; +} + +.custom-select-lg { + height: calc(1.5em + 1rem + 2px); + padding-top: 0.5rem; + padding-bottom: 0.5rem; + padding-left: 1rem; + font-size: 1.25rem; +} + +.custom-file { + position: relative; + display: inline-block; + width: 100%; + height: calc(1.5em + 0.75rem + 2px); + margin-bottom: 0; +} + +.custom-file-input { + position: relative; + z-index: 2; + width: 100%; + height: calc(1.5em + 0.75rem + 2px); + margin: 0; + opacity: 0; +} + +.custom-file-input:focus ~ .custom-file-label { + border-color: #80bdff; + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} + +.custom-file-input:disabled ~ .custom-file-label { + background-color: #e9ecef; +} + +.custom-file-input:lang(en) ~ .custom-file-label::after { + content: "Browse"; +} + +.custom-file-input ~ .custom-file-label[data-browse]::after { + content: attr(data-browse); +} + +.custom-file-label { + position: absolute; + top: 0; + right: 0; + left: 0; + z-index: 1; + height: calc(1.5em + 0.75rem + 2px); + padding: 0.375rem 0.75rem; + font-weight: 400; + line-height: 1.5; + color: #495057; + background-color: #fff; + border: 1px solid #ced4da; + border-radius: 0.25rem; +} + +.custom-file-label::after { + position: absolute; + top: 0; + right: 0; + bottom: 0; + z-index: 3; + display: block; + height: calc(1.5em + 0.75rem); + padding: 0.375rem 0.75rem; + line-height: 1.5; + color: #495057; + content: "Browse"; + background-color: #e9ecef; + border-left: inherit; + border-radius: 0 0.25rem 0.25rem 0; +} + +.custom-range { + width: 100%; + height: calc(1rem + 0.4rem); + padding: 0; + background-color: transparent; + -webkit-appearance: none; + -moz-appearance: none; + appearance: none; +} + +.custom-range:focus { + outline: none; +} + +.custom-range:focus::-webkit-slider-thumb { + box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} + +.custom-range:focus::-moz-range-thumb { + box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} + +.custom-range:focus::-ms-thumb { 
+ box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} + +.custom-range::-moz-focus-outer { + border: 0; +} + +.custom-range::-webkit-slider-thumb { + width: 1rem; + height: 1rem; + margin-top: -0.25rem; + background-color: #007bff; + border: 0; + border-radius: 1rem; + transition: background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; + -webkit-appearance: none; + appearance: none; +} + +@media (prefers-reduced-motion: reduce) { + .custom-range::-webkit-slider-thumb { + transition: none; + } +} + +.custom-range::-webkit-slider-thumb:active { + background-color: #b3d7ff; +} + +.custom-range::-webkit-slider-runnable-track { + width: 100%; + height: 0.5rem; + color: transparent; + cursor: pointer; + background-color: #dee2e6; + border-color: transparent; + border-radius: 1rem; +} + +.custom-range::-moz-range-thumb { + width: 1rem; + height: 1rem; + background-color: #007bff; + border: 0; + border-radius: 1rem; + transition: background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; + -moz-appearance: none; + appearance: none; +} + +@media (prefers-reduced-motion: reduce) { + .custom-range::-moz-range-thumb { + transition: none; + } +} + +.custom-range::-moz-range-thumb:active { + background-color: #b3d7ff; +} + +.custom-range::-moz-range-track { + width: 100%; + height: 0.5rem; + color: transparent; + cursor: pointer; + background-color: #dee2e6; + border-color: transparent; + border-radius: 1rem; +} + +.custom-range::-ms-thumb { + width: 1rem; + height: 1rem; + margin-top: 0; + margin-right: 0.2rem; + margin-left: 0.2rem; + background-color: #007bff; + border: 0; + border-radius: 1rem; + transition: background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; + appearance: none; +} + +@media (prefers-reduced-motion: reduce) { + .custom-range::-ms-thumb { + transition: none; + } +} + +.custom-range::-ms-thumb:active { + background-color: #b3d7ff; +} + +.custom-range::-ms-track { + width: 100%; + height: 0.5rem; + color: transparent; + cursor: pointer; + background-color: transparent; + border-color: transparent; + border-width: 0.5rem; +} + +.custom-range::-ms-fill-lower { + background-color: #dee2e6; + border-radius: 1rem; +} + +.custom-range::-ms-fill-upper { + margin-right: 15px; + background-color: #dee2e6; + border-radius: 1rem; +} + +.custom-range:disabled::-webkit-slider-thumb { + background-color: #adb5bd; +} + +.custom-range:disabled::-webkit-slider-runnable-track { + cursor: default; +} + +.custom-range:disabled::-moz-range-thumb { + background-color: #adb5bd; +} + +.custom-range:disabled::-moz-range-track { + cursor: default; +} + +.custom-range:disabled::-ms-thumb { + background-color: #adb5bd; +} + +.custom-control-label::before, +.custom-file-label, +.custom-select { + transition: background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; +} + +@media (prefers-reduced-motion: reduce) { + .custom-control-label::before, + .custom-file-label, + .custom-select { + transition: none; + } +} + +.nav { + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + padding-left: 0; + margin-bottom: 0; + list-style: none; +} + +.nav-link { + display: block; + padding: 0.5rem 1rem; +} + +.nav-link:hover, .nav-link:focus { + text-decoration: none; +} + +.nav-link.disabled { + color: #6c757d; + pointer-events: none; + cursor: default; +} + +.nav-tabs { + border-bottom: 1px solid #dee2e6; +} + 
+.nav-tabs .nav-item { + margin-bottom: -1px; +} + +.nav-tabs .nav-link { + border: 1px solid transparent; + border-top-left-radius: 0.25rem; + border-top-right-radius: 0.25rem; +} + +.nav-tabs .nav-link:hover, .nav-tabs .nav-link:focus { + border-color: #e9ecef #e9ecef #dee2e6; +} + +.nav-tabs .nav-link.disabled { + color: #6c757d; + background-color: transparent; + border-color: transparent; +} + +.nav-tabs .nav-link.active, +.nav-tabs .nav-item.show .nav-link { + color: #495057; + background-color: #fff; + border-color: #dee2e6 #dee2e6 #fff; +} + +.nav-tabs .dropdown-menu { + margin-top: -1px; + border-top-left-radius: 0; + border-top-right-radius: 0; +} + +.nav-pills .nav-link { + border-radius: 0.25rem; +} + +.nav-pills .nav-link.active, +.nav-pills .show > .nav-link { + color: #fff; + background-color: #007bff; +} + +.nav-fill .nav-item { + -ms-flex: 1 1 auto; + flex: 1 1 auto; + text-align: center; +} + +.nav-justified .nav-item { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -ms-flex-positive: 1; + flex-grow: 1; + text-align: center; +} + +.tab-content > .tab-pane { + display: none; +} + +.tab-content > .active { + display: block; +} + +.navbar { + position: relative; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + -ms-flex-align: center; + align-items: center; + -ms-flex-pack: justify; + justify-content: space-between; + padding: 0.5rem 1rem; +} + +.navbar > .container, +.navbar > .container-fluid { + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + -ms-flex-align: center; + align-items: center; + -ms-flex-pack: justify; + justify-content: space-between; +} + +.navbar-brand { + display: inline-block; + padding-top: 0.3125rem; + padding-bottom: 0.3125rem; + margin-right: 1rem; + font-size: 1.25rem; + line-height: inherit; + white-space: nowrap; +} + +.navbar-brand:hover, .navbar-brand:focus { + text-decoration: none; +} + +.navbar-nav { + display: -ms-flexbox; + display: flex; + -ms-flex-direction: column; + flex-direction: column; + padding-left: 0; + margin-bottom: 0; + list-style: none; +} + +.navbar-nav .nav-link { + padding-right: 0; + padding-left: 0; +} + +.navbar-nav .dropdown-menu { + position: static; + float: none; +} + +.navbar-text { + display: inline-block; + padding-top: 0.5rem; + padding-bottom: 0.5rem; +} + +.navbar-collapse { + -ms-flex-preferred-size: 100%; + flex-basis: 100%; + -ms-flex-positive: 1; + flex-grow: 1; + -ms-flex-align: center; + align-items: center; +} + +.navbar-toggler { + padding: 0.25rem 0.75rem; + font-size: 1.25rem; + line-height: 1; + background-color: transparent; + border: 1px solid transparent; + border-radius: 0.25rem; +} + +.navbar-toggler:hover, .navbar-toggler:focus { + text-decoration: none; +} + +.navbar-toggler-icon { + display: inline-block; + width: 1.5em; + height: 1.5em; + vertical-align: middle; + content: ""; + background: no-repeat center center; + background-size: 100% 100%; +} + +@media (max-width: 575.98px) { + .navbar-expand-sm > .container, + .navbar-expand-sm > .container-fluid { + padding-right: 0; + padding-left: 0; + } +} + +@media (min-width: 576px) { + .navbar-expand-sm { + -ms-flex-flow: row nowrap; + flex-flow: row nowrap; + -ms-flex-pack: start; + justify-content: flex-start; + } + .navbar-expand-sm .navbar-nav { + -ms-flex-direction: row; + flex-direction: row; + } + .navbar-expand-sm .navbar-nav .dropdown-menu { + position: absolute; + } + .navbar-expand-sm .navbar-nav .nav-link { + padding-right: 0.5rem; + padding-left: 0.5rem; + } + 
.navbar-expand-sm > .container, + .navbar-expand-sm > .container-fluid { + -ms-flex-wrap: nowrap; + flex-wrap: nowrap; + } + .navbar-expand-sm .navbar-collapse { + display: -ms-flexbox !important; + display: flex !important; + -ms-flex-preferred-size: auto; + flex-basis: auto; + } + .navbar-expand-sm .navbar-toggler { + display: none; + } +} + +@media (max-width: 767.98px) { + .navbar-expand-md > .container, + .navbar-expand-md > .container-fluid { + padding-right: 0; + padding-left: 0; + } +} + +@media (min-width: 768px) { + .navbar-expand-md { + -ms-flex-flow: row nowrap; + flex-flow: row nowrap; + -ms-flex-pack: start; + justify-content: flex-start; + } + .navbar-expand-md .navbar-nav { + -ms-flex-direction: row; + flex-direction: row; + } + .navbar-expand-md .navbar-nav .dropdown-menu { + position: absolute; + } + .navbar-expand-md .navbar-nav .nav-link { + padding-right: 0.5rem; + padding-left: 0.5rem; + } + .navbar-expand-md > .container, + .navbar-expand-md > .container-fluid { + -ms-flex-wrap: nowrap; + flex-wrap: nowrap; + } + .navbar-expand-md .navbar-collapse { + display: -ms-flexbox !important; + display: flex !important; + -ms-flex-preferred-size: auto; + flex-basis: auto; + } + .navbar-expand-md .navbar-toggler { + display: none; + } +} + +@media (max-width: 991.98px) { + .navbar-expand-lg > .container, + .navbar-expand-lg > .container-fluid { + padding-right: 0; + padding-left: 0; + } +} + +@media (min-width: 992px) { + .navbar-expand-lg { + -ms-flex-flow: row nowrap; + flex-flow: row nowrap; + -ms-flex-pack: start; + justify-content: flex-start; + } + .navbar-expand-lg .navbar-nav { + -ms-flex-direction: row; + flex-direction: row; + } + .navbar-expand-lg .navbar-nav .dropdown-menu { + position: absolute; + } + .navbar-expand-lg .navbar-nav .nav-link { + padding-right: 0.5rem; + padding-left: 0.5rem; + } + .navbar-expand-lg > .container, + .navbar-expand-lg > .container-fluid { + -ms-flex-wrap: nowrap; + flex-wrap: nowrap; + } + .navbar-expand-lg .navbar-collapse { + display: -ms-flexbox !important; + display: flex !important; + -ms-flex-preferred-size: auto; + flex-basis: auto; + } + .navbar-expand-lg .navbar-toggler { + display: none; + } +} + +@media (max-width: 1199.98px) { + .navbar-expand-xl > .container, + .navbar-expand-xl > .container-fluid { + padding-right: 0; + padding-left: 0; + } +} + +@media (min-width: 1200px) { + .navbar-expand-xl { + -ms-flex-flow: row nowrap; + flex-flow: row nowrap; + -ms-flex-pack: start; + justify-content: flex-start; + } + .navbar-expand-xl .navbar-nav { + -ms-flex-direction: row; + flex-direction: row; + } + .navbar-expand-xl .navbar-nav .dropdown-menu { + position: absolute; + } + .navbar-expand-xl .navbar-nav .nav-link { + padding-right: 0.5rem; + padding-left: 0.5rem; + } + .navbar-expand-xl > .container, + .navbar-expand-xl > .container-fluid { + -ms-flex-wrap: nowrap; + flex-wrap: nowrap; + } + .navbar-expand-xl .navbar-collapse { + display: -ms-flexbox !important; + display: flex !important; + -ms-flex-preferred-size: auto; + flex-basis: auto; + } + .navbar-expand-xl .navbar-toggler { + display: none; + } +} + +.navbar-expand { + -ms-flex-flow: row nowrap; + flex-flow: row nowrap; + -ms-flex-pack: start; + justify-content: flex-start; +} + +.navbar-expand > .container, +.navbar-expand > .container-fluid { + padding-right: 0; + padding-left: 0; +} + +.navbar-expand .navbar-nav { + -ms-flex-direction: row; + flex-direction: row; +} + +.navbar-expand .navbar-nav .dropdown-menu { + position: absolute; +} + +.navbar-expand 
.navbar-nav .nav-link { + padding-right: 0.5rem; + padding-left: 0.5rem; +} + +.navbar-expand > .container, +.navbar-expand > .container-fluid { + -ms-flex-wrap: nowrap; + flex-wrap: nowrap; +} + +.navbar-expand .navbar-collapse { + display: -ms-flexbox !important; + display: flex !important; + -ms-flex-preferred-size: auto; + flex-basis: auto; +} + +.navbar-expand .navbar-toggler { + display: none; +} + +.navbar-light .navbar-brand { + color: rgba(0, 0, 0, 0.9); +} + +.navbar-light .navbar-brand:hover, .navbar-light .navbar-brand:focus { + color: rgba(0, 0, 0, 0.9); +} + +.navbar-light .navbar-nav .nav-link { + color: rgba(0, 0, 0, 0.5); +} + +.navbar-light .navbar-nav .nav-link:hover, .navbar-light .navbar-nav .nav-link:focus { + color: rgba(0, 0, 0, 0.7); +} + +.navbar-light .navbar-nav .nav-link.disabled { + color: rgba(0, 0, 0, 0.3); +} + +.navbar-light .navbar-nav .show > .nav-link, +.navbar-light .navbar-nav .active > .nav-link, +.navbar-light .navbar-nav .nav-link.show, +.navbar-light .navbar-nav .nav-link.active { + color: rgba(0, 0, 0, 0.9); +} + +.navbar-light .navbar-toggler { + color: rgba(0, 0, 0, 0.5); + border-color: rgba(0, 0, 0, 0.1); +} + +.navbar-light .navbar-toggler-icon { + background-image: url("data:image/svg+xml,%3csvg viewBox='0 0 30 30' xmlns='http://www.w3.org/2000/svg'%3e%3cpath stroke='rgba(0, 0, 0, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e"); +} + +.navbar-light .navbar-text { + color: rgba(0, 0, 0, 0.5); +} + +.navbar-light .navbar-text a { + color: rgba(0, 0, 0, 0.9); +} + +.navbar-light .navbar-text a:hover, .navbar-light .navbar-text a:focus { + color: rgba(0, 0, 0, 0.9); +} + +.navbar-dark .navbar-brand { + color: #fff; +} + +.navbar-dark .navbar-brand:hover, .navbar-dark .navbar-brand:focus { + color: #fff; +} + +.navbar-dark .navbar-nav .nav-link { + color: rgba(255, 255, 255, 0.5); +} + +.navbar-dark .navbar-nav .nav-link:hover, .navbar-dark .navbar-nav .nav-link:focus { + color: rgba(255, 255, 255, 0.75); +} + +.navbar-dark .navbar-nav .nav-link.disabled { + color: rgba(255, 255, 255, 0.25); +} + +.navbar-dark .navbar-nav .show > .nav-link, +.navbar-dark .navbar-nav .active > .nav-link, +.navbar-dark .navbar-nav .nav-link.show, +.navbar-dark .navbar-nav .nav-link.active { + color: #fff; +} + +.navbar-dark .navbar-toggler { + color: rgba(255, 255, 255, 0.5); + border-color: rgba(255, 255, 255, 0.1); +} + +.navbar-dark .navbar-toggler-icon { + background-image: url("data:image/svg+xml,%3csvg viewBox='0 0 30 30' xmlns='http://www.w3.org/2000/svg'%3e%3cpath stroke='rgba(255, 255, 255, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e"); +} + +.navbar-dark .navbar-text { + color: rgba(255, 255, 255, 0.5); +} + +.navbar-dark .navbar-text a { + color: #fff; +} + +.navbar-dark .navbar-text a:hover, .navbar-dark .navbar-text a:focus { + color: #fff; +} + +.card { + position: relative; + display: -ms-flexbox; + display: flex; + -ms-flex-direction: column; + flex-direction: column; + min-width: 0; + word-wrap: break-word; + background-color: #fff; + background-clip: border-box; + border: 1px solid rgba(0, 0, 0, 0.125); + border-radius: 0.25rem; +} + +.card > hr { + margin-right: 0; + margin-left: 0; +} + +.card > .list-group:first-child .list-group-item:first-child { + border-top-left-radius: 0.25rem; + border-top-right-radius: 0.25rem; +} + +.card > .list-group:last-child .list-group-item:last-child { + border-bottom-right-radius: 
0.25rem; + border-bottom-left-radius: 0.25rem; +} + +.card-body { + -ms-flex: 1 1 auto; + flex: 1 1 auto; + padding: 1.25rem; +} + +.card-title { + margin-bottom: 0.75rem; +} + +.card-subtitle { + margin-top: -0.375rem; + margin-bottom: 0; +} + +.card-text:last-child { + margin-bottom: 0; +} + +.card-link:hover { + text-decoration: none; +} + +.card-link + .card-link { + margin-left: 1.25rem; +} + +.card-header { + padding: 0.75rem 1.25rem; + margin-bottom: 0; + background-color: rgba(0, 0, 0, 0.03); + border-bottom: 1px solid rgba(0, 0, 0, 0.125); +} + +.card-header:first-child { + border-radius: calc(0.25rem - 1px) calc(0.25rem - 1px) 0 0; +} + +.card-header + .list-group .list-group-item:first-child { + border-top: 0; +} + +.card-footer { + padding: 0.75rem 1.25rem; + background-color: rgba(0, 0, 0, 0.03); + border-top: 1px solid rgba(0, 0, 0, 0.125); +} + +.card-footer:last-child { + border-radius: 0 0 calc(0.25rem - 1px) calc(0.25rem - 1px); +} + +.card-header-tabs { + margin-right: -0.625rem; + margin-bottom: -0.75rem; + margin-left: -0.625rem; + border-bottom: 0; +} + +.card-header-pills { + margin-right: -0.625rem; + margin-left: -0.625rem; +} + +.card-img-overlay { + position: absolute; + top: 0; + right: 0; + bottom: 0; + left: 0; + padding: 1.25rem; +} + +.card-img { + width: 100%; + border-radius: calc(0.25rem - 1px); +} + +.card-img-top { + width: 100%; + border-top-left-radius: calc(0.25rem - 1px); + border-top-right-radius: calc(0.25rem - 1px); +} + +.card-img-bottom { + width: 100%; + border-bottom-right-radius: calc(0.25rem - 1px); + border-bottom-left-radius: calc(0.25rem - 1px); +} + +.card-deck { + display: -ms-flexbox; + display: flex; + -ms-flex-direction: column; + flex-direction: column; +} + +.card-deck .card { + margin-bottom: 15px; +} + +@media (min-width: 576px) { + .card-deck { + -ms-flex-flow: row wrap; + flex-flow: row wrap; + margin-right: -15px; + margin-left: -15px; + } + .card-deck .card { + display: -ms-flexbox; + display: flex; + -ms-flex: 1 0 0%; + flex: 1 0 0%; + -ms-flex-direction: column; + flex-direction: column; + margin-right: 15px; + margin-bottom: 0; + margin-left: 15px; + } +} + +.card-group { + display: -ms-flexbox; + display: flex; + -ms-flex-direction: column; + flex-direction: column; +} + +.card-group > .card { + margin-bottom: 15px; +} + +@media (min-width: 576px) { + .card-group { + -ms-flex-flow: row wrap; + flex-flow: row wrap; + } + .card-group > .card { + -ms-flex: 1 0 0%; + flex: 1 0 0%; + margin-bottom: 0; + } + .card-group > .card + .card { + margin-left: 0; + border-left: 0; + } + .card-group > .card:not(:last-child) { + border-top-right-radius: 0; + border-bottom-right-radius: 0; + } + .card-group > .card:not(:last-child) .card-img-top, + .card-group > .card:not(:last-child) .card-header { + border-top-right-radius: 0; + } + .card-group > .card:not(:last-child) .card-img-bottom, + .card-group > .card:not(:last-child) .card-footer { + border-bottom-right-radius: 0; + } + .card-group > .card:not(:first-child) { + border-top-left-radius: 0; + border-bottom-left-radius: 0; + } + .card-group > .card:not(:first-child) .card-img-top, + .card-group > .card:not(:first-child) .card-header { + border-top-left-radius: 0; + } + .card-group > .card:not(:first-child) .card-img-bottom, + .card-group > .card:not(:first-child) .card-footer { + border-bottom-left-radius: 0; + } +} + +.card-columns .card { + margin-bottom: 0.75rem; +} + +@media (min-width: 576px) { + .card-columns { + -webkit-column-count: 3; + -moz-column-count: 3; + 
column-count: 3; + -webkit-column-gap: 1.25rem; + -moz-column-gap: 1.25rem; + column-gap: 1.25rem; + orphans: 1; + widows: 1; + } + .card-columns .card { + display: inline-block; + width: 100%; + } +} + +.accordion > .card { + overflow: hidden; +} + +.accordion > .card:not(:first-of-type) .card-header:first-child { + border-radius: 0; +} + +.accordion > .card:not(:first-of-type):not(:last-of-type) { + border-bottom: 0; + border-radius: 0; +} + +.accordion > .card:first-of-type { + border-bottom: 0; + border-bottom-right-radius: 0; + border-bottom-left-radius: 0; +} + +.accordion > .card:last-of-type { + border-top-left-radius: 0; + border-top-right-radius: 0; +} + +.accordion > .card .card-header { + margin-bottom: -1px; +} + +.breadcrumb { + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + padding: 0.75rem 1rem; + margin-bottom: 1rem; + list-style: none; + background-color: #e9ecef; + border-radius: 0.25rem; +} + +.breadcrumb-item + .breadcrumb-item { + padding-left: 0.5rem; +} + +.breadcrumb-item + .breadcrumb-item::before { + display: inline-block; + padding-right: 0.5rem; + color: #6c757d; + content: "/"; +} + +.breadcrumb-item + .breadcrumb-item:hover::before { + text-decoration: underline; +} + +.breadcrumb-item + .breadcrumb-item:hover::before { + text-decoration: none; +} + +.breadcrumb-item.active { + color: #6c757d; +} + +.pagination { + display: -ms-flexbox; + display: flex; + padding-left: 0; + list-style: none; + border-radius: 0.25rem; +} + +.page-link { + position: relative; + display: block; + padding: 0.5rem 0.75rem; + margin-left: -1px; + line-height: 1.25; + color: #007bff; + background-color: #fff; + border: 1px solid #dee2e6; +} + +.page-link:hover { + z-index: 2; + color: #0056b3; + text-decoration: none; + background-color: #e9ecef; + border-color: #dee2e6; +} + +.page-link:focus { + z-index: 2; + outline: 0; + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} + +.page-item:first-child .page-link { + margin-left: 0; + border-top-left-radius: 0.25rem; + border-bottom-left-radius: 0.25rem; +} + +.page-item:last-child .page-link { + border-top-right-radius: 0.25rem; + border-bottom-right-radius: 0.25rem; +} + +.page-item.active .page-link { + z-index: 1; + color: #fff; + background-color: #007bff; + border-color: #007bff; +} + +.page-item.disabled .page-link { + color: #6c757d; + pointer-events: none; + cursor: auto; + background-color: #fff; + border-color: #dee2e6; +} + +.pagination-lg .page-link { + padding: 0.75rem 1.5rem; + font-size: 1.25rem; + line-height: 1.5; +} + +.pagination-lg .page-item:first-child .page-link { + border-top-left-radius: 0.3rem; + border-bottom-left-radius: 0.3rem; +} + +.pagination-lg .page-item:last-child .page-link { + border-top-right-radius: 0.3rem; + border-bottom-right-radius: 0.3rem; +} + +.pagination-sm .page-link { + padding: 0.25rem 0.5rem; + font-size: 0.875rem; + line-height: 1.5; +} + +.pagination-sm .page-item:first-child .page-link { + border-top-left-radius: 0.2rem; + border-bottom-left-radius: 0.2rem; +} + +.pagination-sm .page-item:last-child .page-link { + border-top-right-radius: 0.2rem; + border-bottom-right-radius: 0.2rem; +} + +.badge { + display: inline-block; + padding: 0.25em 0.4em; + font-size: 75%; + font-weight: 700; + line-height: 1; + text-align: center; + white-space: nowrap; + vertical-align: baseline; + border-radius: 0.25rem; + transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; +} + 
+@media (prefers-reduced-motion: reduce) { + .badge { + transition: none; + } +} + +a.badge:hover, a.badge:focus { + text-decoration: none; +} + +.badge:empty { + display: none; +} + +.btn .badge { + position: relative; + top: -1px; +} + +.badge-pill { + padding-right: 0.6em; + padding-left: 0.6em; + border-radius: 10rem; +} + +.badge-primary { + color: #fff; + background-color: #007bff; +} + +a.badge-primary:hover, a.badge-primary:focus { + color: #fff; + background-color: #0062cc; +} + +a.badge-primary:focus, a.badge-primary.focus { + outline: 0; + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); +} + +.badge-secondary { + color: #fff; + background-color: #6c757d; +} + +a.badge-secondary:hover, a.badge-secondary:focus { + color: #fff; + background-color: #545b62; +} + +a.badge-secondary:focus, a.badge-secondary.focus { + outline: 0; + box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); +} + +.badge-success { + color: #fff; + background-color: #28a745; +} + +a.badge-success:hover, a.badge-success:focus { + color: #fff; + background-color: #1e7e34; +} + +a.badge-success:focus, a.badge-success.focus { + outline: 0; + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); +} + +.badge-info { + color: #fff; + background-color: #17a2b8; +} + +a.badge-info:hover, a.badge-info:focus { + color: #fff; + background-color: #117a8b; +} + +a.badge-info:focus, a.badge-info.focus { + outline: 0; + box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); +} + +.badge-warning { + color: #212529; + background-color: #ffc107; +} + +a.badge-warning:hover, a.badge-warning:focus { + color: #212529; + background-color: #d39e00; +} + +a.badge-warning:focus, a.badge-warning.focus { + outline: 0; + box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); +} + +.badge-danger { + color: #fff; + background-color: #dc3545; +} + +a.badge-danger:hover, a.badge-danger:focus { + color: #fff; + background-color: #bd2130; +} + +a.badge-danger:focus, a.badge-danger.focus { + outline: 0; + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); +} + +.badge-light { + color: #212529; + background-color: #f8f9fa; +} + +a.badge-light:hover, a.badge-light:focus { + color: #212529; + background-color: #dae0e5; +} + +a.badge-light:focus, a.badge-light.focus { + outline: 0; + box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); +} + +.badge-dark { + color: #fff; + background-color: #343a40; +} + +a.badge-dark:hover, a.badge-dark:focus { + color: #fff; + background-color: #1d2124; +} + +a.badge-dark:focus, a.badge-dark.focus { + outline: 0; + box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); +} + +.jumbotron { + padding: 2rem 1rem; + margin-bottom: 2rem; + background-color: #e9ecef; + border-radius: 0.3rem; +} + +@media (min-width: 576px) { + .jumbotron { + padding: 4rem 2rem; + } +} + +.jumbotron-fluid { + padding-right: 0; + padding-left: 0; + border-radius: 0; +} + +.alert { + position: relative; + padding: 0.75rem 1.25rem; + margin-bottom: 1rem; + border: 1px solid transparent; + border-radius: 0.25rem; +} + +.alert-heading { + color: inherit; +} + +.alert-link { + font-weight: 700; +} + +.alert-dismissible { + padding-right: 4rem; +} + +.alert-dismissible .close { + position: absolute; + top: 0; + right: 0; + padding: 0.75rem 1.25rem; + color: inherit; +} + +.alert-primary { + color: #004085; + background-color: #cce5ff; + border-color: #b8daff; +} + +.alert-primary hr { + border-top-color: #9fcdff; +} + +.alert-primary .alert-link { + color: #002752; +} + +.alert-secondary { + color: #383d41; + background-color: #e2e3e5; + border-color: #d6d8db; +} + 
+.alert-secondary hr { + border-top-color: #c8cbcf; +} + +.alert-secondary .alert-link { + color: #202326; +} + +.alert-success { + color: #155724; + background-color: #d4edda; + border-color: #c3e6cb; +} + +.alert-success hr { + border-top-color: #b1dfbb; +} + +.alert-success .alert-link { + color: #0b2e13; +} + +.alert-info { + color: #0c5460; + background-color: #d1ecf1; + border-color: #bee5eb; +} + +.alert-info hr { + border-top-color: #abdde5; +} + +.alert-info .alert-link { + color: #062c33; +} + +.alert-warning { + color: #856404; + background-color: #fff3cd; + border-color: #ffeeba; +} + +.alert-warning hr { + border-top-color: #ffe8a1; +} + +.alert-warning .alert-link { + color: #533f03; +} + +.alert-danger { + color: #721c24; + background-color: #f8d7da; + border-color: #f5c6cb; +} + +.alert-danger hr { + border-top-color: #f1b0b7; +} + +.alert-danger .alert-link { + color: #491217; +} + +.alert-light { + color: #818182; + background-color: #fefefe; + border-color: #fdfdfe; +} + +.alert-light hr { + border-top-color: #ececf6; +} + +.alert-light .alert-link { + color: #686868; +} + +.alert-dark { + color: #1b1e21; + background-color: #d6d8d9; + border-color: #c6c8ca; +} + +.alert-dark hr { + border-top-color: #b9bbbe; +} + +.alert-dark .alert-link { + color: #040505; +} + +@-webkit-keyframes progress-bar-stripes { + from { + background-position: 1rem 0; + } + to { + background-position: 0 0; + } +} + +@keyframes progress-bar-stripes { + from { + background-position: 1rem 0; + } + to { + background-position: 0 0; + } +} + +.progress { + display: -ms-flexbox; + display: flex; + height: 1rem; + overflow: hidden; + font-size: 0.75rem; + background-color: #e9ecef; + border-radius: 0.25rem; +} + +.progress-bar { + display: -ms-flexbox; + display: flex; + -ms-flex-direction: column; + flex-direction: column; + -ms-flex-pack: center; + justify-content: center; + color: #fff; + text-align: center; + white-space: nowrap; + background-color: #007bff; + transition: width 0.6s ease; +} + +@media (prefers-reduced-motion: reduce) { + .progress-bar { + transition: none; + } +} + +.progress-bar-striped { + background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); + background-size: 1rem 1rem; +} + +.progress-bar-animated { + -webkit-animation: progress-bar-stripes 1s linear infinite; + animation: progress-bar-stripes 1s linear infinite; +} + +@media (prefers-reduced-motion: reduce) { + .progress-bar-animated { + -webkit-animation: none; + animation: none; + } +} + +.media { + display: -ms-flexbox; + display: flex; + -ms-flex-align: start; + align-items: flex-start; +} + +.media-body { + -ms-flex: 1; + flex: 1; +} + +.list-group { + display: -ms-flexbox; + display: flex; + -ms-flex-direction: column; + flex-direction: column; + padding-left: 0; + margin-bottom: 0; +} + +.list-group-item-action { + width: 100%; + color: #495057; + text-align: inherit; +} + +.list-group-item-action:hover, .list-group-item-action:focus { + z-index: 1; + color: #495057; + text-decoration: none; + background-color: #f8f9fa; +} + +.list-group-item-action:active { + color: #212529; + background-color: #e9ecef; +} + +.list-group-item { + position: relative; + display: block; + padding: 0.75rem 1.25rem; + margin-bottom: -1px; + background-color: #fff; + border: 1px solid rgba(0, 0, 0, 0.125); +} + +.list-group-item:first-child { + border-top-left-radius: 0.25rem; + 
border-top-right-radius: 0.25rem; +} + +.list-group-item:last-child { + margin-bottom: 0; + border-bottom-right-radius: 0.25rem; + border-bottom-left-radius: 0.25rem; +} + +.list-group-item.disabled, .list-group-item:disabled { + color: #6c757d; + pointer-events: none; + background-color: #fff; +} + +.list-group-item.active { + z-index: 2; + color: #fff; + background-color: #007bff; + border-color: #007bff; +} + +.list-group-horizontal { + -ms-flex-direction: row; + flex-direction: row; +} + +.list-group-horizontal .list-group-item { + margin-right: -1px; + margin-bottom: 0; +} + +.list-group-horizontal .list-group-item:first-child { + border-top-left-radius: 0.25rem; + border-bottom-left-radius: 0.25rem; + border-top-right-radius: 0; +} + +.list-group-horizontal .list-group-item:last-child { + margin-right: 0; + border-top-right-radius: 0.25rem; + border-bottom-right-radius: 0.25rem; + border-bottom-left-radius: 0; +} + +@media (min-width: 576px) { + .list-group-horizontal-sm { + -ms-flex-direction: row; + flex-direction: row; + } + .list-group-horizontal-sm .list-group-item { + margin-right: -1px; + margin-bottom: 0; + } + .list-group-horizontal-sm .list-group-item:first-child { + border-top-left-radius: 0.25rem; + border-bottom-left-radius: 0.25rem; + border-top-right-radius: 0; + } + .list-group-horizontal-sm .list-group-item:last-child { + margin-right: 0; + border-top-right-radius: 0.25rem; + border-bottom-right-radius: 0.25rem; + border-bottom-left-radius: 0; + } +} + +@media (min-width: 768px) { + .list-group-horizontal-md { + -ms-flex-direction: row; + flex-direction: row; + } + .list-group-horizontal-md .list-group-item { + margin-right: -1px; + margin-bottom: 0; + } + .list-group-horizontal-md .list-group-item:first-child { + border-top-left-radius: 0.25rem; + border-bottom-left-radius: 0.25rem; + border-top-right-radius: 0; + } + .list-group-horizontal-md .list-group-item:last-child { + margin-right: 0; + border-top-right-radius: 0.25rem; + border-bottom-right-radius: 0.25rem; + border-bottom-left-radius: 0; + } +} + +@media (min-width: 992px) { + .list-group-horizontal-lg { + -ms-flex-direction: row; + flex-direction: row; + } + .list-group-horizontal-lg .list-group-item { + margin-right: -1px; + margin-bottom: 0; + } + .list-group-horizontal-lg .list-group-item:first-child { + border-top-left-radius: 0.25rem; + border-bottom-left-radius: 0.25rem; + border-top-right-radius: 0; + } + .list-group-horizontal-lg .list-group-item:last-child { + margin-right: 0; + border-top-right-radius: 0.25rem; + border-bottom-right-radius: 0.25rem; + border-bottom-left-radius: 0; + } +} + +@media (min-width: 1200px) { + .list-group-horizontal-xl { + -ms-flex-direction: row; + flex-direction: row; + } + .list-group-horizontal-xl .list-group-item { + margin-right: -1px; + margin-bottom: 0; + } + .list-group-horizontal-xl .list-group-item:first-child { + border-top-left-radius: 0.25rem; + border-bottom-left-radius: 0.25rem; + border-top-right-radius: 0; + } + .list-group-horizontal-xl .list-group-item:last-child { + margin-right: 0; + border-top-right-radius: 0.25rem; + border-bottom-right-radius: 0.25rem; + border-bottom-left-radius: 0; + } +} + +.list-group-flush .list-group-item { + border-right: 0; + border-left: 0; + border-radius: 0; +} + +.list-group-flush .list-group-item:last-child { + margin-bottom: -1px; +} + +.list-group-flush:first-child .list-group-item:first-child { + border-top: 0; +} + +.list-group-flush:last-child .list-group-item:last-child { + margin-bottom: 0; + border-bottom: 
0; +} + +.list-group-item-primary { + color: #004085; + background-color: #b8daff; +} + +.list-group-item-primary.list-group-item-action:hover, .list-group-item-primary.list-group-item-action:focus { + color: #004085; + background-color: #9fcdff; +} + +.list-group-item-primary.list-group-item-action.active { + color: #fff; + background-color: #004085; + border-color: #004085; +} + +.list-group-item-secondary { + color: #383d41; + background-color: #d6d8db; +} + +.list-group-item-secondary.list-group-item-action:hover, .list-group-item-secondary.list-group-item-action:focus { + color: #383d41; + background-color: #c8cbcf; +} + +.list-group-item-secondary.list-group-item-action.active { + color: #fff; + background-color: #383d41; + border-color: #383d41; +} + +.list-group-item-success { + color: #155724; + background-color: #c3e6cb; +} + +.list-group-item-success.list-group-item-action:hover, .list-group-item-success.list-group-item-action:focus { + color: #155724; + background-color: #b1dfbb; +} + +.list-group-item-success.list-group-item-action.active { + color: #fff; + background-color: #155724; + border-color: #155724; +} + +.list-group-item-info { + color: #0c5460; + background-color: #bee5eb; +} + +.list-group-item-info.list-group-item-action:hover, .list-group-item-info.list-group-item-action:focus { + color: #0c5460; + background-color: #abdde5; +} + +.list-group-item-info.list-group-item-action.active { + color: #fff; + background-color: #0c5460; + border-color: #0c5460; +} + +.list-group-item-warning { + color: #856404; + background-color: #ffeeba; +} + +.list-group-item-warning.list-group-item-action:hover, .list-group-item-warning.list-group-item-action:focus { + color: #856404; + background-color: #ffe8a1; +} + +.list-group-item-warning.list-group-item-action.active { + color: #fff; + background-color: #856404; + border-color: #856404; +} + +.list-group-item-danger { + color: #721c24; + background-color: #f5c6cb; +} + +.list-group-item-danger.list-group-item-action:hover, .list-group-item-danger.list-group-item-action:focus { + color: #721c24; + background-color: #f1b0b7; +} + +.list-group-item-danger.list-group-item-action.active { + color: #fff; + background-color: #721c24; + border-color: #721c24; +} + +.list-group-item-light { + color: #818182; + background-color: #fdfdfe; +} + +.list-group-item-light.list-group-item-action:hover, .list-group-item-light.list-group-item-action:focus { + color: #818182; + background-color: #ececf6; +} + +.list-group-item-light.list-group-item-action.active { + color: #fff; + background-color: #818182; + border-color: #818182; +} + +.list-group-item-dark { + color: #1b1e21; + background-color: #c6c8ca; +} + +.list-group-item-dark.list-group-item-action:hover, .list-group-item-dark.list-group-item-action:focus { + color: #1b1e21; + background-color: #b9bbbe; +} + +.list-group-item-dark.list-group-item-action.active { + color: #fff; + background-color: #1b1e21; + border-color: #1b1e21; +} + +.close { + float: right; + font-size: 1.5rem; + font-weight: 700; + line-height: 1; + color: #000; + text-shadow: 0 1px 0 #fff; + opacity: .5; +} + +.close:hover { + color: #000; + text-decoration: none; +} + +.close:not(:disabled):not(.disabled):hover, .close:not(:disabled):not(.disabled):focus { + opacity: .75; +} + +button.close { + padding: 0; + background-color: transparent; + border: 0; + -webkit-appearance: none; + -moz-appearance: none; + appearance: none; +} + +a.close.disabled { + pointer-events: none; +} + +.toast { + max-width: 350px; + overflow: 
hidden; + font-size: 0.875rem; + background-color: rgba(255, 255, 255, 0.85); + background-clip: padding-box; + border: 1px solid rgba(0, 0, 0, 0.1); + box-shadow: 0 0.25rem 0.75rem rgba(0, 0, 0, 0.1); + -webkit-backdrop-filter: blur(10px); + backdrop-filter: blur(10px); + opacity: 0; + border-radius: 0.25rem; +} + +.toast:not(:last-child) { + margin-bottom: 0.75rem; +} + +.toast.showing { + opacity: 1; +} + +.toast.show { + display: block; + opacity: 1; +} + +.toast.hide { + display: none; +} + +.toast-header { + display: -ms-flexbox; + display: flex; + -ms-flex-align: center; + align-items: center; + padding: 0.25rem 0.75rem; + color: #6c757d; + background-color: rgba(255, 255, 255, 0.85); + background-clip: padding-box; + border-bottom: 1px solid rgba(0, 0, 0, 0.05); +} + +.toast-body { + padding: 0.75rem; +} + +.modal-open { + overflow: hidden; +} + +.modal-open .modal { + overflow-x: hidden; + overflow-y: auto; +} + +.modal { + position: fixed; + top: 0; + left: 0; + z-index: 1050; + display: none; + width: 100%; + height: 100%; + overflow: hidden; + outline: 0; +} + +.modal-dialog { + position: relative; + width: auto; + margin: 0.5rem; + pointer-events: none; +} + +.modal.fade .modal-dialog { + transition: -webkit-transform 0.3s ease-out; + transition: transform 0.3s ease-out; + transition: transform 0.3s ease-out, -webkit-transform 0.3s ease-out; + -webkit-transform: translate(0, -50px); + transform: translate(0, -50px); +} + +@media (prefers-reduced-motion: reduce) { + .modal.fade .modal-dialog { + transition: none; + } +} + +.modal.show .modal-dialog { + -webkit-transform: none; + transform: none; +} + +.modal-dialog-scrollable { + display: -ms-flexbox; + display: flex; + max-height: calc(100% - 1rem); +} + +.modal-dialog-scrollable .modal-content { + max-height: calc(100vh - 1rem); + overflow: hidden; +} + +.modal-dialog-scrollable .modal-header, +.modal-dialog-scrollable .modal-footer { + -ms-flex-negative: 0; + flex-shrink: 0; +} + +.modal-dialog-scrollable .modal-body { + overflow-y: auto; +} + +.modal-dialog-centered { + display: -ms-flexbox; + display: flex; + -ms-flex-align: center; + align-items: center; + min-height: calc(100% - 1rem); +} + +.modal-dialog-centered::before { + display: block; + height: calc(100vh - 1rem); + content: ""; +} + +.modal-dialog-centered.modal-dialog-scrollable { + -ms-flex-direction: column; + flex-direction: column; + -ms-flex-pack: center; + justify-content: center; + height: 100%; +} + +.modal-dialog-centered.modal-dialog-scrollable .modal-content { + max-height: none; +} + +.modal-dialog-centered.modal-dialog-scrollable::before { + content: none; +} + +.modal-content { + position: relative; + display: -ms-flexbox; + display: flex; + -ms-flex-direction: column; + flex-direction: column; + width: 100%; + pointer-events: auto; + background-color: #fff; + background-clip: padding-box; + border: 1px solid rgba(0, 0, 0, 0.2); + border-radius: 0.3rem; + outline: 0; +} + +.modal-backdrop { + position: fixed; + top: 0; + left: 0; + z-index: 1040; + width: 100vw; + height: 100vh; + background-color: #000; +} + +.modal-backdrop.fade { + opacity: 0; +} + +.modal-backdrop.show { + opacity: 0.5; +} + +.modal-header { + display: -ms-flexbox; + display: flex; + -ms-flex-align: start; + align-items: flex-start; + -ms-flex-pack: justify; + justify-content: space-between; + padding: 1rem 1rem; + border-bottom: 1px solid #dee2e6; + border-top-left-radius: 0.3rem; + border-top-right-radius: 0.3rem; +} + +.modal-header .close { + padding: 1rem 1rem; + margin: 
-1rem -1rem -1rem auto; +} + +.modal-title { + margin-bottom: 0; + line-height: 1.5; +} + +.modal-body { + position: relative; + -ms-flex: 1 1 auto; + flex: 1 1 auto; + padding: 1rem; +} + +.modal-footer { + display: -ms-flexbox; + display: flex; + -ms-flex-align: center; + align-items: center; + -ms-flex-pack: end; + justify-content: flex-end; + padding: 1rem; + border-top: 1px solid #dee2e6; + border-bottom-right-radius: 0.3rem; + border-bottom-left-radius: 0.3rem; +} + +.modal-footer > :not(:first-child) { + margin-left: .25rem; +} + +.modal-footer > :not(:last-child) { + margin-right: .25rem; +} + +.modal-scrollbar-measure { + position: absolute; + top: -9999px; + width: 50px; + height: 50px; + overflow: scroll; +} + +@media (min-width: 576px) { + .modal-dialog { + max-width: 500px; + margin: 1.75rem auto; + } + .modal-dialog-scrollable { + max-height: calc(100% - 3.5rem); + } + .modal-dialog-scrollable .modal-content { + max-height: calc(100vh - 3.5rem); + } + .modal-dialog-centered { + min-height: calc(100% - 3.5rem); + } + .modal-dialog-centered::before { + height: calc(100vh - 3.5rem); + } + .modal-sm { + max-width: 300px; + } +} + +@media (min-width: 992px) { + .modal-lg, + .modal-xl { + max-width: 800px; + } +} + +@media (min-width: 1200px) { + .modal-xl { + max-width: 1140px; + } +} + +.tooltip { + position: absolute; + z-index: 1070; + display: block; + margin: 0; + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji"; + font-style: normal; + font-weight: 400; + line-height: 1.5; + text-align: left; + text-align: start; + text-decoration: none; + text-shadow: none; + text-transform: none; + letter-spacing: normal; + word-break: normal; + word-spacing: normal; + white-space: normal; + line-break: auto; + font-size: 0.875rem; + word-wrap: break-word; + opacity: 0; +} + +.tooltip.show { + opacity: 0.9; +} + +.tooltip .arrow { + position: absolute; + display: block; + width: 0.8rem; + height: 0.4rem; +} + +.tooltip .arrow::before { + position: absolute; + content: ""; + border-color: transparent; + border-style: solid; +} + +.bs-tooltip-top, .bs-tooltip-auto[x-placement^="top"] { + padding: 0.4rem 0; +} + +.bs-tooltip-top .arrow, .bs-tooltip-auto[x-placement^="top"] .arrow { + bottom: 0; +} + +.bs-tooltip-top .arrow::before, .bs-tooltip-auto[x-placement^="top"] .arrow::before { + top: 0; + border-width: 0.4rem 0.4rem 0; + border-top-color: #000; +} + +.bs-tooltip-right, .bs-tooltip-auto[x-placement^="right"] { + padding: 0 0.4rem; +} + +.bs-tooltip-right .arrow, .bs-tooltip-auto[x-placement^="right"] .arrow { + left: 0; + width: 0.4rem; + height: 0.8rem; +} + +.bs-tooltip-right .arrow::before, .bs-tooltip-auto[x-placement^="right"] .arrow::before { + right: 0; + border-width: 0.4rem 0.4rem 0.4rem 0; + border-right-color: #000; +} + +.bs-tooltip-bottom, .bs-tooltip-auto[x-placement^="bottom"] { + padding: 0.4rem 0; +} + +.bs-tooltip-bottom .arrow, .bs-tooltip-auto[x-placement^="bottom"] .arrow { + top: 0; +} + +.bs-tooltip-bottom .arrow::before, .bs-tooltip-auto[x-placement^="bottom"] .arrow::before { + bottom: 0; + border-width: 0 0.4rem 0.4rem; + border-bottom-color: #000; +} + +.bs-tooltip-left, .bs-tooltip-auto[x-placement^="left"] { + padding: 0 0.4rem; +} + +.bs-tooltip-left .arrow, .bs-tooltip-auto[x-placement^="left"] .arrow { + right: 0; + width: 0.4rem; + height: 0.8rem; +} + +.bs-tooltip-left .arrow::before, 
.bs-tooltip-auto[x-placement^="left"] .arrow::before { + left: 0; + border-width: 0.4rem 0 0.4rem 0.4rem; + border-left-color: #000; +} + +.tooltip-inner { + max-width: 200px; + padding: 0.25rem 0.5rem; + color: #fff; + text-align: center; + background-color: #000; + border-radius: 0.25rem; +} + +.popover { + position: absolute; + top: 0; + left: 0; + z-index: 1060; + display: block; + max-width: 276px; + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji"; + font-style: normal; + font-weight: 400; + line-height: 1.5; + text-align: left; + text-align: start; + text-decoration: none; + text-shadow: none; + text-transform: none; + letter-spacing: normal; + word-break: normal; + word-spacing: normal; + white-space: normal; + line-break: auto; + font-size: 0.875rem; + word-wrap: break-word; + background-color: #fff; + background-clip: padding-box; + border: 1px solid rgba(0, 0, 0, 0.2); + border-radius: 0.3rem; +} + +.popover .arrow { + position: absolute; + display: block; + width: 1rem; + height: 0.5rem; + margin: 0 0.3rem; +} + +.popover .arrow::before, .popover .arrow::after { + position: absolute; + display: block; + content: ""; + border-color: transparent; + border-style: solid; +} + +.bs-popover-top, .bs-popover-auto[x-placement^="top"] { + margin-bottom: 0.5rem; +} + +.bs-popover-top > .arrow, .bs-popover-auto[x-placement^="top"] > .arrow { + bottom: calc((0.5rem + 1px) * -1); +} + +.bs-popover-top > .arrow::before, .bs-popover-auto[x-placement^="top"] > .arrow::before { + bottom: 0; + border-width: 0.5rem 0.5rem 0; + border-top-color: rgba(0, 0, 0, 0.25); +} + +.bs-popover-top > .arrow::after, .bs-popover-auto[x-placement^="top"] > .arrow::after { + bottom: 1px; + border-width: 0.5rem 0.5rem 0; + border-top-color: #fff; +} + +.bs-popover-right, .bs-popover-auto[x-placement^="right"] { + margin-left: 0.5rem; +} + +.bs-popover-right > .arrow, .bs-popover-auto[x-placement^="right"] > .arrow { + left: calc((0.5rem + 1px) * -1); + width: 0.5rem; + height: 1rem; + margin: 0.3rem 0; +} + +.bs-popover-right > .arrow::before, .bs-popover-auto[x-placement^="right"] > .arrow::before { + left: 0; + border-width: 0.5rem 0.5rem 0.5rem 0; + border-right-color: rgba(0, 0, 0, 0.25); +} + +.bs-popover-right > .arrow::after, .bs-popover-auto[x-placement^="right"] > .arrow::after { + left: 1px; + border-width: 0.5rem 0.5rem 0.5rem 0; + border-right-color: #fff; +} + +.bs-popover-bottom, .bs-popover-auto[x-placement^="bottom"] { + margin-top: 0.5rem; +} + +.bs-popover-bottom > .arrow, .bs-popover-auto[x-placement^="bottom"] > .arrow { + top: calc((0.5rem + 1px) * -1); +} + +.bs-popover-bottom > .arrow::before, .bs-popover-auto[x-placement^="bottom"] > .arrow::before { + top: 0; + border-width: 0 0.5rem 0.5rem 0.5rem; + border-bottom-color: rgba(0, 0, 0, 0.25); +} + +.bs-popover-bottom > .arrow::after, .bs-popover-auto[x-placement^="bottom"] > .arrow::after { + top: 1px; + border-width: 0 0.5rem 0.5rem 0.5rem; + border-bottom-color: #fff; +} + +.bs-popover-bottom .popover-header::before, .bs-popover-auto[x-placement^="bottom"] .popover-header::before { + position: absolute; + top: 0; + left: 50%; + display: block; + width: 1rem; + margin-left: -0.5rem; + content: ""; + border-bottom: 1px solid #f7f7f7; +} + +.bs-popover-left, .bs-popover-auto[x-placement^="left"] { + margin-right: 0.5rem; +} + +.bs-popover-left > .arrow, .bs-popover-auto[x-placement^="left"] > 
.arrow { + right: calc((0.5rem + 1px) * -1); + width: 0.5rem; + height: 1rem; + margin: 0.3rem 0; +} + +.bs-popover-left > .arrow::before, .bs-popover-auto[x-placement^="left"] > .arrow::before { + right: 0; + border-width: 0.5rem 0 0.5rem 0.5rem; + border-left-color: rgba(0, 0, 0, 0.25); +} + +.bs-popover-left > .arrow::after, .bs-popover-auto[x-placement^="left"] > .arrow::after { + right: 1px; + border-width: 0.5rem 0 0.5rem 0.5rem; + border-left-color: #fff; +} + +.popover-header { + padding: 0.5rem 0.75rem; + margin-bottom: 0; + font-size: 1rem; + background-color: #f7f7f7; + border-bottom: 1px solid #ebebeb; + border-top-left-radius: calc(0.3rem - 1px); + border-top-right-radius: calc(0.3rem - 1px); +} + +.popover-header:empty { + display: none; +} + +.popover-body { + padding: 0.5rem 0.75rem; + color: #212529; +} + +.carousel { + position: relative; +} + +.carousel.pointer-event { + -ms-touch-action: pan-y; + touch-action: pan-y; +} + +.carousel-inner { + position: relative; + width: 100%; + overflow: hidden; +} + +.carousel-inner::after { + display: block; + clear: both; + content: ""; +} + +.carousel-item { + position: relative; + display: none; + float: left; + width: 100%; + margin-right: -100%; + -webkit-backface-visibility: hidden; + backface-visibility: hidden; + transition: -webkit-transform 0.6s ease-in-out; + transition: transform 0.6s ease-in-out; + transition: transform 0.6s ease-in-out, -webkit-transform 0.6s ease-in-out; +} + +@media (prefers-reduced-motion: reduce) { + .carousel-item { + transition: none; + } +} + +.carousel-item.active, +.carousel-item-next, +.carousel-item-prev { + display: block; +} + +.carousel-item-next:not(.carousel-item-left), +.active.carousel-item-right { + -webkit-transform: translateX(100%); + transform: translateX(100%); +} + +.carousel-item-prev:not(.carousel-item-right), +.active.carousel-item-left { + -webkit-transform: translateX(-100%); + transform: translateX(-100%); +} + +.carousel-fade .carousel-item { + opacity: 0; + transition-property: opacity; + -webkit-transform: none; + transform: none; +} + +.carousel-fade .carousel-item.active, +.carousel-fade .carousel-item-next.carousel-item-left, +.carousel-fade .carousel-item-prev.carousel-item-right { + z-index: 1; + opacity: 1; +} + +.carousel-fade .active.carousel-item-left, +.carousel-fade .active.carousel-item-right { + z-index: 0; + opacity: 0; + transition: 0s 0.6s opacity; +} + +@media (prefers-reduced-motion: reduce) { + .carousel-fade .active.carousel-item-left, + .carousel-fade .active.carousel-item-right { + transition: none; + } +} + +.carousel-control-prev, +.carousel-control-next { + position: absolute; + top: 0; + bottom: 0; + z-index: 1; + display: -ms-flexbox; + display: flex; + -ms-flex-align: center; + align-items: center; + -ms-flex-pack: center; + justify-content: center; + width: 15%; + color: #fff; + text-align: center; + opacity: 0.5; + transition: opacity 0.15s ease; +} + +@media (prefers-reduced-motion: reduce) { + .carousel-control-prev, + .carousel-control-next { + transition: none; + } +} + +.carousel-control-prev:hover, .carousel-control-prev:focus, +.carousel-control-next:hover, +.carousel-control-next:focus { + color: #fff; + text-decoration: none; + outline: 0; + opacity: 0.9; +} + +.carousel-control-prev { + left: 0; +} + +.carousel-control-next { + right: 0; +} + +.carousel-control-prev-icon, +.carousel-control-next-icon { + display: inline-block; + width: 20px; + height: 20px; + background: no-repeat 50% / 100% 100%; +} + +.carousel-control-prev-icon 
{
+  background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' viewBox='0 0 8 8'%3e%3cpath d='M5.25 0l-4 4 4 4 1.5-1.5-2.5-2.5 2.5-2.5-1.5-1.5z'/%3e%3c/svg%3e");
+}
+
+.carousel-control-next-icon {
+  background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' viewBox='0 0 8 8'%3e%3cpath d='M2.75 0l-1.5 1.5 2.5 2.5-2.5 2.5 1.5 1.5 4-4-4-4z'/%3e%3c/svg%3e");
+}
+
+.carousel-indicators {
+  position: absolute;
+  right: 0;
+  bottom: 0;
+  left: 0;
+  z-index: 15;
+  display: -ms-flexbox;
+  display: flex;
+  -ms-flex-pack: center;
+  justify-content: center;
+  padding-left: 0;
+  margin-right: 15%;
+  margin-left: 15%;
+  list-style: none;
+}
+
+.carousel-indicators li {
+  box-sizing: content-box;
+  -ms-flex: 0 1 auto;
+  flex: 0 1 auto;
+  width: 30px;
+  height: 3px;
+  margin-right: 3px;
+  margin-left: 3px;
+  text-indent: -999px;
+  cursor: pointer;
+  background-color: #fff;
+  background-clip: padding-box;
+  border-top: 10px solid transparent;
+  border-bottom: 10px solid transparent;
+  opacity: .5;
+  transition: opacity 0.6s ease;
+}
+
+@media (prefers-reduced-motion: reduce) {
+  .carousel-indicators li {
+    transition: none;
+  }
+}
+
+.carousel-indicators .active {
+  opacity: 1;
+}
+
+.carousel-caption {
+  position: absolute;
+  right: 15%;
+  bottom: 20px;
+  left: 15%;
+  z-index: 10;
+  padding-top: 20px;
+  padding-bottom: 20px;
+  color: #fff;
+  text-align: center;
+}
+
+@-webkit-keyframes spinner-border {
+  to {
+    -webkit-transform: rotate(360deg);
+    transform: rotate(360deg);
+  }
+}
+
+@keyframes spinner-border {
+  to {
+    -webkit-transform: rotate(360deg);
+    transform: rotate(360deg);
+  }
+}
+
+.spinner-border {
+  display: inline-block;
+  width: 2rem;
+  height: 2rem;
+  vertical-align: text-bottom;
+  border: 0.25em solid currentColor;
+  border-right-color: transparent;
+  border-radius: 50%;
+  -webkit-animation: spinner-border .75s linear infinite;
+  animation: spinner-border .75s linear infinite;
+}
+
+.spinner-border-sm {
+  width: 1rem;
+  height: 1rem;
+  border-width: 0.2em;
+}
+
+@-webkit-keyframes spinner-grow {
+  0% {
+    -webkit-transform: scale(0);
+    transform: scale(0);
+  }
+  50% {
+    opacity: 1;
+  }
+}
+
+@keyframes spinner-grow {
+  0% {
+    -webkit-transform: scale(0);
+    transform: scale(0);
+  }
+  50% {
+    opacity: 1;
+  }
+}
+
+.spinner-grow {
+  display: inline-block;
+  width: 2rem;
+  height: 2rem;
+  vertical-align: text-bottom;
+  background-color: currentColor;
+  border-radius: 50%;
+  opacity: 0;
+  -webkit-animation: spinner-grow .75s linear infinite;
+  animation: spinner-grow .75s linear infinite;
+}
+
+.spinner-grow-sm {
+  width: 1rem;
+  height: 1rem;
+}
+
+.align-baseline {
+  vertical-align: baseline !important;
+}
+
+.align-top {
+  vertical-align: top !important;
+}
+
+.align-middle {
+  vertical-align: middle !important;
+}
+
+.align-bottom {
+  vertical-align: bottom !important;
+}
+
+.align-text-bottom {
+  vertical-align: text-bottom !important;
+}
+
+.align-text-top {
+  vertical-align: text-top !important;
+}
+
+.bg-primary {
+  background-color: #007bff !important;
+}
+
+a.bg-primary:hover, a.bg-primary:focus,
+button.bg-primary:hover,
+button.bg-primary:focus {
+  background-color: #0062cc !important;
+}
+
+.bg-secondary {
+  background-color: #6c757d !important;
+}
+
+a.bg-secondary:hover, a.bg-secondary:focus,
+button.bg-secondary:hover,
+button.bg-secondary:focus {
+  background-color: #545b62 !important;
+}
+
+.bg-success {
+  background-color: #28a745 !important;
+}
+
+a.bg-success:hover, a.bg-success:focus,
+button.bg-success:hover,
+button.bg-success:focus {
+ 
background-color: #1e7e34 !important; +} + +.bg-info { + background-color: #17a2b8 !important; +} + +a.bg-info:hover, a.bg-info:focus, +button.bg-info:hover, +button.bg-info:focus { + background-color: #117a8b !important; +} + +.bg-warning { + background-color: #fddd7c !important; +} + +a.bg-warning:hover, a.bg-warning:focus, +button.bg-warning:hover, +button.bg-warning:focus { + background-color: #d39e00 !important; +} + +.bg-danger { + background-color: #dc3545 !important; +} + +a.bg-danger:hover, a.bg-danger:focus, +button.bg-danger:hover, +button.bg-danger:focus { + background-color: #bd2130 !important; +} + +.bg-light { + background-color: #f8f9fa !important; +} + +a.bg-light:hover, a.bg-light:focus, +button.bg-light:hover, +button.bg-light:focus { + background-color: #dae0e5 !important; +} + +.bg-dark { + background-color: #343a40 !important; +} + +a.bg-dark:hover, a.bg-dark:focus, +button.bg-dark:hover, +button.bg-dark:focus { + background-color: #1d2124 !important; +} + +.bg-white { + background-color: #fff !important; +} + +.bg-transparent { + background-color: transparent !important; +} + +.border { + border: 1px solid #dee2e6 !important; +} + +.border-top { + border-top: 1px solid #dee2e6 !important; +} + +.border-right { + border-right: 1px solid #dee2e6 !important; +} + +.border-bottom { + border-bottom: 1px solid #dee2e6 !important; +} + +.border-left { + border-left: 1px solid #dee2e6 !important; +} + +.border-0 { + border: 0 !important; +} + +.border-top-0 { + border-top: 0 !important; +} + +.border-right-0 { + border-right: 0 !important; +} + +.border-bottom-0 { + border-bottom: 0 !important; +} + +.border-left-0 { + border-left: 0 !important; +} + +.border-primary { + border-color: #007bff !important; +} + +.border-secondary { + border-color: #6c757d !important; +} + +.border-success { + border-color: #28a745 !important; +} + +.border-info { + border-color: #17a2b8 !important; +} + +.border-warning { + border-color: #ffc107 !important; +} + +.border-danger { + border-color: #dc3545 !important; +} + +.border-light { + border-color: #f8f9fa !important; +} + +.border-dark { + border-color: #343a40 !important; +} + +.border-white { + border-color: #fff !important; +} + +.rounded-sm { + border-radius: 0.2rem !important; +} + +.rounded { + border-radius: 0.25rem !important; +} + +.rounded-top { + border-top-left-radius: 0.25rem !important; + border-top-right-radius: 0.25rem !important; +} + +.rounded-right { + border-top-right-radius: 0.25rem !important; + border-bottom-right-radius: 0.25rem !important; +} + +.rounded-bottom { + border-bottom-right-radius: 0.25rem !important; + border-bottom-left-radius: 0.25rem !important; +} + +.rounded-left { + border-top-left-radius: 0.25rem !important; + border-bottom-left-radius: 0.25rem !important; +} + +.rounded-lg { + border-radius: 0.3rem !important; +} + +.rounded-circle { + border-radius: 50% !important; +} + +.rounded-pill { + border-radius: 50rem !important; +} + +.rounded-0 { + border-radius: 0 !important; +} + +.clearfix::after { + display: block; + clear: both; + content: ""; +} + +.d-none { + display: none !important; +} + +.d-inline { + display: inline !important; +} + +.d-inline-block { + display: inline-block !important; +} + +.d-block { + display: block !important; +} + +.d-table { + display: table !important; +} + +.d-table-row { + display: table-row !important; +} + +.d-table-cell { + display: table-cell !important; +} + +.d-flex { + display: -ms-flexbox !important; + display: flex !important; +} + +.d-inline-flex { + 
display: -ms-inline-flexbox !important; + display: inline-flex !important; +} + +@media (min-width: 576px) { + .d-sm-none { + display: none !important; + } + .d-sm-inline { + display: inline !important; + } + .d-sm-inline-block { + display: inline-block !important; + } + .d-sm-block { + display: block !important; + } + .d-sm-table { + display: table !important; + } + .d-sm-table-row { + display: table-row !important; + } + .d-sm-table-cell { + display: table-cell !important; + } + .d-sm-flex { + display: -ms-flexbox !important; + display: flex !important; + } + .d-sm-inline-flex { + display: -ms-inline-flexbox !important; + display: inline-flex !important; + } +} + +@media (min-width: 768px) { + .d-md-none { + display: none !important; + } + .d-md-inline { + display: inline !important; + } + .d-md-inline-block { + display: inline-block !important; + } + .d-md-block { + display: block !important; + } + .d-md-table { + display: table !important; + } + .d-md-table-row { + display: table-row !important; + } + .d-md-table-cell { + display: table-cell !important; + } + .d-md-flex { + display: -ms-flexbox !important; + display: flex !important; + } + .d-md-inline-flex { + display: -ms-inline-flexbox !important; + display: inline-flex !important; + } +} + +@media (min-width: 992px) { + .d-lg-none { + display: none !important; + } + .d-lg-inline { + display: inline !important; + } + .d-lg-inline-block { + display: inline-block !important; + } + .d-lg-block { + display: block !important; + } + .d-lg-table { + display: table !important; + } + .d-lg-table-row { + display: table-row !important; + } + .d-lg-table-cell { + display: table-cell !important; + } + .d-lg-flex { + display: -ms-flexbox !important; + display: flex !important; + } + .d-lg-inline-flex { + display: -ms-inline-flexbox !important; + display: inline-flex !important; + } +} + +@media (min-width: 1200px) { + .d-xl-none { + display: none !important; + } + .d-xl-inline { + display: inline !important; + } + .d-xl-inline-block { + display: inline-block !important; + } + .d-xl-block { + display: block !important; + } + .d-xl-table { + display: table !important; + } + .d-xl-table-row { + display: table-row !important; + } + .d-xl-table-cell { + display: table-cell !important; + } + .d-xl-flex { + display: -ms-flexbox !important; + display: flex !important; + } + .d-xl-inline-flex { + display: -ms-inline-flexbox !important; + display: inline-flex !important; + } +} + +@media print { + .d-print-none { + display: none !important; + } + .d-print-inline { + display: inline !important; + } + .d-print-inline-block { + display: inline-block !important; + } + .d-print-block { + display: block !important; + } + .d-print-table { + display: table !important; + } + .d-print-table-row { + display: table-row !important; + } + .d-print-table-cell { + display: table-cell !important; + } + .d-print-flex { + display: -ms-flexbox !important; + display: flex !important; + } + .d-print-inline-flex { + display: -ms-inline-flexbox !important; + display: inline-flex !important; + } +} + +.embed-responsive { + position: relative; + display: block; + width: 100%; + padding: 0; + overflow: hidden; +} + +.embed-responsive::before { + display: block; + content: ""; +} + +.embed-responsive .embed-responsive-item, +.embed-responsive iframe, +.embed-responsive embed, +.embed-responsive object, +.embed-responsive video { + position: absolute; + top: 0; + bottom: 0; + left: 0; + width: 100%; + height: 100%; + border: 0; +} + +.embed-responsive-21by9::before { + padding-top: 
42.857143%; +} + +.embed-responsive-16by9::before { + padding-top: 56.25%; +} + +.embed-responsive-4by3::before { + padding-top: 75%; +} + +.embed-responsive-1by1::before { + padding-top: 100%; +} + +.flex-row { + -ms-flex-direction: row !important; + flex-direction: row !important; +} + +.flex-column { + -ms-flex-direction: column !important; + flex-direction: column !important; +} + +.flex-row-reverse { + -ms-flex-direction: row-reverse !important; + flex-direction: row-reverse !important; +} + +.flex-column-reverse { + -ms-flex-direction: column-reverse !important; + flex-direction: column-reverse !important; +} + +.flex-wrap { + -ms-flex-wrap: wrap !important; + flex-wrap: wrap !important; +} + +.flex-nowrap { + -ms-flex-wrap: nowrap !important; + flex-wrap: nowrap !important; +} + +.flex-wrap-reverse { + -ms-flex-wrap: wrap-reverse !important; + flex-wrap: wrap-reverse !important; +} + +.flex-fill { + -ms-flex: 1 1 auto !important; + flex: 1 1 auto !important; +} + +.flex-grow-0 { + -ms-flex-positive: 0 !important; + flex-grow: 0 !important; +} + +.flex-grow-1 { + -ms-flex-positive: 1 !important; + flex-grow: 1 !important; +} + +.flex-shrink-0 { + -ms-flex-negative: 0 !important; + flex-shrink: 0 !important; +} + +.flex-shrink-1 { + -ms-flex-negative: 1 !important; + flex-shrink: 1 !important; +} + +.justify-content-start { + -ms-flex-pack: start !important; + justify-content: flex-start !important; +} + +.justify-content-end { + -ms-flex-pack: end !important; + justify-content: flex-end !important; +} + +.justify-content-center { + -ms-flex-pack: center !important; + justify-content: center !important; +} + +.justify-content-between { + -ms-flex-pack: justify !important; + justify-content: space-between !important; +} + +.justify-content-around { + -ms-flex-pack: distribute !important; + justify-content: space-around !important; +} + +.align-items-start { + -ms-flex-align: start !important; + align-items: flex-start !important; +} + +.align-items-end { + -ms-flex-align: end !important; + align-items: flex-end !important; +} + +.align-items-center { + -ms-flex-align: center !important; + align-items: center !important; +} + +.align-items-baseline { + -ms-flex-align: baseline !important; + align-items: baseline !important; +} + +.align-items-stretch { + -ms-flex-align: stretch !important; + align-items: stretch !important; +} + +.align-content-start { + -ms-flex-line-pack: start !important; + align-content: flex-start !important; +} + +.align-content-end { + -ms-flex-line-pack: end !important; + align-content: flex-end !important; +} + +.align-content-center { + -ms-flex-line-pack: center !important; + align-content: center !important; +} + +.align-content-between { + -ms-flex-line-pack: justify !important; + align-content: space-between !important; +} + +.align-content-around { + -ms-flex-line-pack: distribute !important; + align-content: space-around !important; +} + +.align-content-stretch { + -ms-flex-line-pack: stretch !important; + align-content: stretch !important; +} + +.align-self-auto { + -ms-flex-item-align: auto !important; + align-self: auto !important; +} + +.align-self-start { + -ms-flex-item-align: start !important; + align-self: flex-start !important; +} + +.align-self-end { + -ms-flex-item-align: end !important; + align-self: flex-end !important; +} + +.align-self-center { + -ms-flex-item-align: center !important; + align-self: center !important; +} + +.align-self-baseline { + -ms-flex-item-align: baseline !important; + align-self: baseline !important; +} + 
+.align-self-stretch { + -ms-flex-item-align: stretch !important; + align-self: stretch !important; +} + +@media (min-width: 576px) { + .flex-sm-row { + -ms-flex-direction: row !important; + flex-direction: row !important; + } + .flex-sm-column { + -ms-flex-direction: column !important; + flex-direction: column !important; + } + .flex-sm-row-reverse { + -ms-flex-direction: row-reverse !important; + flex-direction: row-reverse !important; + } + .flex-sm-column-reverse { + -ms-flex-direction: column-reverse !important; + flex-direction: column-reverse !important; + } + .flex-sm-wrap { + -ms-flex-wrap: wrap !important; + flex-wrap: wrap !important; + } + .flex-sm-nowrap { + -ms-flex-wrap: nowrap !important; + flex-wrap: nowrap !important; + } + .flex-sm-wrap-reverse { + -ms-flex-wrap: wrap-reverse !important; + flex-wrap: wrap-reverse !important; + } + .flex-sm-fill { + -ms-flex: 1 1 auto !important; + flex: 1 1 auto !important; + } + .flex-sm-grow-0 { + -ms-flex-positive: 0 !important; + flex-grow: 0 !important; + } + .flex-sm-grow-1 { + -ms-flex-positive: 1 !important; + flex-grow: 1 !important; + } + .flex-sm-shrink-0 { + -ms-flex-negative: 0 !important; + flex-shrink: 0 !important; + } + .flex-sm-shrink-1 { + -ms-flex-negative: 1 !important; + flex-shrink: 1 !important; + } + .justify-content-sm-start { + -ms-flex-pack: start !important; + justify-content: flex-start !important; + } + .justify-content-sm-end { + -ms-flex-pack: end !important; + justify-content: flex-end !important; + } + .justify-content-sm-center { + -ms-flex-pack: center !important; + justify-content: center !important; + } + .justify-content-sm-between { + -ms-flex-pack: justify !important; + justify-content: space-between !important; + } + .justify-content-sm-around { + -ms-flex-pack: distribute !important; + justify-content: space-around !important; + } + .align-items-sm-start { + -ms-flex-align: start !important; + align-items: flex-start !important; + } + .align-items-sm-end { + -ms-flex-align: end !important; + align-items: flex-end !important; + } + .align-items-sm-center { + -ms-flex-align: center !important; + align-items: center !important; + } + .align-items-sm-baseline { + -ms-flex-align: baseline !important; + align-items: baseline !important; + } + .align-items-sm-stretch { + -ms-flex-align: stretch !important; + align-items: stretch !important; + } + .align-content-sm-start { + -ms-flex-line-pack: start !important; + align-content: flex-start !important; + } + .align-content-sm-end { + -ms-flex-line-pack: end !important; + align-content: flex-end !important; + } + .align-content-sm-center { + -ms-flex-line-pack: center !important; + align-content: center !important; + } + .align-content-sm-between { + -ms-flex-line-pack: justify !important; + align-content: space-between !important; + } + .align-content-sm-around { + -ms-flex-line-pack: distribute !important; + align-content: space-around !important; + } + .align-content-sm-stretch { + -ms-flex-line-pack: stretch !important; + align-content: stretch !important; + } + .align-self-sm-auto { + -ms-flex-item-align: auto !important; + align-self: auto !important; + } + .align-self-sm-start { + -ms-flex-item-align: start !important; + align-self: flex-start !important; + } + .align-self-sm-end { + -ms-flex-item-align: end !important; + align-self: flex-end !important; + } + .align-self-sm-center { + -ms-flex-item-align: center !important; + align-self: center !important; + } + .align-self-sm-baseline { + -ms-flex-item-align: baseline !important; + align-self: 
baseline !important; + } + .align-self-sm-stretch { + -ms-flex-item-align: stretch !important; + align-self: stretch !important; + } +} + +@media (min-width: 768px) { + .flex-md-row { + -ms-flex-direction: row !important; + flex-direction: row !important; + } + .flex-md-column { + -ms-flex-direction: column !important; + flex-direction: column !important; + } + .flex-md-row-reverse { + -ms-flex-direction: row-reverse !important; + flex-direction: row-reverse !important; + } + .flex-md-column-reverse { + -ms-flex-direction: column-reverse !important; + flex-direction: column-reverse !important; + } + .flex-md-wrap { + -ms-flex-wrap: wrap !important; + flex-wrap: wrap !important; + } + .flex-md-nowrap { + -ms-flex-wrap: nowrap !important; + flex-wrap: nowrap !important; + } + .flex-md-wrap-reverse { + -ms-flex-wrap: wrap-reverse !important; + flex-wrap: wrap-reverse !important; + } + .flex-md-fill { + -ms-flex: 1 1 auto !important; + flex: 1 1 auto !important; + } + .flex-md-grow-0 { + -ms-flex-positive: 0 !important; + flex-grow: 0 !important; + } + .flex-md-grow-1 { + -ms-flex-positive: 1 !important; + flex-grow: 1 !important; + } + .flex-md-shrink-0 { + -ms-flex-negative: 0 !important; + flex-shrink: 0 !important; + } + .flex-md-shrink-1 { + -ms-flex-negative: 1 !important; + flex-shrink: 1 !important; + } + .justify-content-md-start { + -ms-flex-pack: start !important; + justify-content: flex-start !important; + } + .justify-content-md-end { + -ms-flex-pack: end !important; + justify-content: flex-end !important; + } + .justify-content-md-center { + -ms-flex-pack: center !important; + justify-content: center !important; + } + .justify-content-md-between { + -ms-flex-pack: justify !important; + justify-content: space-between !important; + } + .justify-content-md-around { + -ms-flex-pack: distribute !important; + justify-content: space-around !important; + } + .align-items-md-start { + -ms-flex-align: start !important; + align-items: flex-start !important; + } + .align-items-md-end { + -ms-flex-align: end !important; + align-items: flex-end !important; + } + .align-items-md-center { + -ms-flex-align: center !important; + align-items: center !important; + } + .align-items-md-baseline { + -ms-flex-align: baseline !important; + align-items: baseline !important; + } + .align-items-md-stretch { + -ms-flex-align: stretch !important; + align-items: stretch !important; + } + .align-content-md-start { + -ms-flex-line-pack: start !important; + align-content: flex-start !important; + } + .align-content-md-end { + -ms-flex-line-pack: end !important; + align-content: flex-end !important; + } + .align-content-md-center { + -ms-flex-line-pack: center !important; + align-content: center !important; + } + .align-content-md-between { + -ms-flex-line-pack: justify !important; + align-content: space-between !important; + } + .align-content-md-around { + -ms-flex-line-pack: distribute !important; + align-content: space-around !important; + } + .align-content-md-stretch { + -ms-flex-line-pack: stretch !important; + align-content: stretch !important; + } + .align-self-md-auto { + -ms-flex-item-align: auto !important; + align-self: auto !important; + } + .align-self-md-start { + -ms-flex-item-align: start !important; + align-self: flex-start !important; + } + .align-self-md-end { + -ms-flex-item-align: end !important; + align-self: flex-end !important; + } + .align-self-md-center { + -ms-flex-item-align: center !important; + align-self: center !important; + } + .align-self-md-baseline { + -ms-flex-item-align: 
baseline !important; + align-self: baseline !important; + } + .align-self-md-stretch { + -ms-flex-item-align: stretch !important; + align-self: stretch !important; + } +} + +@media (min-width: 992px) { + .flex-lg-row { + -ms-flex-direction: row !important; + flex-direction: row !important; + } + .flex-lg-column { + -ms-flex-direction: column !important; + flex-direction: column !important; + } + .flex-lg-row-reverse { + -ms-flex-direction: row-reverse !important; + flex-direction: row-reverse !important; + } + .flex-lg-column-reverse { + -ms-flex-direction: column-reverse !important; + flex-direction: column-reverse !important; + } + .flex-lg-wrap { + -ms-flex-wrap: wrap !important; + flex-wrap: wrap !important; + } + .flex-lg-nowrap { + -ms-flex-wrap: nowrap !important; + flex-wrap: nowrap !important; + } + .flex-lg-wrap-reverse { + -ms-flex-wrap: wrap-reverse !important; + flex-wrap: wrap-reverse !important; + } + .flex-lg-fill { + -ms-flex: 1 1 auto !important; + flex: 1 1 auto !important; + } + .flex-lg-grow-0 { + -ms-flex-positive: 0 !important; + flex-grow: 0 !important; + } + .flex-lg-grow-1 { + -ms-flex-positive: 1 !important; + flex-grow: 1 !important; + } + .flex-lg-shrink-0 { + -ms-flex-negative: 0 !important; + flex-shrink: 0 !important; + } + .flex-lg-shrink-1 { + -ms-flex-negative: 1 !important; + flex-shrink: 1 !important; + } + .justify-content-lg-start { + -ms-flex-pack: start !important; + justify-content: flex-start !important; + } + .justify-content-lg-end { + -ms-flex-pack: end !important; + justify-content: flex-end !important; + } + .justify-content-lg-center { + -ms-flex-pack: center !important; + justify-content: center !important; + } + .justify-content-lg-between { + -ms-flex-pack: justify !important; + justify-content: space-between !important; + } + .justify-content-lg-around { + -ms-flex-pack: distribute !important; + justify-content: space-around !important; + } + .align-items-lg-start { + -ms-flex-align: start !important; + align-items: flex-start !important; + } + .align-items-lg-end { + -ms-flex-align: end !important; + align-items: flex-end !important; + } + .align-items-lg-center { + -ms-flex-align: center !important; + align-items: center !important; + } + .align-items-lg-baseline { + -ms-flex-align: baseline !important; + align-items: baseline !important; + } + .align-items-lg-stretch { + -ms-flex-align: stretch !important; + align-items: stretch !important; + } + .align-content-lg-start { + -ms-flex-line-pack: start !important; + align-content: flex-start !important; + } + .align-content-lg-end { + -ms-flex-line-pack: end !important; + align-content: flex-end !important; + } + .align-content-lg-center { + -ms-flex-line-pack: center !important; + align-content: center !important; + } + .align-content-lg-between { + -ms-flex-line-pack: justify !important; + align-content: space-between !important; + } + .align-content-lg-around { + -ms-flex-line-pack: distribute !important; + align-content: space-around !important; + } + .align-content-lg-stretch { + -ms-flex-line-pack: stretch !important; + align-content: stretch !important; + } + .align-self-lg-auto { + -ms-flex-item-align: auto !important; + align-self: auto !important; + } + .align-self-lg-start { + -ms-flex-item-align: start !important; + align-self: flex-start !important; + } + .align-self-lg-end { + -ms-flex-item-align: end !important; + align-self: flex-end !important; + } + .align-self-lg-center { + -ms-flex-item-align: center !important; + align-self: center !important; + } + 
.align-self-lg-baseline { + -ms-flex-item-align: baseline !important; + align-self: baseline !important; + } + .align-self-lg-stretch { + -ms-flex-item-align: stretch !important; + align-self: stretch !important; + } +} + +@media (min-width: 1200px) { + .flex-xl-row { + -ms-flex-direction: row !important; + flex-direction: row !important; + } + .flex-xl-column { + -ms-flex-direction: column !important; + flex-direction: column !important; + } + .flex-xl-row-reverse { + -ms-flex-direction: row-reverse !important; + flex-direction: row-reverse !important; + } + .flex-xl-column-reverse { + -ms-flex-direction: column-reverse !important; + flex-direction: column-reverse !important; + } + .flex-xl-wrap { + -ms-flex-wrap: wrap !important; + flex-wrap: wrap !important; + } + .flex-xl-nowrap { + -ms-flex-wrap: nowrap !important; + flex-wrap: nowrap !important; + } + .flex-xl-wrap-reverse { + -ms-flex-wrap: wrap-reverse !important; + flex-wrap: wrap-reverse !important; + } + .flex-xl-fill { + -ms-flex: 1 1 auto !important; + flex: 1 1 auto !important; + } + .flex-xl-grow-0 { + -ms-flex-positive: 0 !important; + flex-grow: 0 !important; + } + .flex-xl-grow-1 { + -ms-flex-positive: 1 !important; + flex-grow: 1 !important; + } + .flex-xl-shrink-0 { + -ms-flex-negative: 0 !important; + flex-shrink: 0 !important; + } + .flex-xl-shrink-1 { + -ms-flex-negative: 1 !important; + flex-shrink: 1 !important; + } + .justify-content-xl-start { + -ms-flex-pack: start !important; + justify-content: flex-start !important; + } + .justify-content-xl-end { + -ms-flex-pack: end !important; + justify-content: flex-end !important; + } + .justify-content-xl-center { + -ms-flex-pack: center !important; + justify-content: center !important; + } + .justify-content-xl-between { + -ms-flex-pack: justify !important; + justify-content: space-between !important; + } + .justify-content-xl-around { + -ms-flex-pack: distribute !important; + justify-content: space-around !important; + } + .align-items-xl-start { + -ms-flex-align: start !important; + align-items: flex-start !important; + } + .align-items-xl-end { + -ms-flex-align: end !important; + align-items: flex-end !important; + } + .align-items-xl-center { + -ms-flex-align: center !important; + align-items: center !important; + } + .align-items-xl-baseline { + -ms-flex-align: baseline !important; + align-items: baseline !important; + } + .align-items-xl-stretch { + -ms-flex-align: stretch !important; + align-items: stretch !important; + } + .align-content-xl-start { + -ms-flex-line-pack: start !important; + align-content: flex-start !important; + } + .align-content-xl-end { + -ms-flex-line-pack: end !important; + align-content: flex-end !important; + } + .align-content-xl-center { + -ms-flex-line-pack: center !important; + align-content: center !important; + } + .align-content-xl-between { + -ms-flex-line-pack: justify !important; + align-content: space-between !important; + } + .align-content-xl-around { + -ms-flex-line-pack: distribute !important; + align-content: space-around !important; + } + .align-content-xl-stretch { + -ms-flex-line-pack: stretch !important; + align-content: stretch !important; + } + .align-self-xl-auto { + -ms-flex-item-align: auto !important; + align-self: auto !important; + } + .align-self-xl-start { + -ms-flex-item-align: start !important; + align-self: flex-start !important; + } + .align-self-xl-end { + -ms-flex-item-align: end !important; + align-self: flex-end !important; + } + .align-self-xl-center { + -ms-flex-item-align: center !important; + 
align-self: center !important; + } + .align-self-xl-baseline { + -ms-flex-item-align: baseline !important; + align-self: baseline !important; + } + .align-self-xl-stretch { + -ms-flex-item-align: stretch !important; + align-self: stretch !important; + } +} + +.float-left { + float: left !important; +} + +.float-right { + float: right !important; +} + +.float-none { + float: none !important; +} + +@media (min-width: 576px) { + .float-sm-left { + float: left !important; + } + .float-sm-right { + float: right !important; + } + .float-sm-none { + float: none !important; + } +} + +@media (min-width: 768px) { + .float-md-left { + float: left !important; + } + .float-md-right { + float: right !important; + } + .float-md-none { + float: none !important; + } +} + +@media (min-width: 992px) { + .float-lg-left { + float: left !important; + } + .float-lg-right { + float: right !important; + } + .float-lg-none { + float: none !important; + } +} + +@media (min-width: 1200px) { + .float-xl-left { + float: left !important; + } + .float-xl-right { + float: right !important; + } + .float-xl-none { + float: none !important; + } +} + +.overflow-auto { + overflow: auto !important; +} + +.overflow-hidden { + overflow: hidden !important; +} + +.position-static { + position: static !important; +} + +.position-relative { + position: relative !important; +} + +.position-absolute { + position: absolute !important; +} + +.position-fixed { + position: fixed !important; +} + +.position-sticky { + position: -webkit-sticky !important; + position: sticky !important; +} + +.fixed-top { + position: fixed; + top: 0; + right: 0; + left: 0; + z-index: 1030; +} + +.fixed-bottom { + position: fixed; + right: 0; + bottom: 0; + left: 0; + z-index: 1030; +} + +@supports ((position: -webkit-sticky) or (position: sticky)) { + .sticky-top { + position: -webkit-sticky; + position: sticky; + top: 0; + z-index: 1020; + } +} + +.sr-only { + position: absolute; + width: 1px; + height: 1px; + padding: 0; + overflow: hidden; + clip: rect(0, 0, 0, 0); + white-space: nowrap; + border: 0; +} + +.sr-only-focusable:active, .sr-only-focusable:focus { + position: static; + width: auto; + height: auto; + overflow: visible; + clip: auto; + white-space: normal; +} + +.shadow-sm { + box-shadow: 0 0.125rem 0.25rem rgba(0, 0, 0, 0.075) !important; +} + +.shadow { + box-shadow: 0 0.5rem 1rem rgba(0, 0, 0, 0.15) !important; +} + +.shadow-lg { + box-shadow: 0 1rem 3rem rgba(0, 0, 0, 0.175) !important; +} + +.shadow-none { + box-shadow: none !important; +} + +.w-25 { + width: 25% !important; +} + +.w-50 { + width: 50% !important; +} + +.w-75 { + width: 75% !important; +} + +.w-100 { + width: 100% !important; +} + +.w-auto { + width: auto !important; +} + +.h-25 { + height: 25% !important; +} + +.h-50 { + height: 50% !important; +} + +.h-75 { + height: 75% !important; +} + +.h-100 { + height: 100% !important; +} + +.h-auto { + height: auto !important; +} + +.mw-100 { + max-width: 100% !important; +} + +.mh-100 { + max-height: 100% !important; +} + +.min-vw-100 { + min-width: 100vw !important; +} + +.min-vh-100 { + min-height: 100vh !important; +} + +.vw-100 { + width: 100vw !important; +} + +.vh-100 { + height: 100vh !important; +} + +.stretched-link::after { + position: absolute; + top: 0; + right: 0; + bottom: 0; + left: 0; + z-index: 1; + pointer-events: auto; + content: ""; + background-color: rgba(0, 0, 0, 0); +} + +.m-0 { + margin: 0 !important; +} + +.mt-0, +.my-0 { + margin-top: 0 !important; +} + +.mr-0, +.mx-0 { + margin-right: 0 !important; +} + 
+.mb-0, +.my-0 { + margin-bottom: 0 !important; +} + +.ml-0, +.mx-0 { + margin-left: 0 !important; +} + +.m-1 { + margin: 0.25rem !important; +} + +.mt-1, +.my-1 { + margin-top: 0.25rem !important; +} + +.mr-1, +.mx-1 { + margin-right: 0.25rem !important; +} + +.mb-1, +.my-1 { + margin-bottom: 0.25rem !important; +} + +.ml-1, +.mx-1 { + margin-left: 0.25rem !important; +} + +.m-2 { + margin: 0.5rem !important; +} + +.mt-2, +.my-2 { + margin-top: 0.5rem !important; +} + +.mr-2, +.mx-2 { + margin-right: 0.5rem !important; +} + +.mb-2, +.my-2 { + margin-bottom: 0.5rem !important; +} + +.ml-2, +.mx-2 { + margin-left: 0.5rem !important; +} + +.m-3 { + margin: 1rem !important; +} + +.mt-3, +.my-3 { + margin-top: 1rem !important; +} + +.mr-3, +.mx-3 { + margin-right: 1rem !important; +} + +.mb-3, +.my-3 { + margin-bottom: 1rem !important; +} + +.ml-3, +.mx-3 { + margin-left: 1rem !important; +} + +.m-4 { + margin: 1.5rem !important; +} + +.mt-4, +.my-4 { + margin-top: 1.5rem !important; +} + +.mr-4, +.mx-4 { + margin-right: 1.5rem !important; +} + +.mb-4, +.my-4 { + margin-bottom: 1.5rem !important; +} + +.ml-4, +.mx-4 { + margin-left: 1.5rem !important; +} + +.m-5 { + margin: 3rem !important; +} + +.mt-5, +.my-5 { + margin-top: 3rem !important; +} + +.mr-5, +.mx-5 { + margin-right: 3rem !important; +} + +.mb-5, +.my-5 { + margin-bottom: 3rem !important; +} + +.ml-5, +.mx-5 { + margin-left: 3rem !important; +} + +.p-0 { + padding: 0 !important; +} + +.pt-0, +.py-0 { + padding-top: 0 !important; +} + +.pr-0, +.px-0 { + padding-right: 0 !important; +} + +.pb-0, +.py-0 { + padding-bottom: 0 !important; +} + +.pl-0, +.px-0 { + padding-left: 0 !important; +} + +.p-1 { + padding: 0.25rem !important; +} + +.pt-1, +.py-1 { + padding-top: 0.25rem !important; +} + +.pr-1, +.px-1 { + padding-right: 0.25rem !important; +} + +.pb-1, +.py-1 { + padding-bottom: 0.25rem !important; +} + +.pl-1, +.px-1 { + padding-left: 0.25rem !important; +} + +.p-2 { + padding: 0.5rem !important; +} + +.pt-2, +.py-2 { + padding-top: 0.5rem !important; +} + +.pr-2, +.px-2 { + padding-right: 0.5rem !important; +} + +.pb-2, +.py-2 { + padding-bottom: 0.5rem !important; +} + +.pl-2, +.px-2 { + padding-left: 0.5rem !important; +} + +.p-3 { + padding: 1rem !important; +} + +.pt-3, +.py-3 { + padding-top: 1rem !important; +} + +.pr-3, +.px-3 { + padding-right: 1rem !important; +} + +.pb-3, +.py-3 { + padding-bottom: 1rem !important; +} + +.pl-3, +.px-3 { + padding-left: 1rem !important; +} + +.p-4 { + padding: 1.5rem !important; +} + +.pt-4, +.py-4 { + padding-top: 1.5rem !important; +} + +.pr-4, +.px-4 { + padding-right: 1.5rem !important; +} + +.pb-4, +.py-4 { + padding-bottom: 1.5rem !important; +} + +.pl-4, +.px-4 { + padding-left: 1.5rem !important; +} + +.p-5 { + padding: 3rem !important; +} + +.pt-5, +.py-5 { + padding-top: 3rem !important; +} + +.pr-5, +.px-5 { + padding-right: 3rem !important; +} + +.pb-5, +.py-5 { + padding-bottom: 3rem !important; +} + +.pl-5, +.px-5 { + padding-left: 3rem !important; +} + +.m-n1 { + margin: -0.25rem !important; +} + +.mt-n1, +.my-n1 { + margin-top: -0.25rem !important; +} + +.mr-n1, +.mx-n1 { + margin-right: -0.25rem !important; +} + +.mb-n1, +.my-n1 { + margin-bottom: -0.25rem !important; +} + +.ml-n1, +.mx-n1 { + margin-left: -0.25rem !important; +} + +.m-n2 { + margin: -0.5rem !important; +} + +.mt-n2, +.my-n2 { + margin-top: -0.5rem !important; +} + +.mr-n2, +.mx-n2 { + margin-right: -0.5rem !important; +} + +.mb-n2, +.my-n2 { + margin-bottom: -0.5rem !important; +} + +.ml-n2, +.mx-n2 
{ + margin-left: -0.5rem !important; +} + +.m-n3 { + margin: -1rem !important; +} + +.mt-n3, +.my-n3 { + margin-top: -1rem !important; +} + +.mr-n3, +.mx-n3 { + margin-right: -1rem !important; +} + +.mb-n3, +.my-n3 { + margin-bottom: -1rem !important; +} + +.ml-n3, +.mx-n3 { + margin-left: -1rem !important; +} + +.m-n4 { + margin: -1.5rem !important; +} + +.mt-n4, +.my-n4 { + margin-top: -1.5rem !important; +} + +.mr-n4, +.mx-n4 { + margin-right: -1.5rem !important; +} + +.mb-n4, +.my-n4 { + margin-bottom: -1.5rem !important; +} + +.ml-n4, +.mx-n4 { + margin-left: -1.5rem !important; +} + +.m-n5 { + margin: -3rem !important; +} + +.mt-n5, +.my-n5 { + margin-top: -3rem !important; +} + +.mr-n5, +.mx-n5 { + margin-right: -3rem !important; +} + +.mb-n5, +.my-n5 { + margin-bottom: -3rem !important; +} + +.ml-n5, +.mx-n5 { + margin-left: -3rem !important; +} + +.m-auto { + margin: auto !important; +} + +.mt-auto, +.my-auto { + margin-top: auto !important; +} + +.mr-auto, +.mx-auto { + margin-right: auto !important; +} + +.mb-auto, +.my-auto { + margin-bottom: auto !important; +} + +.ml-auto, +.mx-auto { + margin-left: auto !important; +} + +@media (min-width: 576px) { + .m-sm-0 { + margin: 0 !important; + } + .mt-sm-0, + .my-sm-0 { + margin-top: 0 !important; + } + .mr-sm-0, + .mx-sm-0 { + margin-right: 0 !important; + } + .mb-sm-0, + .my-sm-0 { + margin-bottom: 0 !important; + } + .ml-sm-0, + .mx-sm-0 { + margin-left: 0 !important; + } + .m-sm-1 { + margin: 0.25rem !important; + } + .mt-sm-1, + .my-sm-1 { + margin-top: 0.25rem !important; + } + .mr-sm-1, + .mx-sm-1 { + margin-right: 0.25rem !important; + } + .mb-sm-1, + .my-sm-1 { + margin-bottom: 0.25rem !important; + } + .ml-sm-1, + .mx-sm-1 { + margin-left: 0.25rem !important; + } + .m-sm-2 { + margin: 0.5rem !important; + } + .mt-sm-2, + .my-sm-2 { + margin-top: 0.5rem !important; + } + .mr-sm-2, + .mx-sm-2 { + margin-right: 0.5rem !important; + } + .mb-sm-2, + .my-sm-2 { + margin-bottom: 0.5rem !important; + } + .ml-sm-2, + .mx-sm-2 { + margin-left: 0.5rem !important; + } + .m-sm-3 { + margin: 1rem !important; + } + .mt-sm-3, + .my-sm-3 { + margin-top: 1rem !important; + } + .mr-sm-3, + .mx-sm-3 { + margin-right: 1rem !important; + } + .mb-sm-3, + .my-sm-3 { + margin-bottom: 1rem !important; + } + .ml-sm-3, + .mx-sm-3 { + margin-left: 1rem !important; + } + .m-sm-4 { + margin: 1.5rem !important; + } + .mt-sm-4, + .my-sm-4 { + margin-top: 1.5rem !important; + } + .mr-sm-4, + .mx-sm-4 { + margin-right: 1.5rem !important; + } + .mb-sm-4, + .my-sm-4 { + margin-bottom: 1.5rem !important; + } + .ml-sm-4, + .mx-sm-4 { + margin-left: 1.5rem !important; + } + .m-sm-5 { + margin: 3rem !important; + } + .mt-sm-5, + .my-sm-5 { + margin-top: 3rem !important; + } + .mr-sm-5, + .mx-sm-5 { + margin-right: 3rem !important; + } + .mb-sm-5, + .my-sm-5 { + margin-bottom: 3rem !important; + } + .ml-sm-5, + .mx-sm-5 { + margin-left: 3rem !important; + } + .p-sm-0 { + padding: 0 !important; + } + .pt-sm-0, + .py-sm-0 { + padding-top: 0 !important; + } + .pr-sm-0, + .px-sm-0 { + padding-right: 0 !important; + } + .pb-sm-0, + .py-sm-0 { + padding-bottom: 0 !important; + } + .pl-sm-0, + .px-sm-0 { + padding-left: 0 !important; + } + .p-sm-1 { + padding: 0.25rem !important; + } + .pt-sm-1, + .py-sm-1 { + padding-top: 0.25rem !important; + } + .pr-sm-1, + .px-sm-1 { + padding-right: 0.25rem !important; + } + .pb-sm-1, + .py-sm-1 { + padding-bottom: 0.25rem !important; + } + .pl-sm-1, + .px-sm-1 { + padding-left: 0.25rem !important; + } + .p-sm-2 { + padding: 0.5rem 
!important; + } + .pt-sm-2, + .py-sm-2 { + padding-top: 0.5rem !important; + } + .pr-sm-2, + .px-sm-2 { + padding-right: 0.5rem !important; + } + .pb-sm-2, + .py-sm-2 { + padding-bottom: 0.5rem !important; + } + .pl-sm-2, + .px-sm-2 { + padding-left: 0.5rem !important; + } + .p-sm-3 { + padding: 1rem !important; + } + .pt-sm-3, + .py-sm-3 { + padding-top: 1rem !important; + } + .pr-sm-3, + .px-sm-3 { + padding-right: 1rem !important; + } + .pb-sm-3, + .py-sm-3 { + padding-bottom: 1rem !important; + } + .pl-sm-3, + .px-sm-3 { + padding-left: 1rem !important; + } + .p-sm-4 { + padding: 1.5rem !important; + } + .pt-sm-4, + .py-sm-4 { + padding-top: 1.5rem !important; + } + .pr-sm-4, + .px-sm-4 { + padding-right: 1.5rem !important; + } + .pb-sm-4, + .py-sm-4 { + padding-bottom: 1.5rem !important; + } + .pl-sm-4, + .px-sm-4 { + padding-left: 1.5rem !important; + } + .p-sm-5 { + padding: 3rem !important; + } + .pt-sm-5, + .py-sm-5 { + padding-top: 3rem !important; + } + .pr-sm-5, + .px-sm-5 { + padding-right: 3rem !important; + } + .pb-sm-5, + .py-sm-5 { + padding-bottom: 3rem !important; + } + .pl-sm-5, + .px-sm-5 { + padding-left: 3rem !important; + } + .m-sm-n1 { + margin: -0.25rem !important; + } + .mt-sm-n1, + .my-sm-n1 { + margin-top: -0.25rem !important; + } + .mr-sm-n1, + .mx-sm-n1 { + margin-right: -0.25rem !important; + } + .mb-sm-n1, + .my-sm-n1 { + margin-bottom: -0.25rem !important; + } + .ml-sm-n1, + .mx-sm-n1 { + margin-left: -0.25rem !important; + } + .m-sm-n2 { + margin: -0.5rem !important; + } + .mt-sm-n2, + .my-sm-n2 { + margin-top: -0.5rem !important; + } + .mr-sm-n2, + .mx-sm-n2 { + margin-right: -0.5rem !important; + } + .mb-sm-n2, + .my-sm-n2 { + margin-bottom: -0.5rem !important; + } + .ml-sm-n2, + .mx-sm-n2 { + margin-left: -0.5rem !important; + } + .m-sm-n3 { + margin: -1rem !important; + } + .mt-sm-n3, + .my-sm-n3 { + margin-top: -1rem !important; + } + .mr-sm-n3, + .mx-sm-n3 { + margin-right: -1rem !important; + } + .mb-sm-n3, + .my-sm-n3 { + margin-bottom: -1rem !important; + } + .ml-sm-n3, + .mx-sm-n3 { + margin-left: -1rem !important; + } + .m-sm-n4 { + margin: -1.5rem !important; + } + .mt-sm-n4, + .my-sm-n4 { + margin-top: -1.5rem !important; + } + .mr-sm-n4, + .mx-sm-n4 { + margin-right: -1.5rem !important; + } + .mb-sm-n4, + .my-sm-n4 { + margin-bottom: -1.5rem !important; + } + .ml-sm-n4, + .mx-sm-n4 { + margin-left: -1.5rem !important; + } + .m-sm-n5 { + margin: -3rem !important; + } + .mt-sm-n5, + .my-sm-n5 { + margin-top: -3rem !important; + } + .mr-sm-n5, + .mx-sm-n5 { + margin-right: -3rem !important; + } + .mb-sm-n5, + .my-sm-n5 { + margin-bottom: -3rem !important; + } + .ml-sm-n5, + .mx-sm-n5 { + margin-left: -3rem !important; + } + .m-sm-auto { + margin: auto !important; + } + .mt-sm-auto, + .my-sm-auto { + margin-top: auto !important; + } + .mr-sm-auto, + .mx-sm-auto { + margin-right: auto !important; + } + .mb-sm-auto, + .my-sm-auto { + margin-bottom: auto !important; + } + .ml-sm-auto, + .mx-sm-auto { + margin-left: auto !important; + } +} + +@media (min-width: 768px) { + .m-md-0 { + margin: 0 !important; + } + .mt-md-0, + .my-md-0 { + margin-top: 0 !important; + } + .mr-md-0, + .mx-md-0 { + margin-right: 0 !important; + } + .mb-md-0, + .my-md-0 { + margin-bottom: 0 !important; + } + .ml-md-0, + .mx-md-0 { + margin-left: 0 !important; + } + .m-md-1 { + margin: 0.25rem !important; + } + .mt-md-1, + .my-md-1 { + margin-top: 0.25rem !important; + } + .mr-md-1, + .mx-md-1 { + margin-right: 0.25rem !important; + } + .mb-md-1, + .my-md-1 { + 
margin-bottom: 0.25rem !important; + } + .ml-md-1, + .mx-md-1 { + margin-left: 0.25rem !important; + } + .m-md-2 { + margin: 0.5rem !important; + } + .mt-md-2, + .my-md-2 { + margin-top: 0.5rem !important; + } + .mr-md-2, + .mx-md-2 { + margin-right: 0.5rem !important; + } + .mb-md-2, + .my-md-2 { + margin-bottom: 0.5rem !important; + } + .ml-md-2, + .mx-md-2 { + margin-left: 0.5rem !important; + } + .m-md-3 { + margin: 1rem !important; + } + .mt-md-3, + .my-md-3 { + margin-top: 1rem !important; + } + .mr-md-3, + .mx-md-3 { + margin-right: 1rem !important; + } + .mb-md-3, + .my-md-3 { + margin-bottom: 1rem !important; + } + .ml-md-3, + .mx-md-3 { + margin-left: 1rem !important; + } + .m-md-4 { + margin: 1.5rem !important; + } + .mt-md-4, + .my-md-4 { + margin-top: 1.5rem !important; + } + .mr-md-4, + .mx-md-4 { + margin-right: 1.5rem !important; + } + .mb-md-4, + .my-md-4 { + margin-bottom: 1.5rem !important; + } + .ml-md-4, + .mx-md-4 { + margin-left: 1.5rem !important; + } + .m-md-5 { + margin: 3rem !important; + } + .mt-md-5, + .my-md-5 { + margin-top: 3rem !important; + } + .mr-md-5, + .mx-md-5 { + margin-right: 3rem !important; + } + .mb-md-5, + .my-md-5 { + margin-bottom: 3rem !important; + } + .ml-md-5, + .mx-md-5 { + margin-left: 3rem !important; + } + .p-md-0 { + padding: 0 !important; + } + .pt-md-0, + .py-md-0 { + padding-top: 0 !important; + } + .pr-md-0, + .px-md-0 { + padding-right: 0 !important; + } + .pb-md-0, + .py-md-0 { + padding-bottom: 0 !important; + } + .pl-md-0, + .px-md-0 { + padding-left: 0 !important; + } + .p-md-1 { + padding: 0.25rem !important; + } + .pt-md-1, + .py-md-1 { + padding-top: 0.25rem !important; + } + .pr-md-1, + .px-md-1 { + padding-right: 0.25rem !important; + } + .pb-md-1, + .py-md-1 { + padding-bottom: 0.25rem !important; + } + .pl-md-1, + .px-md-1 { + padding-left: 0.25rem !important; + } + .p-md-2 { + padding: 0.5rem !important; + } + .pt-md-2, + .py-md-2 { + padding-top: 0.5rem !important; + } + .pr-md-2, + .px-md-2 { + padding-right: 0.5rem !important; + } + .pb-md-2, + .py-md-2 { + padding-bottom: 0.5rem !important; + } + .pl-md-2, + .px-md-2 { + padding-left: 0.5rem !important; + } + .p-md-3 { + padding: 1rem !important; + } + .pt-md-3, + .py-md-3 { + padding-top: 1rem !important; + } + .pr-md-3, + .px-md-3 { + padding-right: 1rem !important; + } + .pb-md-3, + .py-md-3 { + padding-bottom: 1rem !important; + } + .pl-md-3, + .px-md-3 { + padding-left: 1rem !important; + } + .p-md-4 { + padding: 1.5rem !important; + } + .pt-md-4, + .py-md-4 { + padding-top: 1.5rem !important; + } + .pr-md-4, + .px-md-4 { + padding-right: 1.5rem !important; + } + .pb-md-4, + .py-md-4 { + padding-bottom: 1.5rem !important; + } + .pl-md-4, + .px-md-4 { + padding-left: 1.5rem !important; + } + .p-md-5 { + padding: 3rem !important; + } + .pt-md-5, + .py-md-5 { + padding-top: 3rem !important; + } + .pr-md-5, + .px-md-5 { + padding-right: 3rem !important; + } + .pb-md-5, + .py-md-5 { + padding-bottom: 3rem !important; + } + .pl-md-5, + .px-md-5 { + padding-left: 3rem !important; + } + .m-md-n1 { + margin: -0.25rem !important; + } + .mt-md-n1, + .my-md-n1 { + margin-top: -0.25rem !important; + } + .mr-md-n1, + .mx-md-n1 { + margin-right: -0.25rem !important; + } + .mb-md-n1, + .my-md-n1 { + margin-bottom: -0.25rem !important; + } + .ml-md-n1, + .mx-md-n1 { + margin-left: -0.25rem !important; + } + .m-md-n2 { + margin: -0.5rem !important; + } + .mt-md-n2, + .my-md-n2 { + margin-top: -0.5rem !important; + } + .mr-md-n2, + .mx-md-n2 { + margin-right: -0.5rem 
!important; + } + .mb-md-n2, + .my-md-n2 { + margin-bottom: -0.5rem !important; + } + .ml-md-n2, + .mx-md-n2 { + margin-left: -0.5rem !important; + } + .m-md-n3 { + margin: -1rem !important; + } + .mt-md-n3, + .my-md-n3 { + margin-top: -1rem !important; + } + .mr-md-n3, + .mx-md-n3 { + margin-right: -1rem !important; + } + .mb-md-n3, + .my-md-n3 { + margin-bottom: -1rem !important; + } + .ml-md-n3, + .mx-md-n3 { + margin-left: -1rem !important; + } + .m-md-n4 { + margin: -1.5rem !important; + } + .mt-md-n4, + .my-md-n4 { + margin-top: -1.5rem !important; + } + .mr-md-n4, + .mx-md-n4 { + margin-right: -1.5rem !important; + } + .mb-md-n4, + .my-md-n4 { + margin-bottom: -1.5rem !important; + } + .ml-md-n4, + .mx-md-n4 { + margin-left: -1.5rem !important; + } + .m-md-n5 { + margin: -3rem !important; + } + .mt-md-n5, + .my-md-n5 { + margin-top: -3rem !important; + } + .mr-md-n5, + .mx-md-n5 { + margin-right: -3rem !important; + } + .mb-md-n5, + .my-md-n5 { + margin-bottom: -3rem !important; + } + .ml-md-n5, + .mx-md-n5 { + margin-left: -3rem !important; + } + .m-md-auto { + margin: auto !important; + } + .mt-md-auto, + .my-md-auto { + margin-top: auto !important; + } + .mr-md-auto, + .mx-md-auto { + margin-right: auto !important; + } + .mb-md-auto, + .my-md-auto { + margin-bottom: auto !important; + } + .ml-md-auto, + .mx-md-auto { + margin-left: auto !important; + } +} + +@media (min-width: 992px) { + .m-lg-0 { + margin: 0 !important; + } + .mt-lg-0, + .my-lg-0 { + margin-top: 0 !important; + } + .mr-lg-0, + .mx-lg-0 { + margin-right: 0 !important; + } + .mb-lg-0, + .my-lg-0 { + margin-bottom: 0 !important; + } + .ml-lg-0, + .mx-lg-0 { + margin-left: 0 !important; + } + .m-lg-1 { + margin: 0.25rem !important; + } + .mt-lg-1, + .my-lg-1 { + margin-top: 0.25rem !important; + } + .mr-lg-1, + .mx-lg-1 { + margin-right: 0.25rem !important; + } + .mb-lg-1, + .my-lg-1 { + margin-bottom: 0.25rem !important; + } + .ml-lg-1, + .mx-lg-1 { + margin-left: 0.25rem !important; + } + .m-lg-2 { + margin: 0.5rem !important; + } + .mt-lg-2, + .my-lg-2 { + margin-top: 0.5rem !important; + } + .mr-lg-2, + .mx-lg-2 { + margin-right: 0.5rem !important; + } + .mb-lg-2, + .my-lg-2 { + margin-bottom: 0.5rem !important; + } + .ml-lg-2, + .mx-lg-2 { + margin-left: 0.5rem !important; + } + .m-lg-3 { + margin: 1rem !important; + } + .mt-lg-3, + .my-lg-3 { + margin-top: 1rem !important; + } + .mr-lg-3, + .mx-lg-3 { + margin-right: 1rem !important; + } + .mb-lg-3, + .my-lg-3 { + margin-bottom: 1rem !important; + } + .ml-lg-3, + .mx-lg-3 { + margin-left: 1rem !important; + } + .m-lg-4 { + margin: 1.5rem !important; + } + .mt-lg-4, + .my-lg-4 { + margin-top: 1.5rem !important; + } + .mr-lg-4, + .mx-lg-4 { + margin-right: 1.5rem !important; + } + .mb-lg-4, + .my-lg-4 { + margin-bottom: 1.5rem !important; + } + .ml-lg-4, + .mx-lg-4 { + margin-left: 1.5rem !important; + } + .m-lg-5 { + margin: 3rem !important; + } + .mt-lg-5, + .my-lg-5 { + margin-top: 3rem !important; + } + .mr-lg-5, + .mx-lg-5 { + margin-right: 3rem !important; + } + .mb-lg-5, + .my-lg-5 { + margin-bottom: 3rem !important; + } + .ml-lg-5, + .mx-lg-5 { + margin-left: 3rem !important; + } + .p-lg-0 { + padding: 0 !important; + } + .pt-lg-0, + .py-lg-0 { + padding-top: 0 !important; + } + .pr-lg-0, + .px-lg-0 { + padding-right: 0 !important; + } + .pb-lg-0, + .py-lg-0 { + padding-bottom: 0 !important; + } + .pl-lg-0, + .px-lg-0 { + padding-left: 0 !important; + } + .p-lg-1 { + padding: 0.25rem !important; + } + .pt-lg-1, + .py-lg-1 { + padding-top: 0.25rem 
!important; + } + .pr-lg-1, + .px-lg-1 { + padding-right: 0.25rem !important; + } + .pb-lg-1, + .py-lg-1 { + padding-bottom: 0.25rem !important; + } + .pl-lg-1, + .px-lg-1 { + padding-left: 0.25rem !important; + } + .p-lg-2 { + padding: 0.5rem !important; + } + .pt-lg-2, + .py-lg-2 { + padding-top: 0.5rem !important; + } + .pr-lg-2, + .px-lg-2 { + padding-right: 0.5rem !important; + } + .pb-lg-2, + .py-lg-2 { + padding-bottom: 0.5rem !important; + } + .pl-lg-2, + .px-lg-2 { + padding-left: 0.5rem !important; + } + .p-lg-3 { + padding: 1rem !important; + } + .pt-lg-3, + .py-lg-3 { + padding-top: 1rem !important; + } + .pr-lg-3, + .px-lg-3 { + padding-right: 1rem !important; + } + .pb-lg-3, + .py-lg-3 { + padding-bottom: 1rem !important; + } + .pl-lg-3, + .px-lg-3 { + padding-left: 1rem !important; + } + .p-lg-4 { + padding: 1.5rem !important; + } + .pt-lg-4, + .py-lg-4 { + padding-top: 1.5rem !important; + } + .pr-lg-4, + .px-lg-4 { + padding-right: 1.5rem !important; + } + .pb-lg-4, + .py-lg-4 { + padding-bottom: 1.5rem !important; + } + .pl-lg-4, + .px-lg-4 { + padding-left: 1.5rem !important; + } + .p-lg-5 { + padding: 3rem !important; + } + .pt-lg-5, + .py-lg-5 { + padding-top: 3rem !important; + } + .pr-lg-5, + .px-lg-5 { + padding-right: 3rem !important; + } + .pb-lg-5, + .py-lg-5 { + padding-bottom: 3rem !important; + } + .pl-lg-5, + .px-lg-5 { + padding-left: 3rem !important; + } + .m-lg-n1 { + margin: -0.25rem !important; + } + .mt-lg-n1, + .my-lg-n1 { + margin-top: -0.25rem !important; + } + .mr-lg-n1, + .mx-lg-n1 { + margin-right: -0.25rem !important; + } + .mb-lg-n1, + .my-lg-n1 { + margin-bottom: -0.25rem !important; + } + .ml-lg-n1, + .mx-lg-n1 { + margin-left: -0.25rem !important; + } + .m-lg-n2 { + margin: -0.5rem !important; + } + .mt-lg-n2, + .my-lg-n2 { + margin-top: -0.5rem !important; + } + .mr-lg-n2, + .mx-lg-n2 { + margin-right: -0.5rem !important; + } + .mb-lg-n2, + .my-lg-n2 { + margin-bottom: -0.5rem !important; + } + .ml-lg-n2, + .mx-lg-n2 { + margin-left: -0.5rem !important; + } + .m-lg-n3 { + margin: -1rem !important; + } + .mt-lg-n3, + .my-lg-n3 { + margin-top: -1rem !important; + } + .mr-lg-n3, + .mx-lg-n3 { + margin-right: -1rem !important; + } + .mb-lg-n3, + .my-lg-n3 { + margin-bottom: -1rem !important; + } + .ml-lg-n3, + .mx-lg-n3 { + margin-left: -1rem !important; + } + .m-lg-n4 { + margin: -1.5rem !important; + } + .mt-lg-n4, + .my-lg-n4 { + margin-top: -1.5rem !important; + } + .mr-lg-n4, + .mx-lg-n4 { + margin-right: -1.5rem !important; + } + .mb-lg-n4, + .my-lg-n4 { + margin-bottom: -1.5rem !important; + } + .ml-lg-n4, + .mx-lg-n4 { + margin-left: -1.5rem !important; + } + .m-lg-n5 { + margin: -3rem !important; + } + .mt-lg-n5, + .my-lg-n5 { + margin-top: -3rem !important; + } + .mr-lg-n5, + .mx-lg-n5 { + margin-right: -3rem !important; + } + .mb-lg-n5, + .my-lg-n5 { + margin-bottom: -3rem !important; + } + .ml-lg-n5, + .mx-lg-n5 { + margin-left: -3rem !important; + } + .m-lg-auto { + margin: auto !important; + } + .mt-lg-auto, + .my-lg-auto { + margin-top: auto !important; + } + .mr-lg-auto, + .mx-lg-auto { + margin-right: auto !important; + } + .mb-lg-auto, + .my-lg-auto { + margin-bottom: auto !important; + } + .ml-lg-auto, + .mx-lg-auto { + margin-left: auto !important; + } +} + +@media (min-width: 1200px) { + .m-xl-0 { + margin: 0 !important; + } + .mt-xl-0, + .my-xl-0 { + margin-top: 0 !important; + } + .mr-xl-0, + .mx-xl-0 { + margin-right: 0 !important; + } + .mb-xl-0, + .my-xl-0 { + margin-bottom: 0 !important; + } + .ml-xl-0, + .mx-xl-0 { + 
margin-left: 0 !important; + } + .m-xl-1 { + margin: 0.25rem !important; + } + .mt-xl-1, + .my-xl-1 { + margin-top: 0.25rem !important; + } + .mr-xl-1, + .mx-xl-1 { + margin-right: 0.25rem !important; + } + .mb-xl-1, + .my-xl-1 { + margin-bottom: 0.25rem !important; + } + .ml-xl-1, + .mx-xl-1 { + margin-left: 0.25rem !important; + } + .m-xl-2 { + margin: 0.5rem !important; + } + .mt-xl-2, + .my-xl-2 { + margin-top: 0.5rem !important; + } + .mr-xl-2, + .mx-xl-2 { + margin-right: 0.5rem !important; + } + .mb-xl-2, + .my-xl-2 { + margin-bottom: 0.5rem !important; + } + .ml-xl-2, + .mx-xl-2 { + margin-left: 0.5rem !important; + } + .m-xl-3 { + margin: 1rem !important; + } + .mt-xl-3, + .my-xl-3 { + margin-top: 1rem !important; + } + .mr-xl-3, + .mx-xl-3 { + margin-right: 1rem !important; + } + .mb-xl-3, + .my-xl-3 { + margin-bottom: 1rem !important; + } + .ml-xl-3, + .mx-xl-3 { + margin-left: 1rem !important; + } + .m-xl-4 { + margin: 1.5rem !important; + } + .mt-xl-4, + .my-xl-4 { + margin-top: 1.5rem !important; + } + .mr-xl-4, + .mx-xl-4 { + margin-right: 1.5rem !important; + } + .mb-xl-4, + .my-xl-4 { + margin-bottom: 1.5rem !important; + } + .ml-xl-4, + .mx-xl-4 { + margin-left: 1.5rem !important; + } + .m-xl-5 { + margin: 3rem !important; + } + .mt-xl-5, + .my-xl-5 { + margin-top: 3rem !important; + } + .mr-xl-5, + .mx-xl-5 { + margin-right: 3rem !important; + } + .mb-xl-5, + .my-xl-5 { + margin-bottom: 3rem !important; + } + .ml-xl-5, + .mx-xl-5 { + margin-left: 3rem !important; + } + .p-xl-0 { + padding: 0 !important; + } + .pt-xl-0, + .py-xl-0 { + padding-top: 0 !important; + } + .pr-xl-0, + .px-xl-0 { + padding-right: 0 !important; + } + .pb-xl-0, + .py-xl-0 { + padding-bottom: 0 !important; + } + .pl-xl-0, + .px-xl-0 { + padding-left: 0 !important; + } + .p-xl-1 { + padding: 0.25rem !important; + } + .pt-xl-1, + .py-xl-1 { + padding-top: 0.25rem !important; + } + .pr-xl-1, + .px-xl-1 { + padding-right: 0.25rem !important; + } + .pb-xl-1, + .py-xl-1 { + padding-bottom: 0.25rem !important; + } + .pl-xl-1, + .px-xl-1 { + padding-left: 0.25rem !important; + } + .p-xl-2 { + padding: 0.5rem !important; + } + .pt-xl-2, + .py-xl-2 { + padding-top: 0.5rem !important; + } + .pr-xl-2, + .px-xl-2 { + padding-right: 0.5rem !important; + } + .pb-xl-2, + .py-xl-2 { + padding-bottom: 0.5rem !important; + } + .pl-xl-2, + .px-xl-2 { + padding-left: 0.5rem !important; + } + .p-xl-3 { + padding: 1rem !important; + } + .pt-xl-3, + .py-xl-3 { + padding-top: 1rem !important; + } + .pr-xl-3, + .px-xl-3 { + padding-right: 1rem !important; + } + .pb-xl-3, + .py-xl-3 { + padding-bottom: 1rem !important; + } + .pl-xl-3, + .px-xl-3 { + padding-left: 1rem !important; + } + .p-xl-4 { + padding: 1.5rem !important; + } + .pt-xl-4, + .py-xl-4 { + padding-top: 1.5rem !important; + } + .pr-xl-4, + .px-xl-4 { + padding-right: 1.5rem !important; + } + .pb-xl-4, + .py-xl-4 { + padding-bottom: 1.5rem !important; + } + .pl-xl-4, + .px-xl-4 { + padding-left: 1.5rem !important; + } + .p-xl-5 { + padding: 3rem !important; + } + .pt-xl-5, + .py-xl-5 { + padding-top: 3rem !important; + } + .pr-xl-5, + .px-xl-5 { + padding-right: 3rem !important; + } + .pb-xl-5, + .py-xl-5 { + padding-bottom: 3rem !important; + } + .pl-xl-5, + .px-xl-5 { + padding-left: 3rem !important; + } + .m-xl-n1 { + margin: -0.25rem !important; + } + .mt-xl-n1, + .my-xl-n1 { + margin-top: -0.25rem !important; + } + .mr-xl-n1, + .mx-xl-n1 { + margin-right: -0.25rem !important; + } + .mb-xl-n1, + .my-xl-n1 { + margin-bottom: -0.25rem !important; + } + 
.ml-xl-n1, + .mx-xl-n1 { + margin-left: -0.25rem !important; + } + .m-xl-n2 { + margin: -0.5rem !important; + } + .mt-xl-n2, + .my-xl-n2 { + margin-top: -0.5rem !important; + } + .mr-xl-n2, + .mx-xl-n2 { + margin-right: -0.5rem !important; + } + .mb-xl-n2, + .my-xl-n2 { + margin-bottom: -0.5rem !important; + } + .ml-xl-n2, + .mx-xl-n2 { + margin-left: -0.5rem !important; + } + .m-xl-n3 { + margin: -1rem !important; + } + .mt-xl-n3, + .my-xl-n3 { + margin-top: -1rem !important; + } + .mr-xl-n3, + .mx-xl-n3 { + margin-right: -1rem !important; + } + .mb-xl-n3, + .my-xl-n3 { + margin-bottom: -1rem !important; + } + .ml-xl-n3, + .mx-xl-n3 { + margin-left: -1rem !important; + } + .m-xl-n4 { + margin: -1.5rem !important; + } + .mt-xl-n4, + .my-xl-n4 { + margin-top: -1.5rem !important; + } + .mr-xl-n4, + .mx-xl-n4 { + margin-right: -1.5rem !important; + } + .mb-xl-n4, + .my-xl-n4 { + margin-bottom: -1.5rem !important; + } + .ml-xl-n4, + .mx-xl-n4 { + margin-left: -1.5rem !important; + } + .m-xl-n5 { + margin: -3rem !important; + } + .mt-xl-n5, + .my-xl-n5 { + margin-top: -3rem !important; + } + .mr-xl-n5, + .mx-xl-n5 { + margin-right: -3rem !important; + } + .mb-xl-n5, + .my-xl-n5 { + margin-bottom: -3rem !important; + } + .ml-xl-n5, + .mx-xl-n5 { + margin-left: -3rem !important; + } + .m-xl-auto { + margin: auto !important; + } + .mt-xl-auto, + .my-xl-auto { + margin-top: auto !important; + } + .mr-xl-auto, + .mx-xl-auto { + margin-right: auto !important; + } + .mb-xl-auto, + .my-xl-auto { + margin-bottom: auto !important; + } + .ml-xl-auto, + .mx-xl-auto { + margin-left: auto !important; + } +} + +.text-monospace { + font-family: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace !important; +} + +.text-justify { + text-align: justify !important; +} + +.text-wrap { + white-space: normal !important; +} + +.text-nowrap { + white-space: nowrap !important; +} + +.text-truncate { + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.text-left { + text-align: left !important; +} + +.text-right { + text-align: right !important; +} + +.text-center { + text-align: center !important; +} + +@media (min-width: 576px) { + .text-sm-left { + text-align: left !important; + } + .text-sm-right { + text-align: right !important; + } + .text-sm-center { + text-align: center !important; + } +} + +@media (min-width: 768px) { + .text-md-left { + text-align: left !important; + } + .text-md-right { + text-align: right !important; + } + .text-md-center { + text-align: center !important; + } +} + +@media (min-width: 992px) { + .text-lg-left { + text-align: left !important; + } + .text-lg-right { + text-align: right !important; + } + .text-lg-center { + text-align: center !important; + } +} + +@media (min-width: 1200px) { + .text-xl-left { + text-align: left !important; + } + .text-xl-right { + text-align: right !important; + } + .text-xl-center { + text-align: center !important; + } +} + +.text-lowercase { + text-transform: lowercase !important; +} + +.text-uppercase { + text-transform: uppercase !important; +} + +.text-capitalize { + text-transform: capitalize !important; +} + +.font-weight-light { + font-weight: 300 !important; +} + +.font-weight-lighter { + font-weight: lighter !important; +} + +.font-weight-normal { + font-weight: 400 !important; +} + +.font-weight-bold { + font-weight: 700 !important; +} + +.font-weight-bolder { + font-weight: bolder !important; +} + +.font-italic { + font-style: italic !important; +} + +.text-white { + color: #fff !important; 
+} + +.text-primary { + color: #007bff !important; +} + +a.text-primary:hover, a.text-primary:focus { + color: #0056b3 !important; +} + +.text-secondary { + color: #6c757d !important; +} + +a.text-secondary:hover, a.text-secondary:focus { + color: #494f54 !important; +} + +.text-success { + color: #28a745 !important; +} + +a.text-success:hover, a.text-success:focus { + color: #19692c !important; +} + +.text-info { + color: #17a2b8 !important; +} + +a.text-info:hover, a.text-info:focus { + color: #0f6674 !important; +} + +.text-warning { + color: #ffc107 !important; +} + +a.text-warning:hover, a.text-warning:focus { + color: #ba8b00 !important; +} + +.text-danger { + color: #dc3545 !important; +} + +a.text-danger:hover, a.text-danger:focus { + color: #a71d2a !important; +} + +.text-light { + color: #f8f9fa !important; +} + +a.text-light:hover, a.text-light:focus { + color: #cbd3da !important; +} + +.text-dark { + color: #343a40 !important; +} + +a.text-dark:hover, a.text-dark:focus { + color: #121416 !important; +} + +.text-body { + color: #212529 !important; +} + +.text-muted { + color: #6c757d !important; +} + +.text-black-50 { + color: rgba(0, 0, 0, 0.5) !important; +} + +.text-white-50 { + color: rgba(255, 255, 255, 0.5) !important; +} + +.text-hide { + font: 0/0 a; + color: transparent; + text-shadow: none; + background-color: transparent; + border: 0; +} + +.text-decoration-none { + text-decoration: none !important; +} + +.text-break { + word-break: break-word !important; + overflow-wrap: break-word !important; +} + +.text-reset { + color: inherit !important; +} + +.visible { + visibility: visible !important; +} + +.invisible { + visibility: hidden !important; +} + +@media print { + *, + *::before, + *::after { + text-shadow: none !important; + box-shadow: none !important; + } + a:not(.btn) { + text-decoration: underline; + } + abbr[title]::after { + content: " (" attr(title) ")"; + } + pre { + white-space: pre-wrap !important; + } + pre, + blockquote { + border: 1px solid #adb5bd; + page-break-inside: avoid; + } + thead { + display: table-header-group; + } + tr, + img { + page-break-inside: avoid; + } + @page { + size: a3; + } + body { + min-width: 992px !important; + } + .container { + min-width: 992px !important; + } + .navbar { + display: none; + } + .badge { + border: 1px solid #000; + } + .table { + border-collapse: collapse !important; + } + .table td, + .table th { + background-color: #fff !important; + } + .table-bordered th, + .table-bordered td { + border: 1px solid #dee2e6 !important; + } + .table-dark { + color: inherit; + } + .table-dark th, + .table-dark td, + .table-dark thead th, + .table-dark tbody + tbody { + border-color: #dee2e6; + } + .table .thead-dark th { + color: inherit; + border-color: #dee2e6; + } +} +/*# sourceMappingURL=bootstrap.css.map */ \ No newline at end of file diff --git a/preview-fall2024-info/assets/css/research-style.css b/preview-fall2024-info/assets/css/research-style.css new file mode 100644 index 000000000..880660a5a --- /dev/null +++ b/preview-fall2024-info/assets/css/research-style.css @@ -0,0 +1,34 @@ +/* These are the old styles that are still needed */ +code.term { + border: 0px solid #303030; + border-bottom: 0px solid #323232; + border-top: 0px; + border-left: 0px solid #323232; + padding: 0.5em 0.5em; +} + +pre { + border-left: 5px solid #b70101; + padding: 0.5em 1.2em; +} + +pre.term { + border-left: 0px solid #323232; + background: #484848; + color: #F0F0F0; +} + +pre.sub { + border-left: 5px solid #b70101; + padding: 0.5em 1.2em; +}
+ +pre.file { + border-left: 5px solid #0D4F8B; + padding: 0.5em 1.2em; +} + +pre.other { + border-left: 0px solid #0D4F8B; + padding: 0.5em 1.2em; +} \ No newline at end of file diff --git a/preview-fall2024-info/assets/css/style-v10.css b/preview-fall2024-info/assets/css/style-v10.css new file mode 100644 index 000000000..d1b51df49 --- /dev/null +++ b/preview-fall2024-info/assets/css/style-v10.css @@ -0,0 +1,9 @@ +/*! + * Bootstrap v5.0.2 (https://getbootstrap.com/) + * Copyright 2011-2021 The Bootstrap Authors + * Copyright 2011-2021 Twitter, Inc. + * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE) + */:root{--bs-blue: #0d6efd;--bs-indigo: #6610f2;--bs-purple: #6f42c1;--bs-pink: #d63384;--bs-red: #dc3545;--bs-orange: #fd7e14;--bs-yellow: #ffc107;--bs-green: #198754;--bs-teal: #20c997;--bs-cyan: #0dcaf0;--bs-white: #fff;--bs-gray: #6c757d;--bs-gray-dark: #343a40;--bs-primary: #c5050c;--bs-secondary: #6c757d;--bs-success: #198754;--bs-info: #0dcaf0;--bs-warning: #ffc107;--bs-danger: #dc3545;--bs-light: #f8f9fa;--bs-dark: #212529;--bs-font-sans-serif: system-ui, -apple-system, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", "Liberation Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";--bs-font-monospace: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;--bs-gradient: linear-gradient(180deg, rgba(255,255,255,0.15), rgba(255,255,255,0))}*,*::before,*::after{box-sizing:border-box}@media (prefers-reduced-motion: no-preference){:root{scroll-behavior:smooth}}body{margin:0;font-family:var(--bs-font-sans-serif);font-size:1rem;font-weight:400;line-height:1.5;color:#212529;background-color:#fff;-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:rgba(0,0,0,0)}hr{margin:1rem 0;color:inherit;background-color:currentColor;border:0;opacity:.25}hr:not([size]){height:1px}h6,.h6,h5,.h5,h4,.h4,h3,.h3,h2,.h2,h1,.h1{margin-top:0;margin-bottom:.5rem;font-weight:500;line-height:1.2}h1,.h1{font-size:calc(1.375rem + 1.5vw)}@media (min-width: 1200px){h1,.h1{font-size:2.5rem}}h2,.h2{font-size:calc(1.325rem + .9vw)}@media (min-width: 1200px){h2,.h2{font-size:2rem}}h3,.h3{font-size:calc(1.3rem + .6vw)}@media (min-width: 1200px){h3,.h3{font-size:1.75rem}}h4,.h4{font-size:calc(1.275rem + .3vw)}@media (min-width: 1200px){h4,.h4{font-size:1.5rem}}h5,.h5{font-size:1.25rem}h6,.h6{font-size:1rem}p{margin-top:0;margin-bottom:1rem}abbr[title],abbr[data-bs-original-title]{text-decoration:underline dotted;cursor:help;text-decoration-skip-ink:none}address{margin-bottom:1rem;font-style:normal;line-height:inherit}ol,ul{padding-left:2rem}ol,ul,dl{margin-top:0;margin-bottom:1rem}ol ol,ul ul,ol ul,ul ol{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 1rem}b,strong{font-weight:bolder}small,.small{font-size:.875em}mark,.mark{padding:.2em;background-color:#fcf8e3}sub,sup{position:relative;font-size:.75em;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}a{color:#c5050c;text-decoration:underline}a:hover{color:#9e040a}a:not([href]):not([class]),a:not([href]):not([class]):hover{color:inherit;text-decoration:none}pre,code,kbd,samp{font-family:var(--bs-font-monospace);font-size:1em;direction:ltr /* rtl:ignore */;unicode-bidi:bidi-override}pre{display:block;margin-top:0;margin-bottom:1rem;overflow:auto;font-size:.875em}pre 
code{font-size:inherit;color:inherit;word-break:normal}code{font-size:.875em;color:#d63384;word-wrap:break-word}a>code{color:inherit}kbd{padding:.2rem .4rem;font-size:.875em;color:#fff;background-color:#212529;border-radius:.2rem}kbd kbd{padding:0;font-size:1em;font-weight:700}figure{margin:0 0 1rem}img,svg{vertical-align:middle}table{caption-side:bottom;border-collapse:collapse}caption{padding-top:.5rem;padding-bottom:.5rem;color:#6c757d;text-align:left}th{text-align:inherit;text-align:-webkit-match-parent}thead,tbody,tfoot,tr,td,th{border-color:inherit;border-style:solid;border-width:0}label{display:inline-block}button{border-radius:0}button:focus:not(:focus-visible){outline:0}input,button,select,optgroup,textarea{margin:0;font-family:inherit;font-size:inherit;line-height:inherit}button,select{text-transform:none}[role="button"]{cursor:pointer}select{word-wrap:normal}select:disabled{opacity:1}[list]::-webkit-calendar-picker-indicator{display:none}button,[type="button"],[type="reset"],[type="submit"]{-webkit-appearance:button}button:not(:disabled),[type="button"]:not(:disabled),[type="reset"]:not(:disabled),[type="submit"]:not(:disabled){cursor:pointer}::-moz-focus-inner{padding:0;border-style:none}textarea{resize:vertical}fieldset{min-width:0;padding:0;margin:0;border:0}legend{float:left;width:100%;padding:0;margin-bottom:.5rem;font-size:calc(1.275rem + .3vw);line-height:inherit}@media (min-width: 1200px){legend{font-size:1.5rem}}legend+*{clear:left}::-webkit-datetime-edit-fields-wrapper,::-webkit-datetime-edit-text,::-webkit-datetime-edit-minute,::-webkit-datetime-edit-hour-field,::-webkit-datetime-edit-day-field,::-webkit-datetime-edit-month-field,::-webkit-datetime-edit-year-field{padding:0}::-webkit-inner-spin-button{height:auto}[type="search"]{outline-offset:-2px;-webkit-appearance:textfield}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-color-swatch-wrapper{padding:0}::file-selector-button{font:inherit}::-webkit-file-upload-button{font:inherit;-webkit-appearance:button}output{display:inline-block}iframe{border:0}summary{display:list-item;cursor:pointer}progress{vertical-align:baseline}[hidden]{display:none !important}.lead{font-size:1.25rem;font-weight:300}.display-1{font-size:calc(1.625rem + 4.5vw);font-weight:300;line-height:1.2}@media (min-width: 1200px){.display-1{font-size:5rem}}.display-2{font-size:calc(1.575rem + 3.9vw);font-weight:300;line-height:1.2}@media (min-width: 1200px){.display-2{font-size:4.5rem}}.display-3{font-size:calc(1.525rem + 3.3vw);font-weight:300;line-height:1.2}@media (min-width: 1200px){.display-3{font-size:4rem}}.display-4{font-size:calc(1.475rem + 2.7vw);font-weight:300;line-height:1.2}@media (min-width: 1200px){.display-4{font-size:3.5rem}}.display-5{font-size:calc(1.425rem + 2.1vw);font-weight:300;line-height:1.2}@media (min-width: 1200px){.display-5{font-size:3rem}}.display-6{font-size:calc(1.375rem + 1.5vw);font-weight:300;line-height:1.2}@media (min-width: 
1200px){.display-6{font-size:2.5rem}}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none}.list-inline-item{display:inline-block}.list-inline-item:not(:last-child){margin-right:.5rem}.initialism{font-size:.875em;text-transform:uppercase}.blockquote{margin-bottom:1rem;font-size:1.25rem}.blockquote>:last-child{margin-bottom:0}.blockquote-footer{margin-top:-1rem;margin-bottom:1rem;font-size:.875em;color:#6c757d}.blockquote-footer::before{content:"\2014\00A0"}.img-fluid{max-width:100%;height:auto}.img-thumbnail{padding:.25rem;background-color:#fff;border:1px solid #dee2e6;border-radius:.25rem;max-width:100%;height:auto}.figure{display:inline-block}.figure-img{margin-bottom:.5rem;line-height:1}.figure-caption{font-size:.875em;color:#6c757d}.container,.container-fluid,.container-xxl,.container-xl,.container-lg,.container-md,.container-sm{width:100%;padding-right:var(--bs-gutter-x, .75rem);padding-left:var(--bs-gutter-x, .75rem);margin-right:auto;margin-left:auto}@media (min-width: 576px){.container-sm,.container{max-width:540px}}@media (min-width: 768px){.container-md,.container-sm,.container{max-width:720px}}@media (min-width: 992px){.container-lg,.container-md,.container-sm,.container{max-width:960px}}@media (min-width: 1200px){.container-xl,.container-lg,.container-md,.container-sm,.container{max-width:1140px}}@media (min-width: 1400px){.container-xxl,.container-xl,.container-lg,.container-md,.container-sm,.container{max-width:1320px}}.row{--bs-gutter-x: 1.5rem;--bs-gutter-y: 0;display:flex;flex-wrap:wrap;margin-top:calc(var(--bs-gutter-y) * -1);margin-right:calc(var(--bs-gutter-x) * -.5);margin-left:calc(var(--bs-gutter-x) * -.5)}.row>*{flex-shrink:0;width:100%;max-width:100%;padding-right:calc(var(--bs-gutter-x) * .5);padding-left:calc(var(--bs-gutter-x) * .5);margin-top:var(--bs-gutter-y)}.col{flex:1 0 0%}.row-cols-auto>*{flex:0 0 auto;width:auto}.row-cols-1>*{flex:0 0 auto;width:100%}.row-cols-2>*{flex:0 0 auto;width:50%}.row-cols-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-4>*{flex:0 0 auto;width:25%}.row-cols-5>*{flex:0 0 auto;width:20%}.row-cols-6>*{flex:0 0 auto;width:16.6666666667%}@media (min-width: 576px){.col-sm{flex:1 0 0%}.row-cols-sm-auto>*{flex:0 0 auto;width:auto}.row-cols-sm-1>*{flex:0 0 auto;width:100%}.row-cols-sm-2>*{flex:0 0 auto;width:50%}.row-cols-sm-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-sm-4>*{flex:0 0 auto;width:25%}.row-cols-sm-5>*{flex:0 0 auto;width:20%}.row-cols-sm-6>*{flex:0 0 auto;width:16.6666666667%}}@media (min-width: 768px){.col-md{flex:1 0 0%}.row-cols-md-auto>*{flex:0 0 auto;width:auto}.row-cols-md-1>*{flex:0 0 auto;width:100%}.row-cols-md-2>*{flex:0 0 auto;width:50%}.row-cols-md-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-md-4>*{flex:0 0 auto;width:25%}.row-cols-md-5>*{flex:0 0 auto;width:20%}.row-cols-md-6>*{flex:0 0 auto;width:16.6666666667%}}@media (min-width: 992px){.col-lg{flex:1 0 0%}.row-cols-lg-auto>*{flex:0 0 auto;width:auto}.row-cols-lg-1>*{flex:0 0 auto;width:100%}.row-cols-lg-2>*{flex:0 0 auto;width:50%}.row-cols-lg-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-lg-4>*{flex:0 0 auto;width:25%}.row-cols-lg-5>*{flex:0 0 auto;width:20%}.row-cols-lg-6>*{flex:0 0 auto;width:16.6666666667%}}@media (min-width: 1200px){.col-xl{flex:1 0 0%}.row-cols-xl-auto>*{flex:0 0 auto;width:auto}.row-cols-xl-1>*{flex:0 0 auto;width:100%}.row-cols-xl-2>*{flex:0 0 auto;width:50%}.row-cols-xl-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-xl-4>*{flex:0 0 auto;width:25%}.row-cols-xl-5>*{flex:0 0 
auto;width:20%}.row-cols-xl-6>*{flex:0 0 auto;width:16.6666666667%}}@media (min-width: 1400px){.col-xxl{flex:1 0 0%}.row-cols-xxl-auto>*{flex:0 0 auto;width:auto}.row-cols-xxl-1>*{flex:0 0 auto;width:100%}.row-cols-xxl-2>*{flex:0 0 auto;width:50%}.row-cols-xxl-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-xxl-4>*{flex:0 0 auto;width:25%}.row-cols-xxl-5>*{flex:0 0 auto;width:20%}.row-cols-xxl-6>*{flex:0 0 auto;width:16.6666666667%}}.col-auto{flex:0 0 auto;width:auto}.col-1{flex:0 0 auto;width:8.33333333%}.col-2{flex:0 0 auto;width:16.66666667%}.col-3{flex:0 0 auto;width:25%}.col-4{flex:0 0 auto;width:33.33333333%}.col-5{flex:0 0 auto;width:41.66666667%}.col-6{flex:0 0 auto;width:50%}.col-7{flex:0 0 auto;width:58.33333333%}.col-8{flex:0 0 auto;width:66.66666667%}.col-9{flex:0 0 auto;width:75%}.col-10{flex:0 0 auto;width:83.33333333%}.col-11{flex:0 0 auto;width:91.66666667%}.col-12{flex:0 0 auto;width:100%}.offset-1{margin-left:8.33333333%}.offset-2{margin-left:16.66666667%}.offset-3{margin-left:25%}.offset-4{margin-left:33.33333333%}.offset-5{margin-left:41.66666667%}.offset-6{margin-left:50%}.offset-7{margin-left:58.33333333%}.offset-8{margin-left:66.66666667%}.offset-9{margin-left:75%}.offset-10{margin-left:83.33333333%}.offset-11{margin-left:91.66666667%}.g-0,.gx-0{--bs-gutter-x: 0}.g-0,.gy-0{--bs-gutter-y: 0}.g-1,.gx-1{--bs-gutter-x: .25rem}.g-1,.gy-1{--bs-gutter-y: .25rem}.g-2,.gx-2{--bs-gutter-x: .5rem}.g-2,.gy-2{--bs-gutter-y: .5rem}.g-3,.gx-3{--bs-gutter-x: 1rem}.g-3,.gy-3{--bs-gutter-y: 1rem}.g-4,.gx-4{--bs-gutter-x: 1.5rem}.g-4,.gy-4{--bs-gutter-y: 1.5rem}.g-5,.gx-5{--bs-gutter-x: 3rem}.g-5,.gy-5{--bs-gutter-y: 3rem}@media (min-width: 576px){.col-sm-auto{flex:0 0 auto;width:auto}.col-sm-1{flex:0 0 auto;width:8.33333333%}.col-sm-2{flex:0 0 auto;width:16.66666667%}.col-sm-3{flex:0 0 auto;width:25%}.col-sm-4{flex:0 0 auto;width:33.33333333%}.col-sm-5{flex:0 0 auto;width:41.66666667%}.col-sm-6{flex:0 0 auto;width:50%}.col-sm-7{flex:0 0 auto;width:58.33333333%}.col-sm-8{flex:0 0 auto;width:66.66666667%}.col-sm-9{flex:0 0 auto;width:75%}.col-sm-10{flex:0 0 auto;width:83.33333333%}.col-sm-11{flex:0 0 auto;width:91.66666667%}.col-sm-12{flex:0 0 auto;width:100%}.offset-sm-0{margin-left:0}.offset-sm-1{margin-left:8.33333333%}.offset-sm-2{margin-left:16.66666667%}.offset-sm-3{margin-left:25%}.offset-sm-4{margin-left:33.33333333%}.offset-sm-5{margin-left:41.66666667%}.offset-sm-6{margin-left:50%}.offset-sm-7{margin-left:58.33333333%}.offset-sm-8{margin-left:66.66666667%}.offset-sm-9{margin-left:75%}.offset-sm-10{margin-left:83.33333333%}.offset-sm-11{margin-left:91.66666667%}.g-sm-0,.gx-sm-0{--bs-gutter-x: 0}.g-sm-0,.gy-sm-0{--bs-gutter-y: 0}.g-sm-1,.gx-sm-1{--bs-gutter-x: .25rem}.g-sm-1,.gy-sm-1{--bs-gutter-y: .25rem}.g-sm-2,.gx-sm-2{--bs-gutter-x: .5rem}.g-sm-2,.gy-sm-2{--bs-gutter-y: .5rem}.g-sm-3,.gx-sm-3{--bs-gutter-x: 1rem}.g-sm-3,.gy-sm-3{--bs-gutter-y: 1rem}.g-sm-4,.gx-sm-4{--bs-gutter-x: 1.5rem}.g-sm-4,.gy-sm-4{--bs-gutter-y: 1.5rem}.g-sm-5,.gx-sm-5{--bs-gutter-x: 3rem}.g-sm-5,.gy-sm-5{--bs-gutter-y: 3rem}}@media (min-width: 768px){.col-md-auto{flex:0 0 auto;width:auto}.col-md-1{flex:0 0 auto;width:8.33333333%}.col-md-2{flex:0 0 auto;width:16.66666667%}.col-md-3{flex:0 0 auto;width:25%}.col-md-4{flex:0 0 auto;width:33.33333333%}.col-md-5{flex:0 0 auto;width:41.66666667%}.col-md-6{flex:0 0 auto;width:50%}.col-md-7{flex:0 0 auto;width:58.33333333%}.col-md-8{flex:0 0 auto;width:66.66666667%}.col-md-9{flex:0 0 auto;width:75%}.col-md-10{flex:0 0 auto;width:83.33333333%}.col-md-11{flex:0 
0 auto;width:91.66666667%}.col-md-12{flex:0 0 auto;width:100%}.offset-md-0{margin-left:0}.offset-md-1{margin-left:8.33333333%}.offset-md-2{margin-left:16.66666667%}.offset-md-3{margin-left:25%}.offset-md-4{margin-left:33.33333333%}.offset-md-5{margin-left:41.66666667%}.offset-md-6{margin-left:50%}.offset-md-7{margin-left:58.33333333%}.offset-md-8{margin-left:66.66666667%}.offset-md-9{margin-left:75%}.offset-md-10{margin-left:83.33333333%}.offset-md-11{margin-left:91.66666667%}.g-md-0,.gx-md-0{--bs-gutter-x: 0}.g-md-0,.gy-md-0{--bs-gutter-y: 0}.g-md-1,.gx-md-1{--bs-gutter-x: .25rem}.g-md-1,.gy-md-1{--bs-gutter-y: .25rem}.g-md-2,.gx-md-2{--bs-gutter-x: .5rem}.g-md-2,.gy-md-2{--bs-gutter-y: .5rem}.g-md-3,.gx-md-3{--bs-gutter-x: 1rem}.g-md-3,.gy-md-3{--bs-gutter-y: 1rem}.g-md-4,.gx-md-4{--bs-gutter-x: 1.5rem}.g-md-4,.gy-md-4{--bs-gutter-y: 1.5rem}.g-md-5,.gx-md-5{--bs-gutter-x: 3rem}.g-md-5,.gy-md-5{--bs-gutter-y: 3rem}}@media (min-width: 992px){.col-lg-auto{flex:0 0 auto;width:auto}.col-lg-1{flex:0 0 auto;width:8.33333333%}.col-lg-2{flex:0 0 auto;width:16.66666667%}.col-lg-3{flex:0 0 auto;width:25%}.col-lg-4{flex:0 0 auto;width:33.33333333%}.col-lg-5{flex:0 0 auto;width:41.66666667%}.col-lg-6{flex:0 0 auto;width:50%}.col-lg-7{flex:0 0 auto;width:58.33333333%}.col-lg-8{flex:0 0 auto;width:66.66666667%}.col-lg-9{flex:0 0 auto;width:75%}.col-lg-10{flex:0 0 auto;width:83.33333333%}.col-lg-11{flex:0 0 auto;width:91.66666667%}.col-lg-12{flex:0 0 auto;width:100%}.offset-lg-0{margin-left:0}.offset-lg-1{margin-left:8.33333333%}.offset-lg-2{margin-left:16.66666667%}.offset-lg-3{margin-left:25%}.offset-lg-4{margin-left:33.33333333%}.offset-lg-5{margin-left:41.66666667%}.offset-lg-6{margin-left:50%}.offset-lg-7{margin-left:58.33333333%}.offset-lg-8{margin-left:66.66666667%}.offset-lg-9{margin-left:75%}.offset-lg-10{margin-left:83.33333333%}.offset-lg-11{margin-left:91.66666667%}.g-lg-0,.gx-lg-0{--bs-gutter-x: 0}.g-lg-0,.gy-lg-0{--bs-gutter-y: 0}.g-lg-1,.gx-lg-1{--bs-gutter-x: .25rem}.g-lg-1,.gy-lg-1{--bs-gutter-y: .25rem}.g-lg-2,.gx-lg-2{--bs-gutter-x: .5rem}.g-lg-2,.gy-lg-2{--bs-gutter-y: .5rem}.g-lg-3,.gx-lg-3{--bs-gutter-x: 1rem}.g-lg-3,.gy-lg-3{--bs-gutter-y: 1rem}.g-lg-4,.gx-lg-4{--bs-gutter-x: 1.5rem}.g-lg-4,.gy-lg-4{--bs-gutter-y: 1.5rem}.g-lg-5,.gx-lg-5{--bs-gutter-x: 3rem}.g-lg-5,.gy-lg-5{--bs-gutter-y: 3rem}}@media (min-width: 1200px){.col-xl-auto{flex:0 0 auto;width:auto}.col-xl-1{flex:0 0 auto;width:8.33333333%}.col-xl-2{flex:0 0 auto;width:16.66666667%}.col-xl-3{flex:0 0 auto;width:25%}.col-xl-4{flex:0 0 auto;width:33.33333333%}.col-xl-5{flex:0 0 auto;width:41.66666667%}.col-xl-6{flex:0 0 auto;width:50%}.col-xl-7{flex:0 0 auto;width:58.33333333%}.col-xl-8{flex:0 0 auto;width:66.66666667%}.col-xl-9{flex:0 0 auto;width:75%}.col-xl-10{flex:0 0 auto;width:83.33333333%}.col-xl-11{flex:0 0 auto;width:91.66666667%}.col-xl-12{flex:0 0 auto;width:100%}.offset-xl-0{margin-left:0}.offset-xl-1{margin-left:8.33333333%}.offset-xl-2{margin-left:16.66666667%}.offset-xl-3{margin-left:25%}.offset-xl-4{margin-left:33.33333333%}.offset-xl-5{margin-left:41.66666667%}.offset-xl-6{margin-left:50%}.offset-xl-7{margin-left:58.33333333%}.offset-xl-8{margin-left:66.66666667%}.offset-xl-9{margin-left:75%}.offset-xl-10{margin-left:83.33333333%}.offset-xl-11{margin-left:91.66666667%}.g-xl-0,.gx-xl-0{--bs-gutter-x: 0}.g-xl-0,.gy-xl-0{--bs-gutter-y: 0}.g-xl-1,.gx-xl-1{--bs-gutter-x: .25rem}.g-xl-1,.gy-xl-1{--bs-gutter-y: .25rem}.g-xl-2,.gx-xl-2{--bs-gutter-x: .5rem}.g-xl-2,.gy-xl-2{--bs-gutter-y: 
.5rem}.g-xl-3,.gx-xl-3{--bs-gutter-x: 1rem}.g-xl-3,.gy-xl-3{--bs-gutter-y: 1rem}.g-xl-4,.gx-xl-4{--bs-gutter-x: 1.5rem}.g-xl-4,.gy-xl-4{--bs-gutter-y: 1.5rem}.g-xl-5,.gx-xl-5{--bs-gutter-x: 3rem}.g-xl-5,.gy-xl-5{--bs-gutter-y: 3rem}}@media (min-width: 1400px){.col-xxl-auto{flex:0 0 auto;width:auto}.col-xxl-1{flex:0 0 auto;width:8.33333333%}.col-xxl-2{flex:0 0 auto;width:16.66666667%}.col-xxl-3{flex:0 0 auto;width:25%}.col-xxl-4{flex:0 0 auto;width:33.33333333%}.col-xxl-5{flex:0 0 auto;width:41.66666667%}.col-xxl-6{flex:0 0 auto;width:50%}.col-xxl-7{flex:0 0 auto;width:58.33333333%}.col-xxl-8{flex:0 0 auto;width:66.66666667%}.col-xxl-9{flex:0 0 auto;width:75%}.col-xxl-10{flex:0 0 auto;width:83.33333333%}.col-xxl-11{flex:0 0 auto;width:91.66666667%}.col-xxl-12{flex:0 0 auto;width:100%}.offset-xxl-0{margin-left:0}.offset-xxl-1{margin-left:8.33333333%}.offset-xxl-2{margin-left:16.66666667%}.offset-xxl-3{margin-left:25%}.offset-xxl-4{margin-left:33.33333333%}.offset-xxl-5{margin-left:41.66666667%}.offset-xxl-6{margin-left:50%}.offset-xxl-7{margin-left:58.33333333%}.offset-xxl-8{margin-left:66.66666667%}.offset-xxl-9{margin-left:75%}.offset-xxl-10{margin-left:83.33333333%}.offset-xxl-11{margin-left:91.66666667%}.g-xxl-0,.gx-xxl-0{--bs-gutter-x: 0}.g-xxl-0,.gy-xxl-0{--bs-gutter-y: 0}.g-xxl-1,.gx-xxl-1{--bs-gutter-x: .25rem}.g-xxl-1,.gy-xxl-1{--bs-gutter-y: .25rem}.g-xxl-2,.gx-xxl-2{--bs-gutter-x: .5rem}.g-xxl-2,.gy-xxl-2{--bs-gutter-y: .5rem}.g-xxl-3,.gx-xxl-3{--bs-gutter-x: 1rem}.g-xxl-3,.gy-xxl-3{--bs-gutter-y: 1rem}.g-xxl-4,.gx-xxl-4{--bs-gutter-x: 1.5rem}.g-xxl-4,.gy-xxl-4{--bs-gutter-y: 1.5rem}.g-xxl-5,.gx-xxl-5{--bs-gutter-x: 3rem}.g-xxl-5,.gy-xxl-5{--bs-gutter-y: 3rem}}.table{--bs-table-bg: rgba(0,0,0,0);--bs-table-accent-bg: rgba(0,0,0,0);--bs-table-striped-color: #212529;--bs-table-striped-bg: rgba(0,0,0,0.05);--bs-table-active-color: #212529;--bs-table-active-bg: rgba(0,0,0,0.1);--bs-table-hover-color: #212529;--bs-table-hover-bg: rgba(0,0,0,0.075);width:100%;margin-bottom:1rem;color:#212529;vertical-align:top;border-color:#dee2e6}.table>:not(caption)>*>*{padding:.5rem .5rem;background-color:var(--bs-table-bg);border-bottom-width:1px;box-shadow:inset 0 0 0 9999px var(--bs-table-accent-bg)}.table>tbody{vertical-align:inherit}.table>thead{vertical-align:bottom}.table>:not(:last-child)>:last-child>*{border-bottom-color:currentColor}.caption-top{caption-side:top}.table-sm>:not(caption)>*>*{padding:.25rem .25rem}.table-bordered>:not(caption)>*{border-width:1px 0}.table-bordered>:not(caption)>*>*{border-width:0 1px}.table-borderless>:not(caption)>*>*{border-bottom-width:0}.table-striped>tbody>tr:nth-of-type(odd){--bs-table-accent-bg: var(--bs-table-striped-bg);color:var(--bs-table-striped-color)}.table-active{--bs-table-accent-bg: var(--bs-table-active-bg);color:var(--bs-table-active-color)}.table-hover>tbody>tr:hover{--bs-table-accent-bg: var(--bs-table-hover-bg);color:var(--bs-table-hover-color)}.table-primary{--bs-table-bg: #f3cdce;--bs-table-striped-bg: #e7c3c4;--bs-table-striped-color: #000;--bs-table-active-bg: #dbb9b9;--bs-table-active-color: #000;--bs-table-hover-bg: #e1bebf;--bs-table-hover-color: #000;color:#000;border-color:#dbb9b9}.table-secondary{--bs-table-bg: #e2e3e5;--bs-table-striped-bg: #d7d8da;--bs-table-striped-color: #000;--bs-table-active-bg: #cbccce;--bs-table-active-color: #000;--bs-table-hover-bg: #d1d2d4;--bs-table-hover-color: #000;color:#000;border-color:#cbccce}.table-success{--bs-table-bg: #d1e7dd;--bs-table-striped-bg: #c7dbd2;--bs-table-striped-color: 
#000;--bs-table-active-bg: #bcd0c7;--bs-table-active-color: #000;--bs-table-hover-bg: #c1d6cc;--bs-table-hover-color: #000;color:#000;border-color:#bcd0c7}.table-info{--bs-table-bg: #cff4fc;--bs-table-striped-bg: #c5e8ef;--bs-table-striped-color: #000;--bs-table-active-bg: #badce3;--bs-table-active-color: #000;--bs-table-hover-bg: #bfe2e9;--bs-table-hover-color: #000;color:#000;border-color:#badce3}.table-warning{--bs-table-bg: #fff3cd;--bs-table-striped-bg: #f2e7c3;--bs-table-striped-color: #000;--bs-table-active-bg: #e6dbb9;--bs-table-active-color: #000;--bs-table-hover-bg: #ece1be;--bs-table-hover-color: #000;color:#000;border-color:#e6dbb9}.table-danger{--bs-table-bg: #f8d7da;--bs-table-striped-bg: #eccccf;--bs-table-striped-color: #000;--bs-table-active-bg: #dfc2c4;--bs-table-active-color: #000;--bs-table-hover-bg: #e5c7ca;--bs-table-hover-color: #000;color:#000;border-color:#dfc2c4}.table-light{--bs-table-bg: #f8f9fa;--bs-table-striped-bg: #ecedee;--bs-table-striped-color: #000;--bs-table-active-bg: #dfe0e1;--bs-table-active-color: #000;--bs-table-hover-bg: #e5e6e7;--bs-table-hover-color: #000;color:#000;border-color:#dfe0e1}.table-dark{--bs-table-bg: #212529;--bs-table-striped-bg: #2c3034;--bs-table-striped-color: #fff;--bs-table-active-bg: #373b3e;--bs-table-active-color: #fff;--bs-table-hover-bg: #323539;--bs-table-hover-color: #fff;color:#fff;border-color:#373b3e}.table-responsive{overflow-x:auto;-webkit-overflow-scrolling:touch}@media (max-width: 575.98px){.table-responsive-sm{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media (max-width: 767.98px){.table-responsive-md{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media (max-width: 991.98px){.table-responsive-lg{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media (max-width: 1199.98px){.table-responsive-xl{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media (max-width: 1399.98px){.table-responsive-xxl{overflow-x:auto;-webkit-overflow-scrolling:touch}}.form-label{margin-bottom:.5rem}.col-form-label{padding-top:calc(.375rem + 1px);padding-bottom:calc(.375rem + 1px);margin-bottom:0;font-size:inherit;line-height:1.5}.col-form-label-lg{padding-top:calc(.5rem + 1px);padding-bottom:calc(.5rem + 1px);font-size:1.25rem}.col-form-label-sm{padding-top:calc(.25rem + 1px);padding-bottom:calc(.25rem + 1px);font-size:.875rem}.form-text{margin-top:.25rem;font-size:.875em;color:#6c757d}.form-control{display:block;width:100%;padding:.375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:#212529;background-color:#fff;background-clip:padding-box;border:1px solid #ced4da;appearance:none;border-radius:.25rem;transition:border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out}@media (prefers-reduced-motion: reduce){.form-control{transition:none}}.form-control[type="file"]{overflow:hidden}.form-control[type="file"]:not(:disabled):not([readonly]){cursor:pointer}.form-control:focus{color:#212529;background-color:#fff;border-color:#e28286;outline:0;box-shadow:0 0 0 .25rem rgba(197,5,12,0.25)}.form-control::-webkit-date-and-time-value{height:1.5em}.form-control::placeholder{color:#6c757d;opacity:1}.form-control:disabled,.form-control[readonly]{background-color:#e9ecef;opacity:1}.form-control::file-selector-button{padding:.375rem .75rem;margin:-.375rem -.75rem;margin-inline-end:.75rem;color:#212529;background-color:#e9ecef;pointer-events:none;border-color:inherit;border-style:solid;border-width:0;border-inline-end-width:1px;border-radius:0;transition:color 0.15s ease-in-out,background-color 0.15s ease-in-out,border-color 
0.15s ease-in-out,box-shadow 0.15s ease-in-out}@media (prefers-reduced-motion: reduce){.form-control::file-selector-button{transition:none}}.form-control:hover:not(:disabled):not([readonly])::file-selector-button{background-color:#dde0e3}.form-control::-webkit-file-upload-button{padding:.375rem .75rem;margin:-.375rem -.75rem;margin-inline-end:.75rem;color:#212529;background-color:#e9ecef;pointer-events:none;border-color:inherit;border-style:solid;border-width:0;border-inline-end-width:1px;border-radius:0;transition:color 0.15s ease-in-out,background-color 0.15s ease-in-out,border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out}@media (prefers-reduced-motion: reduce){.form-control::-webkit-file-upload-button{transition:none}}.form-control:hover:not(:disabled):not([readonly])::-webkit-file-upload-button{background-color:#dde0e3}.form-control-plaintext{display:block;width:100%;padding:.375rem 0;margin-bottom:0;line-height:1.5;color:#212529;background-color:transparent;border:solid transparent;border-width:1px 0}.form-control-plaintext.form-control-sm,.form-control-plaintext.form-control-lg{padding-right:0;padding-left:0}.form-control-sm{min-height:calc(1.5em + (.5rem + 2px));padding:.25rem .5rem;font-size:.875rem;border-radius:.2rem}.form-control-sm::file-selector-button{padding:.25rem .5rem;margin:-.25rem -.5rem;margin-inline-end:.5rem}.form-control-sm::-webkit-file-upload-button{padding:.25rem .5rem;margin:-.25rem -.5rem;margin-inline-end:.5rem}.form-control-lg{min-height:calc(1.5em + (1rem + 2px));padding:.5rem 1rem;font-size:1.25rem;border-radius:.3rem}.form-control-lg::file-selector-button{padding:.5rem 1rem;margin:-.5rem -1rem;margin-inline-end:1rem}.form-control-lg::-webkit-file-upload-button{padding:.5rem 1rem;margin:-.5rem -1rem;margin-inline-end:1rem}textarea.form-control{min-height:calc(1.5em + (.75rem + 2px))}textarea.form-control-sm{min-height:calc(1.5em + (.5rem + 2px))}textarea.form-control-lg{min-height:calc(1.5em + (1rem + 2px))}.form-control-color{max-width:3rem;height:auto;padding:.375rem}.form-control-color:not(:disabled):not([readonly]){cursor:pointer}.form-control-color::-moz-color-swatch{height:1.5em;border-radius:.25rem}.form-control-color::-webkit-color-swatch{height:1.5em;border-radius:.25rem}.form-select{display:block;width:100%;padding:.375rem 2.25rem .375rem .75rem;-moz-padding-start:calc(.75rem - 3px);font-size:1rem;font-weight:400;line-height:1.5;color:#212529;background-color:#fff;background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cpath fill='none' stroke='%23343a40' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='M2 5l6 6 6-6'/%3e%3c/svg%3e");background-repeat:no-repeat;background-position:right .75rem center;background-size:16px 12px;border:1px solid #ced4da;border-radius:.25rem;transition:border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out;appearance:none}@media (prefers-reduced-motion: reduce){.form-select{transition:none}}.form-select:focus{border-color:#e28286;outline:0;box-shadow:0 0 0 .25rem rgba(197,5,12,0.25)}.form-select[multiple],.form-select[size]:not([size="1"]){padding-right:.75rem;background-image:none}.form-select:disabled{background-color:#e9ecef}.form-select:-moz-focusring{color:transparent;text-shadow:0 0 0 
#212529}.form-select-sm{padding-top:.25rem;padding-bottom:.25rem;padding-left:.5rem;font-size:.875rem}.form-select-lg{padding-top:.5rem;padding-bottom:.5rem;padding-left:1rem;font-size:1.25rem}.form-check{display:block;min-height:1.5rem;padding-left:1.5em;margin-bottom:.125rem}.form-check .form-check-input{float:left;margin-left:-1.5em}.form-check-input{width:1em;height:1em;margin-top:.25em;vertical-align:top;background-color:#fff;background-repeat:no-repeat;background-position:center;background-size:contain;border:1px solid rgba(0,0,0,0.25);appearance:none;color-adjust:exact}.form-check-input[type="checkbox"]{border-radius:.25em}.form-check-input[type="radio"]{border-radius:50%}.form-check-input:active{filter:brightness(90%)}.form-check-input:focus{border-color:#e28286;outline:0;box-shadow:0 0 0 .25rem rgba(197,5,12,0.25)}.form-check-input:checked{background-color:#c5050c;border-color:#c5050c}.form-check-input[type="checkbox"]:checked{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 20 20'%3e%3cpath fill='none' stroke='%23fff' stroke-linecap='round' stroke-linejoin='round' stroke-width='3' d='M6 10l3 3l6-6'/%3e%3c/svg%3e")}.form-check-input[type="radio"]:checked{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='2' fill='%23fff'/%3e%3c/svg%3e")}.form-check-input[type="checkbox"]:indeterminate{background-color:#c5050c;border-color:#c5050c;background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 20 20'%3e%3cpath fill='none' stroke='%23fff' stroke-linecap='round' stroke-linejoin='round' stroke-width='3' d='M6 10h8'/%3e%3c/svg%3e")}.form-check-input:disabled{pointer-events:none;filter:none;opacity:.5}.form-check-input[disabled]~.form-check-label,.form-check-input:disabled~.form-check-label{opacity:.5}.form-switch{padding-left:2.5em}.form-switch .form-check-input{width:2em;margin-left:-2.5em;background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='rgba%280,0,0,0.25%29'/%3e%3c/svg%3e");background-position:left center;border-radius:2em;transition:background-position 0.15s ease-in-out}@media (prefers-reduced-motion: reduce){.form-switch .form-check-input{transition:none}}.form-switch .form-check-input:focus{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='%23e28286'/%3e%3c/svg%3e")}.form-switch .form-check-input:checked{background-position:right center;background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='%23fff'/%3e%3c/svg%3e")}.form-check-inline{display:inline-block;margin-right:1rem}.btn-check{position:absolute;clip:rect(0, 0, 0, 0);pointer-events:none}.btn-check[disabled]+.btn,.btn-check:disabled+.btn{pointer-events:none;filter:none;opacity:.65}.form-range{width:100%;height:1.5rem;padding:0;background-color:transparent;appearance:none}.form-range:focus{outline:0}.form-range:focus::-webkit-slider-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .25rem rgba(197,5,12,0.25)}.form-range:focus::-moz-range-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .25rem rgba(197,5,12,0.25)}.form-range::-moz-focus-outer{border:0}.form-range::-webkit-slider-thumb{width:1rem;height:1rem;margin-top:-.25rem;background-color:#c5050c;border:0;border-radius:1rem;transition:background-color 0.15s ease-in-out,border-color 0.15s ease-in-out,box-shadow 0.15s 
ease-in-out;appearance:none}@media (prefers-reduced-motion: reduce){.form-range::-webkit-slider-thumb{transition:none}}.form-range::-webkit-slider-thumb:active{background-color:#eeb4b6}.form-range::-webkit-slider-runnable-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.form-range::-moz-range-thumb{width:1rem;height:1rem;background-color:#c5050c;border:0;border-radius:1rem;transition:background-color 0.15s ease-in-out,border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out;appearance:none}@media (prefers-reduced-motion: reduce){.form-range::-moz-range-thumb{transition:none}}.form-range::-moz-range-thumb:active{background-color:#eeb4b6}.form-range::-moz-range-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.form-range:disabled{pointer-events:none}.form-range:disabled::-webkit-slider-thumb{background-color:#adb5bd}.form-range:disabled::-moz-range-thumb{background-color:#adb5bd}.form-floating{position:relative}.form-floating>.form-control,.form-floating>.form-select{height:calc(3.5rem + 2px);line-height:1.25}.form-floating>label{position:absolute;top:0;left:0;height:100%;padding:1rem .75rem;pointer-events:none;border:1px solid transparent;transform-origin:0 0;transition:opacity 0.1s ease-in-out,transform 0.1s ease-in-out}@media (prefers-reduced-motion: reduce){.form-floating>label{transition:none}}.form-floating>.form-control{padding:1rem .75rem}.form-floating>.form-control::placeholder{color:transparent}.form-floating>.form-control:focus,.form-floating>.form-control:not(:placeholder-shown){padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-control:-webkit-autofill{padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-select{padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-control:focus~label,.form-floating>.form-control:not(:placeholder-shown)~label,.form-floating>.form-select~label{opacity:.65;transform:scale(0.85) translateY(-0.5rem) translateX(0.15rem)}.form-floating>.form-control:-webkit-autofill~label{opacity:.65;transform:scale(0.85) translateY(-0.5rem) translateX(0.15rem)}.input-group{position:relative;display:flex;flex-wrap:wrap;align-items:stretch;width:100%}.input-group>.form-control,.input-group>.form-select{position:relative;flex:1 1 auto;width:1%;min-width:0}.input-group>.form-control:focus,.input-group>.form-select:focus{z-index:3}.input-group .btn{position:relative;z-index:2}.input-group .btn:focus{z-index:3}.input-group-text{display:flex;align-items:center;padding:.375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:#212529;text-align:center;white-space:nowrap;background-color:#e9ecef;border:1px solid #ced4da;border-radius:.25rem}.input-group-lg>.form-control,.input-group-lg>.form-select,.input-group-lg>.input-group-text,.input-group-lg>.btn{padding:.5rem 1rem;font-size:1.25rem;border-radius:.3rem}.input-group-sm>.form-control,.input-group-sm>.form-select,.input-group-sm>.input-group-text,.input-group-sm>.btn{padding:.25rem .5rem;font-size:.875rem;border-radius:.2rem}.input-group-lg>.form-select,.input-group-sm>.form-select{padding-right:3rem}.input-group:not(.has-validation)>:not(:last-child):not(.dropdown-toggle):not(.dropdown-menu),.input-group:not(.has-validation)>.dropdown-toggle:nth-last-child(n + 3){border-top-right-radius:0;border-bottom-right-radius:0}.input-group.has-validation>:nth-last-child(n + 
3):not(.dropdown-toggle):not(.dropdown-menu),.input-group.has-validation>.dropdown-toggle:nth-last-child(n + 4){border-top-right-radius:0;border-bottom-right-radius:0}.input-group>:not(:first-child):not(.dropdown-menu):not(.valid-tooltip):not(.valid-feedback):not(.invalid-tooltip):not(.invalid-feedback){margin-left:-1px;border-top-left-radius:0;border-bottom-left-radius:0}.valid-feedback{display:none;width:100%;margin-top:.25rem;font-size:.875em;color:#198754}.valid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:.875rem;color:#fff;background-color:rgba(25,135,84,0.9);border-radius:.25rem}.was-validated :valid~.valid-feedback,.was-validated :valid~.valid-tooltip,.is-valid~.valid-feedback,.is-valid~.valid-tooltip{display:block}.was-validated .form-control:valid,.form-control.is-valid{border-color:#198754;padding-right:calc(1.5em + .75rem);background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%23198754' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e");background-repeat:no-repeat;background-position:right calc(.375em + .1875rem) center;background-size:calc(.75em + .375rem) calc(.75em + .375rem)}.was-validated .form-control:valid:focus,.form-control.is-valid:focus{border-color:#198754;box-shadow:0 0 0 .25rem rgba(25,135,84,0.25)}.was-validated textarea.form-control:valid,textarea.form-control.is-valid{padding-right:calc(1.5em + .75rem);background-position:top calc(.375em + .1875rem) right calc(.375em + .1875rem)}.was-validated .form-select:valid,.form-select.is-valid{border-color:#198754}.was-validated .form-select:valid:not([multiple]):not([size]),.was-validated .form-select[size="1"]:valid:not([multiple]),.form-select.is-valid:not([multiple]):not([size]),.form-select.is-valid[size="1"]:not([multiple]){padding-right:4.125rem;background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cpath fill='none' stroke='%23343a40' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='M2 5l6 6 6-6'/%3e%3c/svg%3e"),url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%23198754' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e");background-position:right .75rem center,center right 2.25rem;background-size:16px 12px,calc(.75em + .375rem) calc(.75em + .375rem)}.was-validated .form-select:valid:focus,.form-select.is-valid:focus{border-color:#198754;box-shadow:0 0 0 .25rem rgba(25,135,84,0.25)}.was-validated .form-check-input:valid,.form-check-input.is-valid{border-color:#198754}.was-validated .form-check-input:valid:checked,.form-check-input.is-valid:checked{background-color:#198754}.was-validated .form-check-input:valid:focus,.form-check-input.is-valid:focus{box-shadow:0 0 0 .25rem rgba(25,135,84,0.25)}.was-validated .form-check-input:valid~.form-check-label,.form-check-input.is-valid~.form-check-label{color:#198754}.form-check-inline .form-check-input~.valid-feedback{margin-left:.5em}.was-validated .input-group .form-control:valid,.input-group .form-control.is-valid,.was-validated .input-group .form-select:valid,.input-group .form-select.is-valid{z-index:1}.was-validated .input-group .form-control:valid:focus,.input-group .form-control.is-valid:focus,.was-validated .input-group .form-select:valid:focus,.input-group 
.form-select.is-valid:focus{z-index:3}.invalid-feedback{display:none;width:100%;margin-top:.25rem;font-size:.875em;color:#dc3545}.invalid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:.875rem;color:#fff;background-color:rgba(220,53,69,0.9);border-radius:.25rem}.was-validated :invalid~.invalid-feedback,.was-validated :invalid~.invalid-tooltip,.is-invalid~.invalid-feedback,.is-invalid~.invalid-tooltip{display:block}.was-validated .form-control:invalid,.form-control.is-invalid{border-color:#dc3545;padding-right:calc(1.5em + .75rem);background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 12 12' width='12' height='12' fill='none' stroke='%23dc3545'%3e%3ccircle cx='6' cy='6' r='4.5'/%3e%3cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3e%3ccircle cx='6' cy='8.2' r='.6' fill='%23dc3545' stroke='none'/%3e%3c/svg%3e");background-repeat:no-repeat;background-position:right calc(.375em + .1875rem) center;background-size:calc(.75em + .375rem) calc(.75em + .375rem)}.was-validated .form-control:invalid:focus,.form-control.is-invalid:focus{border-color:#dc3545;box-shadow:0 0 0 .25rem rgba(220,53,69,0.25)}.was-validated textarea.form-control:invalid,textarea.form-control.is-invalid{padding-right:calc(1.5em + .75rem);background-position:top calc(.375em + .1875rem) right calc(.375em + .1875rem)}.was-validated .form-select:invalid,.form-select.is-invalid{border-color:#dc3545}.was-validated .form-select:invalid:not([multiple]):not([size]),.was-validated .form-select[size="1"]:invalid:not([multiple]),.form-select.is-invalid:not([multiple]):not([size]),.form-select.is-invalid[size="1"]:not([multiple]){padding-right:4.125rem;background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cpath fill='none' stroke='%23343a40' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='M2 5l6 6 6-6'/%3e%3c/svg%3e"),url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 12 12' width='12' height='12' fill='none' stroke='%23dc3545'%3e%3ccircle cx='6' cy='6' r='4.5'/%3e%3cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3e%3ccircle cx='6' cy='8.2' r='.6' fill='%23dc3545' stroke='none'/%3e%3c/svg%3e");background-position:right .75rem center,center right 2.25rem;background-size:16px 12px,calc(.75em + .375rem) calc(.75em + .375rem)}.was-validated .form-select:invalid:focus,.form-select.is-invalid:focus{border-color:#dc3545;box-shadow:0 0 0 .25rem rgba(220,53,69,0.25)}.was-validated .form-check-input:invalid,.form-check-input.is-invalid{border-color:#dc3545}.was-validated .form-check-input:invalid:checked,.form-check-input.is-invalid:checked{background-color:#dc3545}.was-validated .form-check-input:invalid:focus,.form-check-input.is-invalid:focus{box-shadow:0 0 0 .25rem rgba(220,53,69,0.25)}.was-validated .form-check-input:invalid~.form-check-label,.form-check-input.is-invalid~.form-check-label{color:#dc3545}.form-check-inline .form-check-input~.invalid-feedback{margin-left:.5em}.was-validated .input-group .form-control:invalid,.input-group .form-control.is-invalid,.was-validated .input-group .form-select:invalid,.input-group .form-select.is-invalid{z-index:2}.was-validated .input-group .form-control:invalid:focus,.input-group .form-control.is-invalid:focus,.was-validated .input-group .form-select:invalid:focus,.input-group 
.form-select.is-invalid:focus{z-index:3}.btn{display:inline-block;font-weight:400;line-height:1.5;color:#212529;text-align:center;text-decoration:none;vertical-align:middle;cursor:pointer;user-select:none;background-color:transparent;border:1px solid transparent;padding:.375rem .75rem;font-size:1rem;border-radius:.25rem;transition:color 0.15s ease-in-out,background-color 0.15s ease-in-out,border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out}@media (prefers-reduced-motion: reduce){.btn{transition:none}}.btn:hover{color:#212529}.btn-check:focus+.btn,.btn:focus{outline:0;box-shadow:0 0 0 .25rem rgba(197,5,12,0.25)}.btn:disabled,.btn.disabled,fieldset:disabled .btn{pointer-events:none;opacity:.65}.btn-primary{color:#fff;background-color:#c5050c;border-color:#c5050c}.btn-primary:hover{color:#fff;background-color:#a7040a;border-color:#9e040a}.btn-check:focus+.btn-primary,.btn-primary:focus{color:#fff;background-color:#a7040a;border-color:#9e040a;box-shadow:0 0 0 .25rem rgba(206,43,48,0.5)}.btn-check:checked+.btn-primary,.btn-check:active+.btn-primary,.btn-primary:active,.btn-primary.active,.show>.btn-primary.dropdown-toggle{color:#fff;background-color:#9e040a;border-color:#940409}.btn-check:checked+.btn-primary:focus,.btn-check:active+.btn-primary:focus,.btn-primary:active:focus,.btn-primary.active:focus,.show>.btn-primary.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(206,43,48,0.5)}.btn-primary:disabled,.btn-primary.disabled{color:#fff;background-color:#c5050c;border-color:#c5050c}.btn-secondary{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-secondary:hover{color:#fff;background-color:#5c636a;border-color:#565e64}.btn-check:focus+.btn-secondary,.btn-secondary:focus{color:#fff;background-color:#5c636a;border-color:#565e64;box-shadow:0 0 0 .25rem rgba(130,138,145,0.5)}.btn-check:checked+.btn-secondary,.btn-check:active+.btn-secondary,.btn-secondary:active,.btn-secondary.active,.show>.btn-secondary.dropdown-toggle{color:#fff;background-color:#565e64;border-color:#51585e}.btn-check:checked+.btn-secondary:focus,.btn-check:active+.btn-secondary:focus,.btn-secondary:active:focus,.btn-secondary.active:focus,.show>.btn-secondary.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(130,138,145,0.5)}.btn-secondary:disabled,.btn-secondary.disabled{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-success{color:#fff;background-color:#198754;border-color:#198754}.btn-success:hover{color:#fff;background-color:#157347;border-color:#146c43}.btn-check:focus+.btn-success,.btn-success:focus{color:#fff;background-color:#157347;border-color:#146c43;box-shadow:0 0 0 .25rem rgba(60,153,110,0.5)}.btn-check:checked+.btn-success,.btn-check:active+.btn-success,.btn-success:active,.btn-success.active,.show>.btn-success.dropdown-toggle{color:#fff;background-color:#146c43;border-color:#13653f}.btn-check:checked+.btn-success:focus,.btn-check:active+.btn-success:focus,.btn-success:active:focus,.btn-success.active:focus,.show>.btn-success.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(60,153,110,0.5)}.btn-success:disabled,.btn-success.disabled{color:#fff;background-color:#198754;border-color:#198754}.btn-info{color:#000;background-color:#0dcaf0;border-color:#0dcaf0}.btn-info:hover{color:#000;background-color:#31d2f2;border-color:#25cff2}.btn-check:focus+.btn-info,.btn-info:focus{color:#000;background-color:#31d2f2;border-color:#25cff2;box-shadow:0 0 0 .25rem 
rgba(11,172,204,0.5)}.btn-check:checked+.btn-info,.btn-check:active+.btn-info,.btn-info:active,.btn-info.active,.show>.btn-info.dropdown-toggle{color:#000;background-color:#3dd5f3;border-color:#25cff2}.btn-check:checked+.btn-info:focus,.btn-check:active+.btn-info:focus,.btn-info:active:focus,.btn-info.active:focus,.show>.btn-info.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(11,172,204,0.5)}.btn-info:disabled,.btn-info.disabled{color:#000;background-color:#0dcaf0;border-color:#0dcaf0}.btn-warning{color:#000;background-color:#ffc107;border-color:#ffc107}.btn-warning:hover{color:#000;background-color:#ffca2c;border-color:#ffc720}.btn-check:focus+.btn-warning,.btn-warning:focus{color:#000;background-color:#ffca2c;border-color:#ffc720;box-shadow:0 0 0 .25rem rgba(217,164,6,0.5)}.btn-check:checked+.btn-warning,.btn-check:active+.btn-warning,.btn-warning:active,.btn-warning.active,.show>.btn-warning.dropdown-toggle{color:#000;background-color:#ffcd39;border-color:#ffc720}.btn-check:checked+.btn-warning:focus,.btn-check:active+.btn-warning:focus,.btn-warning:active:focus,.btn-warning.active:focus,.show>.btn-warning.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(217,164,6,0.5)}.btn-warning:disabled,.btn-warning.disabled{color:#000;background-color:#ffc107;border-color:#ffc107}.btn-danger{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-danger:hover{color:#fff;background-color:#bb2d3b;border-color:#b02a37}.btn-check:focus+.btn-danger,.btn-danger:focus{color:#fff;background-color:#bb2d3b;border-color:#b02a37;box-shadow:0 0 0 .25rem rgba(225,83,97,0.5)}.btn-check:checked+.btn-danger,.btn-check:active+.btn-danger,.btn-danger:active,.btn-danger.active,.show>.btn-danger.dropdown-toggle{color:#fff;background-color:#b02a37;border-color:#a52834}.btn-check:checked+.btn-danger:focus,.btn-check:active+.btn-danger:focus,.btn-danger:active:focus,.btn-danger.active:focus,.show>.btn-danger.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(225,83,97,0.5)}.btn-danger:disabled,.btn-danger.disabled{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-light{color:#000;background-color:#f8f9fa;border-color:#f8f9fa}.btn-light:hover{color:#000;background-color:#f9fafb;border-color:#f9fafb}.btn-check:focus+.btn-light,.btn-light:focus{color:#000;background-color:#f9fafb;border-color:#f9fafb;box-shadow:0 0 0 .25rem rgba(211,212,213,0.5)}.btn-check:checked+.btn-light,.btn-check:active+.btn-light,.btn-light:active,.btn-light.active,.show>.btn-light.dropdown-toggle{color:#000;background-color:#f9fafb;border-color:#f9fafb}.btn-check:checked+.btn-light:focus,.btn-check:active+.btn-light:focus,.btn-light:active:focus,.btn-light.active:focus,.show>.btn-light.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(211,212,213,0.5)}.btn-light:disabled,.btn-light.disabled{color:#000;background-color:#f8f9fa;border-color:#f8f9fa}.btn-dark{color:#fff;background-color:#212529;border-color:#212529}.btn-dark:hover{color:#fff;background-color:#1c1f23;border-color:#1a1e21}.btn-check:focus+.btn-dark,.btn-dark:focus{color:#fff;background-color:#1c1f23;border-color:#1a1e21;box-shadow:0 0 0 .25rem rgba(66,70,73,0.5)}.btn-check:checked+.btn-dark,.btn-check:active+.btn-dark,.btn-dark:active,.btn-dark.active,.show>.btn-dark.dropdown-toggle{color:#fff;background-color:#1a1e21;border-color:#191c1f}.btn-check:checked+.btn-dark:focus,.btn-check:active+.btn-dark:focus,.btn-dark:active:focus,.btn-dark.active:focus,.show>.btn-dark.dropdown-toggle:focus{box-shadow:0 0 0 .25rem 
rgba(66,70,73,0.5)}.btn-dark:disabled,.btn-dark.disabled{color:#fff;background-color:#212529;border-color:#212529}.btn-outline-primary{color:#c5050c;border-color:#c5050c}.btn-outline-primary:hover{color:#fff;background-color:#c5050c;border-color:#c5050c}.btn-check:focus+.btn-outline-primary,.btn-outline-primary:focus{box-shadow:0 0 0 .25rem rgba(197,5,12,0.5)}.btn-check:checked+.btn-outline-primary,.btn-check:active+.btn-outline-primary,.btn-outline-primary:active,.btn-outline-primary.active,.btn-outline-primary.dropdown-toggle.show{color:#fff;background-color:#c5050c;border-color:#c5050c}.btn-check:checked+.btn-outline-primary:focus,.btn-check:active+.btn-outline-primary:focus,.btn-outline-primary:active:focus,.btn-outline-primary.active:focus,.btn-outline-primary.dropdown-toggle.show:focus{box-shadow:0 0 0 .25rem rgba(197,5,12,0.5)}.btn-outline-primary:disabled,.btn-outline-primary.disabled{color:#c5050c;background-color:transparent}.btn-outline-secondary{color:#6c757d;border-color:#6c757d}.btn-outline-secondary:hover{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-check:focus+.btn-outline-secondary,.btn-outline-secondary:focus{box-shadow:0 0 0 .25rem rgba(108,117,125,0.5)}.btn-check:checked+.btn-outline-secondary,.btn-check:active+.btn-outline-secondary,.btn-outline-secondary:active,.btn-outline-secondary.active,.btn-outline-secondary.dropdown-toggle.show{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-check:checked+.btn-outline-secondary:focus,.btn-check:active+.btn-outline-secondary:focus,.btn-outline-secondary:active:focus,.btn-outline-secondary.active:focus,.btn-outline-secondary.dropdown-toggle.show:focus{box-shadow:0 0 0 .25rem rgba(108,117,125,0.5)}.btn-outline-secondary:disabled,.btn-outline-secondary.disabled{color:#6c757d;background-color:transparent}.btn-outline-success{color:#198754;border-color:#198754}.btn-outline-success:hover{color:#fff;background-color:#198754;border-color:#198754}.btn-check:focus+.btn-outline-success,.btn-outline-success:focus{box-shadow:0 0 0 .25rem rgba(25,135,84,0.5)}.btn-check:checked+.btn-outline-success,.btn-check:active+.btn-outline-success,.btn-outline-success:active,.btn-outline-success.active,.btn-outline-success.dropdown-toggle.show{color:#fff;background-color:#198754;border-color:#198754}.btn-check:checked+.btn-outline-success:focus,.btn-check:active+.btn-outline-success:focus,.btn-outline-success:active:focus,.btn-outline-success.active:focus,.btn-outline-success.dropdown-toggle.show:focus{box-shadow:0 0 0 .25rem rgba(25,135,84,0.5)}.btn-outline-success:disabled,.btn-outline-success.disabled{color:#198754;background-color:transparent}.btn-outline-info{color:#0dcaf0;border-color:#0dcaf0}.btn-outline-info:hover{color:#000;background-color:#0dcaf0;border-color:#0dcaf0}.btn-check:focus+.btn-outline-info,.btn-outline-info:focus{box-shadow:0 0 0 .25rem rgba(13,202,240,0.5)}.btn-check:checked+.btn-outline-info,.btn-check:active+.btn-outline-info,.btn-outline-info:active,.btn-outline-info.active,.btn-outline-info.dropdown-toggle.show{color:#000;background-color:#0dcaf0;border-color:#0dcaf0}.btn-check:checked+.btn-outline-info:focus,.btn-check:active+.btn-outline-info:focus,.btn-outline-info:active:focus,.btn-outline-info.active:focus,.btn-outline-info.dropdown-toggle.show:focus{box-shadow:0 0 0 .25rem 
rgba(13,202,240,0.5)}.btn-outline-info:disabled,.btn-outline-info.disabled{color:#0dcaf0;background-color:transparent}.btn-outline-warning{color:#ffc107;border-color:#ffc107}.btn-outline-warning:hover{color:#000;background-color:#ffc107;border-color:#ffc107}.btn-check:focus+.btn-outline-warning,.btn-outline-warning:focus{box-shadow:0 0 0 .25rem rgba(255,193,7,0.5)}.btn-check:checked+.btn-outline-warning,.btn-check:active+.btn-outline-warning,.btn-outline-warning:active,.btn-outline-warning.active,.btn-outline-warning.dropdown-toggle.show{color:#000;background-color:#ffc107;border-color:#ffc107}.btn-check:checked+.btn-outline-warning:focus,.btn-check:active+.btn-outline-warning:focus,.btn-outline-warning:active:focus,.btn-outline-warning.active:focus,.btn-outline-warning.dropdown-toggle.show:focus{box-shadow:0 0 0 .25rem rgba(255,193,7,0.5)}.btn-outline-warning:disabled,.btn-outline-warning.disabled{color:#ffc107;background-color:transparent}.btn-outline-danger{color:#dc3545;border-color:#dc3545}.btn-outline-danger:hover{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-check:focus+.btn-outline-danger,.btn-outline-danger:focus{box-shadow:0 0 0 .25rem rgba(220,53,69,0.5)}.btn-check:checked+.btn-outline-danger,.btn-check:active+.btn-outline-danger,.btn-outline-danger:active,.btn-outline-danger.active,.btn-outline-danger.dropdown-toggle.show{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-check:checked+.btn-outline-danger:focus,.btn-check:active+.btn-outline-danger:focus,.btn-outline-danger:active:focus,.btn-outline-danger.active:focus,.btn-outline-danger.dropdown-toggle.show:focus{box-shadow:0 0 0 .25rem rgba(220,53,69,0.5)}.btn-outline-danger:disabled,.btn-outline-danger.disabled{color:#dc3545;background-color:transparent}.btn-outline-light{color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light:hover{color:#000;background-color:#f8f9fa;border-color:#f8f9fa}.btn-check:focus+.btn-outline-light,.btn-outline-light:focus{box-shadow:0 0 0 .25rem rgba(248,249,250,0.5)}.btn-check:checked+.btn-outline-light,.btn-check:active+.btn-outline-light,.btn-outline-light:active,.btn-outline-light.active,.btn-outline-light.dropdown-toggle.show{color:#000;background-color:#f8f9fa;border-color:#f8f9fa}.btn-check:checked+.btn-outline-light:focus,.btn-check:active+.btn-outline-light:focus,.btn-outline-light:active:focus,.btn-outline-light.active:focus,.btn-outline-light.dropdown-toggle.show:focus{box-shadow:0 0 0 .25rem rgba(248,249,250,0.5)}.btn-outline-light:disabled,.btn-outline-light.disabled{color:#f8f9fa;background-color:transparent}.btn-outline-dark{color:#212529;border-color:#212529}.btn-outline-dark:hover{color:#fff;background-color:#212529;border-color:#212529}.btn-check:focus+.btn-outline-dark,.btn-outline-dark:focus{box-shadow:0 0 0 .25rem rgba(33,37,41,0.5)}.btn-check:checked+.btn-outline-dark,.btn-check:active+.btn-outline-dark,.btn-outline-dark:active,.btn-outline-dark.active,.btn-outline-dark.dropdown-toggle.show{color:#fff;background-color:#212529;border-color:#212529}.btn-check:checked+.btn-outline-dark:focus,.btn-check:active+.btn-outline-dark:focus,.btn-outline-dark:active:focus,.btn-outline-dark.active:focus,.btn-outline-dark.dropdown-toggle.show:focus{box-shadow:0 0 0 .25rem 
rgba(33,37,41,0.5)}.btn-outline-dark:disabled,.btn-outline-dark.disabled{color:#212529;background-color:transparent}.btn-link{font-weight:400;color:#c5050c;text-decoration:underline}.btn-link:hover{color:#9e040a}.btn-link:disabled,.btn-link.disabled{color:#6c757d}.btn-lg,.btn-group-lg>.btn{padding:.5rem 1rem;font-size:1.25rem;border-radius:.3rem}.btn-sm,.btn-group-sm>.btn{padding:.25rem .5rem;font-size:.875rem;border-radius:.2rem}.fade{transition:opacity 0.15s linear}@media (prefers-reduced-motion: reduce){.fade{transition:none}}.fade:not(.show){opacity:0}.collapse:not(.show){display:none}.collapsing{height:0;overflow:hidden;transition:height 0.35s ease}@media (prefers-reduced-motion: reduce){.collapsing{transition:none}}.dropup,.dropend,.dropdown,.dropstart{position:relative}.dropdown-toggle{white-space:nowrap}.dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid;border-right:.3em solid transparent;border-bottom:0;border-left:.3em solid transparent}.dropdown-toggle:empty::after{margin-left:0}.dropdown-menu{position:absolute;z-index:1000;display:none;min-width:10rem;padding:.5rem 0;margin:0;font-size:1rem;color:#212529;text-align:left;list-style:none;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,0.15);border-radius:.25rem}.dropdown-menu[data-bs-popper]{top:100%;left:0;margin-top:.125rem}.dropdown-menu-start{--bs-position: start}.dropdown-menu-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-end{--bs-position: end}.dropdown-menu-end[data-bs-popper]{right:0;left:auto}@media (min-width: 576px){.dropdown-menu-sm-start{--bs-position: start}.dropdown-menu-sm-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-sm-end{--bs-position: end}.dropdown-menu-sm-end[data-bs-popper]{right:0;left:auto}}@media (min-width: 768px){.dropdown-menu-md-start{--bs-position: start}.dropdown-menu-md-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-md-end{--bs-position: end}.dropdown-menu-md-end[data-bs-popper]{right:0;left:auto}}@media (min-width: 992px){.dropdown-menu-lg-start{--bs-position: start}.dropdown-menu-lg-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-lg-end{--bs-position: end}.dropdown-menu-lg-end[data-bs-popper]{right:0;left:auto}}@media (min-width: 1200px){.dropdown-menu-xl-start{--bs-position: start}.dropdown-menu-xl-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-xl-end{--bs-position: end}.dropdown-menu-xl-end[data-bs-popper]{right:0;left:auto}}@media (min-width: 1400px){.dropdown-menu-xxl-start{--bs-position: start}.dropdown-menu-xxl-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-xxl-end{--bs-position: end}.dropdown-menu-xxl-end[data-bs-popper]{right:0;left:auto}}.dropup .dropdown-menu[data-bs-popper]{top:auto;bottom:100%;margin-top:0;margin-bottom:.125rem}.dropup .dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:0;border-right:.3em solid transparent;border-bottom:.3em solid;border-left:.3em solid transparent}.dropup .dropdown-toggle:empty::after{margin-left:0}.dropend .dropdown-menu[data-bs-popper]{top:0;right:auto;left:100%;margin-top:0;margin-left:.125rem}.dropend .dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid transparent;border-right:0;border-bottom:.3em solid transparent;border-left:.3em solid}.dropend .dropdown-toggle:empty::after{margin-left:0}.dropend .dropdown-toggle::after{vertical-align:0}.dropstart 
.dropdown-menu[data-bs-popper]{top:0;right:100%;left:auto;margin-top:0;margin-right:.125rem}.dropstart .dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:""}.dropstart .dropdown-toggle::after{display:none}.dropstart .dropdown-toggle::before{display:inline-block;margin-right:.255em;vertical-align:.255em;content:"";border-top:.3em solid transparent;border-right:.3em solid;border-bottom:.3em solid transparent}.dropstart .dropdown-toggle:empty::after{margin-left:0}.dropstart .dropdown-toggle::before{vertical-align:0}.dropdown-divider{height:0;margin:.5rem 0;overflow:hidden;border-top:1px solid rgba(0,0,0,0.15)}.dropdown-item{display:block;width:100%;padding:.25rem 1rem;clear:both;font-weight:400;color:#212529;text-align:inherit;text-decoration:none;white-space:nowrap;background-color:transparent;border:0}.dropdown-item:hover,.dropdown-item:focus{color:#1e2125;background-color:#e9ecef}.dropdown-item.active,.dropdown-item:active{color:#fff;text-decoration:none;background-color:#c5050c}.dropdown-item.disabled,.dropdown-item:disabled{color:#adb5bd;pointer-events:none;background-color:transparent}.dropdown-menu.show{display:block}.dropdown-header{display:block;padding:.5rem 1rem;margin-bottom:0;font-size:.875rem;color:#6c757d;white-space:nowrap}.dropdown-item-text{display:block;padding:.25rem 1rem;color:#212529}.dropdown-menu-dark{color:#dee2e6;background-color:#343a40;border-color:rgba(0,0,0,0.15)}.dropdown-menu-dark .dropdown-item{color:#dee2e6}.dropdown-menu-dark .dropdown-item:hover,.dropdown-menu-dark .dropdown-item:focus{color:#fff;background-color:rgba(255,255,255,0.15)}.dropdown-menu-dark .dropdown-item.active,.dropdown-menu-dark .dropdown-item:active{color:#fff;background-color:#c5050c}.dropdown-menu-dark .dropdown-item.disabled,.dropdown-menu-dark .dropdown-item:disabled{color:#adb5bd}.dropdown-menu-dark .dropdown-divider{border-color:rgba(0,0,0,0.15)}.dropdown-menu-dark .dropdown-item-text{color:#dee2e6}.dropdown-menu-dark .dropdown-header{color:#adb5bd}.btn-group,.btn-group-vertical{position:relative;display:inline-flex;vertical-align:middle}.btn-group>.btn,.btn-group-vertical>.btn{position:relative;flex:1 1 auto}.btn-group>.btn-check:checked+.btn,.btn-group>.btn-check:focus+.btn,.btn-group>.btn:hover,.btn-group>.btn:focus,.btn-group>.btn:active,.btn-group>.btn.active,.btn-group-vertical>.btn-check:checked+.btn,.btn-group-vertical>.btn-check:focus+.btn,.btn-group-vertical>.btn:hover,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn.active{z-index:1}.btn-toolbar{display:flex;flex-wrap:wrap;justify-content:flex-start}.btn-toolbar .input-group{width:auto}.btn-group>.btn:not(:first-child),.btn-group>.btn-group:not(:first-child){margin-left:-1px}.btn-group>.btn:not(:last-child):not(.dropdown-toggle),.btn-group>.btn-group:not(:last-child)>.btn{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:nth-child(n + 3),.btn-group>:not(.btn-check)+.btn,.btn-group>.btn-group:not(:first-child)>.btn{border-top-left-radius:0;border-bottom-left-radius:0}.dropdown-toggle-split{padding-right:.5625rem;padding-left:.5625rem}.dropdown-toggle-split::after,.dropup .dropdown-toggle-split::after,.dropend .dropdown-toggle-split::after{margin-left:0}.dropstart 
.dropdown-toggle-split::before{margin-right:0}.btn-sm+.dropdown-toggle-split,.btn-group-sm>.btn+.dropdown-toggle-split{padding-right:.375rem;padding-left:.375rem}.btn-lg+.dropdown-toggle-split,.btn-group-lg>.btn+.dropdown-toggle-split{padding-right:.75rem;padding-left:.75rem}.btn-group-vertical{flex-direction:column;align-items:flex-start;justify-content:center}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group{width:100%}.btn-group-vertical>.btn:not(:first-child),.btn-group-vertical>.btn-group:not(:first-child){margin-top:-1px}.btn-group-vertical>.btn:not(:last-child):not(.dropdown-toggle),.btn-group-vertical>.btn-group:not(:last-child)>.btn{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn~.btn,.btn-group-vertical>.btn-group:not(:first-child)>.btn{border-top-left-radius:0;border-top-right-radius:0}.nav{display:flex;flex-wrap:wrap;padding-left:0;margin-bottom:0;list-style:none}.nav-link{display:block;padding:.5rem 1rem;color:#c5050c;text-decoration:none;transition:color 0.15s ease-in-out,background-color 0.15s ease-in-out,border-color 0.15s ease-in-out}@media (prefers-reduced-motion: reduce){.nav-link{transition:none}}.nav-link:hover,.nav-link:focus{color:#9e040a}.nav-link.disabled{color:#6c757d;pointer-events:none;cursor:default}.nav-tabs{border-bottom:1px solid #dee2e6}.nav-tabs .nav-link{margin-bottom:-1px;background:none;border:1px solid transparent;border-top-left-radius:.25rem;border-top-right-radius:.25rem}.nav-tabs .nav-link:hover,.nav-tabs .nav-link:focus{border-color:#e9ecef #e9ecef #dee2e6;isolation:isolate}.nav-tabs .nav-link.disabled{color:#6c757d;background-color:transparent;border-color:transparent}.nav-tabs .nav-link.active,.nav-tabs .nav-item.show .nav-link{color:#495057;background-color:#fff;border-color:#dee2e6 #dee2e6 #fff}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.nav-pills .nav-link{background:none;border:0;border-radius:.25rem}.nav-pills .nav-link.active,.nav-pills .show>.nav-link{color:#fff;background-color:#c5050c}.nav-fill>.nav-link,.nav-fill .nav-item{flex:1 1 auto;text-align:center}.nav-justified>.nav-link,.nav-justified .nav-item{flex-basis:0;flex-grow:1;text-align:center}.nav-fill .nav-item .nav-link,.nav-justified .nav-item .nav-link{width:100%}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.navbar{position:relative;display:flex;flex-wrap:wrap;align-items:center;justify-content:space-between;padding-top:.5rem;padding-bottom:.5rem}.navbar>.container,.navbar>.container-fluid,.navbar>.container-sm,.navbar>.container-md,.navbar>.container-lg,.navbar>.container-xl,.navbar>.container-xxl{display:flex;flex-wrap:inherit;align-items:center;justify-content:space-between}.navbar-brand{padding-top:.3125rem;padding-bottom:.3125rem;margin-right:1rem;font-size:1.25rem;text-decoration:none;white-space:nowrap}.navbar-nav{display:flex;flex-direction:column;padding-left:0;margin-bottom:0;list-style:none}.navbar-nav .nav-link{padding-right:0;padding-left:0}.navbar-nav .dropdown-menu{position:static}.navbar-text{padding-top:.5rem;padding-bottom:.5rem}.navbar-collapse{flex-basis:100%;flex-grow:1;align-items:center}.navbar-toggler{padding:.25rem .75rem;font-size:1.25rem;line-height:1;background-color:transparent;border:1px solid transparent;border-radius:.25rem;transition:box-shadow 0.15s ease-in-out}@media (prefers-reduced-motion: 
reduce){.navbar-toggler{transition:none}}.navbar-toggler:hover{text-decoration:none}.navbar-toggler:focus{text-decoration:none;outline:0;box-shadow:0 0 0 .25rem}.navbar-toggler-icon{display:inline-block;width:1.5em;height:1.5em;vertical-align:middle;background-repeat:no-repeat;background-position:center;background-size:100%}.navbar-nav-scroll{max-height:var(--bs-scroll-height, 75vh);overflow-y:auto}@media (min-width: 576px){.navbar-expand-sm{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-sm .navbar-nav{flex-direction:row}.navbar-expand-sm .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-sm .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-sm .navbar-nav-scroll{overflow:visible}.navbar-expand-sm .navbar-collapse{display:flex !important;flex-basis:auto}.navbar-expand-sm .navbar-toggler{display:none}}@media (min-width: 768px){.navbar-expand-md{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-md .navbar-nav{flex-direction:row}.navbar-expand-md .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-md .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-md .navbar-nav-scroll{overflow:visible}.navbar-expand-md .navbar-collapse{display:flex !important;flex-basis:auto}.navbar-expand-md .navbar-toggler{display:none}}@media (min-width: 992px){.navbar-expand-lg{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-lg .navbar-nav{flex-direction:row}.navbar-expand-lg .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-lg .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-lg .navbar-nav-scroll{overflow:visible}.navbar-expand-lg .navbar-collapse{display:flex !important;flex-basis:auto}.navbar-expand-lg .navbar-toggler{display:none}}@media (min-width: 1200px){.navbar-expand-xl{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-xl .navbar-nav{flex-direction:row}.navbar-expand-xl .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xl .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-xl .navbar-nav-scroll{overflow:visible}.navbar-expand-xl .navbar-collapse{display:flex !important;flex-basis:auto}.navbar-expand-xl .navbar-toggler{display:none}}@media (min-width: 1400px){.navbar-expand-xxl{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-xxl .navbar-nav{flex-direction:row}.navbar-expand-xxl .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xxl .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-xxl .navbar-nav-scroll{overflow:visible}.navbar-expand-xxl .navbar-collapse{display:flex !important;flex-basis:auto}.navbar-expand-xxl .navbar-toggler{display:none}}.navbar-expand{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand .navbar-nav{flex-direction:row}.navbar-expand .navbar-nav .dropdown-menu{position:absolute}.navbar-expand .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand .navbar-nav-scroll{overflow:visible}.navbar-expand .navbar-collapse{display:flex !important;flex-basis:auto}.navbar-expand .navbar-toggler{display:none}.navbar-light .navbar-brand{color:rgba(0,0,0,0.9)}.navbar-light .navbar-brand:hover,.navbar-light .navbar-brand:focus{color:rgba(0,0,0,0.9)}.navbar-light .navbar-nav .nav-link{color:rgba(0,0,0,0.55)}.navbar-light .navbar-nav .nav-link:hover,.navbar-light .navbar-nav .nav-link:focus{color:rgba(0,0,0,0.7)}.navbar-light .navbar-nav .nav-link.disabled{color:rgba(0,0,0,0.3)}.navbar-light .navbar-nav .show>.nav-link,.navbar-light .navbar-nav 
.nav-link.active{color:rgba(0,0,0,0.9)}.navbar-light .navbar-toggler{color:rgba(0,0,0,0.55);border-color:rgba(0,0,0,0.1)}.navbar-light .navbar-toggler-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 30 30'%3e%3cpath stroke='rgba%280,0,0,0.55%29' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e")}.navbar-light .navbar-text{color:rgba(0,0,0,0.55)}.navbar-light .navbar-text a,.navbar-light .navbar-text a:hover,.navbar-light .navbar-text a:focus{color:rgba(0,0,0,0.9)}.navbar-dark .navbar-brand{color:#fff}.navbar-dark .navbar-brand:hover,.navbar-dark .navbar-brand:focus{color:#fff}.navbar-dark .navbar-nav .nav-link{color:rgba(255,255,255,0.55)}.navbar-dark .navbar-nav .nav-link:hover,.navbar-dark .navbar-nav .nav-link:focus{color:rgba(255,255,255,0.75)}.navbar-dark .navbar-nav .nav-link.disabled{color:rgba(255,255,255,0.25)}.navbar-dark .navbar-nav .show>.nav-link,.navbar-dark .navbar-nav .nav-link.active{color:#fff}.navbar-dark .navbar-toggler{color:rgba(255,255,255,0.55);border-color:rgba(255,255,255,0.1)}.navbar-dark .navbar-toggler-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 30 30'%3e%3cpath stroke='rgba%28255,255,255,0.55%29' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e")}.navbar-dark .navbar-text{color:rgba(255,255,255,0.55)}.navbar-dark .navbar-text a,.navbar-dark .navbar-text a:hover,.navbar-dark .navbar-text a:focus{color:#fff}.card{position:relative;display:flex;flex-direction:column;min-width:0;word-wrap:break-word;background-color:#fff;background-clip:border-box;border:1px solid rgba(0,0,0,0.125);border-radius:.25rem}.card>hr{margin-right:0;margin-left:0}.card>.list-group{border-top:inherit;border-bottom:inherit}.card>.list-group:first-child{border-top-width:0;border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.card>.list-group:last-child{border-bottom-width:0;border-bottom-right-radius:calc(.25rem - 1px);border-bottom-left-radius:calc(.25rem - 1px)}.card>.card-header+.list-group,.card>.list-group+.card-footer{border-top:0}.card-body{flex:1 1 auto;padding:1rem 1rem}.card-title{margin-bottom:.5rem}.card-subtitle{margin-top:-.25rem;margin-bottom:0}.card-text:last-child{margin-bottom:0}.card-link:hover{text-decoration:none}.card-link+.card-link{margin-left:1rem}.card-header{padding:.5rem 1rem;margin-bottom:0;background-color:rgba(0,0,0,0.03);border-bottom:1px solid rgba(0,0,0,0.125)}.card-header:first-child{border-radius:calc(.25rem - 1px) calc(.25rem - 1px) 0 0}.card-footer{padding:.5rem 1rem;background-color:rgba(0,0,0,0.03);border-top:1px solid rgba(0,0,0,0.125)}.card-footer:last-child{border-radius:0 0 calc(.25rem - 1px) calc(.25rem - 1px)}.card-header-tabs{margin-right:-.5rem;margin-bottom:-.5rem;margin-left:-.5rem;border-bottom:0}.card-header-pills{margin-right:-.5rem;margin-left:-.5rem}.card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1rem;border-radius:calc(.25rem - 1px)}.card-img,.card-img-top,.card-img-bottom{width:100%}.card-img,.card-img-top{border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.card-img,.card-img-bottom{border-bottom-right-radius:calc(.25rem - 1px);border-bottom-left-radius:calc(.25rem - 1px)}.card-group>.card{margin-bottom:.75rem}@media (min-width: 576px){.card-group{display:flex;flex-flow:row wrap}.card-group>.card{flex:1 0 
0%;margin-bottom:0}.card-group>.card+.card{margin-left:0;border-left:0}.card-group>.card:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.card-group>.card:not(:last-child) .card-img-top,.card-group>.card:not(:last-child) .card-header{border-top-right-radius:0}.card-group>.card:not(:last-child) .card-img-bottom,.card-group>.card:not(:last-child) .card-footer{border-bottom-right-radius:0}.card-group>.card:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.card-group>.card:not(:first-child) .card-img-top,.card-group>.card:not(:first-child) .card-header{border-top-left-radius:0}.card-group>.card:not(:first-child) .card-img-bottom,.card-group>.card:not(:first-child) .card-footer{border-bottom-left-radius:0}}.accordion-button{position:relative;display:flex;align-items:center;width:100%;padding:.5em 0;font-size:1rem;color:#212529;text-align:left;background-color:#fff;border:0;border-radius:0;overflow-anchor:none;transition:color 0.15s ease-in-out,background-color 0.15s ease-in-out,border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out,border-radius 0.15s ease}@media (prefers-reduced-motion: reduce){.accordion-button{transition:none}}.accordion-button:not(.collapsed){color:#b1050b;background-color:#f9e6e7;box-shadow:inset 0 -1px 0 rgba(0,0,0,0.125)}.accordion-button:not(.collapsed)::after{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23b1050b'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e");transform:rotate(-180deg)}.accordion-button::after{flex-shrink:0;width:.9rem;height:.9rem;margin-left:auto;content:"";background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23212529'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e");background-repeat:no-repeat;background-size:.9rem;transition:transform 0.2s ease-in-out}@media (prefers-reduced-motion: reduce){.accordion-button::after{transition:none}}.accordion-button:hover{z-index:2}.accordion-button:focus{z-index:3;border-color:#e28286;outline:0;box-shadow:0 0 0 .25rem rgba(197,5,12,0.25)}.accordion-header{margin-bottom:0}.accordion-item{background-color:#fff;border:1px solid rgba(0,0,0,0.125)}.accordion-item:first-of-type{border-top-left-radius:.25rem;border-top-right-radius:.25rem}.accordion-item:first-of-type .accordion-button{border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.accordion-item:not(:first-of-type){border-top:0}.accordion-item:last-of-type{border-bottom-right-radius:.25rem;border-bottom-left-radius:.25rem}.accordion-item:last-of-type .accordion-button.collapsed{border-bottom-right-radius:calc(.25rem - 1px);border-bottom-left-radius:calc(.25rem - 1px)}.accordion-item:last-of-type .accordion-collapse{border-bottom-right-radius:.25rem;border-bottom-left-radius:.25rem}.accordion-body{padding:.5em 0}.accordion-flush .accordion-collapse{border-width:0}.accordion-flush .accordion-item{border-right:0;border-left:0;border-radius:0}.accordion-flush .accordion-item:first-child{border-top:0}.accordion-flush .accordion-item:last-child{border-bottom:0}.accordion-flush .accordion-item .accordion-button{border-radius:0}.breadcrumb{display:flex;flex-wrap:wrap;padding:0 
0;margin-bottom:1rem;list-style:none}.breadcrumb-item+.breadcrumb-item{padding-left:.5rem}.breadcrumb-item+.breadcrumb-item::before{float:left;padding-right:.5rem;color:#6c757d;content:var(--bs-breadcrumb-divider, "/") /* rtl: var(--bs-breadcrumb-divider, "/") */}.breadcrumb-item.active{color:#6c757d}.pagination{display:flex;padding-left:0;list-style:none}.page-link{position:relative;display:block;color:#c5050c;text-decoration:none;background-color:#fff;border:1px solid #dee2e6;transition:color 0.15s ease-in-out,background-color 0.15s ease-in-out,border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out}@media (prefers-reduced-motion: reduce){.page-link{transition:none}}.page-link:hover{z-index:2;color:#9e040a;background-color:#e9ecef;border-color:#dee2e6}.page-link:focus{z-index:3;color:#9e040a;background-color:#e9ecef;outline:0;box-shadow:0 0 0 .25rem rgba(197,5,12,0.25)}.page-item:not(:first-child) .page-link{margin-left:-1px}.page-item.active .page-link{z-index:3;color:#fff;background-color:#c5050c;border-color:#c5050c}.page-item.disabled .page-link{color:#6c757d;pointer-events:none;background-color:#fff;border-color:#dee2e6}.page-link{padding:.375rem .75rem}.page-item:first-child .page-link{border-top-left-radius:.25rem;border-bottom-left-radius:.25rem}.page-item:last-child .page-link{border-top-right-radius:.25rem;border-bottom-right-radius:.25rem}.pagination-lg .page-link{padding:.75rem 1.5rem;font-size:1.25rem}.pagination-lg .page-item:first-child .page-link{border-top-left-radius:.3rem;border-bottom-left-radius:.3rem}.pagination-lg .page-item:last-child .page-link{border-top-right-radius:.3rem;border-bottom-right-radius:.3rem}.pagination-sm .page-link{padding:.25rem .5rem;font-size:.875rem}.pagination-sm .page-item:first-child .page-link{border-top-left-radius:.2rem;border-bottom-left-radius:.2rem}.pagination-sm .page-item:last-child .page-link{border-top-right-radius:.2rem;border-bottom-right-radius:.2rem}.badge{display:inline-block;padding:.35em .65em;font-size:.75em;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25rem}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.alert{position:relative;padding:1rem 1rem;margin-bottom:1rem;border:1px solid transparent;border-radius:.25rem}.alert-heading{color:inherit}.alert-link{font-weight:700}.alert-dismissible{padding-right:3rem}.alert-dismissible .btn-close{position:absolute;top:0;right:0;z-index:2;padding:1.25rem 1rem}.alert-primary{color:#760307;background-color:#f3cdce;border-color:#eeb4b6}.alert-primary .alert-link{color:#5e0206}.alert-secondary{color:#41464b;background-color:#e2e3e5;border-color:#d3d6d8}.alert-secondary .alert-link{color:#34383c}.alert-success{color:#0f5132;background-color:#d1e7dd;border-color:#badbcc}.alert-success .alert-link{color:#0c4128}.alert-info{color:#055160;background-color:#cff4fc;border-color:#b6effb}.alert-info .alert-link{color:#04414d}.alert-warning{color:#664d03;background-color:#fff3cd;border-color:#ffecb5}.alert-warning .alert-link{color:#523e02}.alert-danger{color:#842029;background-color:#f8d7da;border-color:#f5c2c7}.alert-danger .alert-link{color:#6a1a21}.alert-light{color:#636464;background-color:#fefefe;border-color:#fdfdfe}.alert-light .alert-link{color:#4f5050}.alert-dark{color:#141619;background-color:#d3d3d4;border-color:#bcbebf}.alert-dark .alert-link{color:#101214}@keyframes 
progress-bar-stripes{0%{background-position-x:1rem}}.progress{display:flex;height:1rem;overflow:hidden;font-size:.75rem;background-color:#e9ecef;border-radius:.25rem}.progress-bar{display:flex;flex-direction:column;justify-content:center;overflow:hidden;color:#fff;text-align:center;white-space:nowrap;background-color:#c5050c;transition:width 0.6s ease}@media (prefers-reduced-motion: reduce){.progress-bar{transition:none}}.progress-bar-striped{background-image:linear-gradient(45deg, rgba(255,255,255,0.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,0.15) 50%, rgba(255,255,255,0.15) 75%, transparent 75%, transparent);background-size:1rem 1rem}.progress-bar-animated{animation:1s linear infinite progress-bar-stripes}@media (prefers-reduced-motion: reduce){.progress-bar-animated{animation:none}}.list-group{display:flex;flex-direction:column;padding-left:0;margin-bottom:0;border-radius:.25rem}.list-group-numbered{list-style-type:none;counter-reset:section}.list-group-numbered>li::before{content:counters(section, ".") ". ";counter-increment:section}.list-group-item-action{width:100%;color:#495057;text-align:inherit}.list-group-item-action:hover,.list-group-item-action:focus{z-index:1;color:#495057;text-decoration:none;background-color:#f8f9fa}.list-group-item-action:active{color:#212529;background-color:#e9ecef}.list-group-item{position:relative;display:block;padding:.5rem 1rem;color:#212529;text-decoration:none;background-color:#fff;border:1px solid #000}.list-group-item:first-child{border-top-left-radius:inherit;border-top-right-radius:inherit}.list-group-item:last-child{border-bottom-right-radius:inherit;border-bottom-left-radius:inherit}.list-group-item.disabled,.list-group-item:disabled{color:#6c757d;pointer-events:none;background-color:#fff}.list-group-item.active{z-index:2;color:#fff;background-color:#c5050c;border-color:#c5050c}.list-group-item+.list-group-item{border-top-width:0}.list-group-item+.list-group-item.active{margin-top:-1px;border-top-width:1px}.list-group-horizontal{flex-direction:row}.list-group-horizontal>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal>.list-group-item.active{margin-top:0}.list-group-horizontal>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}@media (min-width: 576px){.list-group-horizontal-sm{flex-direction:row}.list-group-horizontal-sm>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-sm>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-sm>.list-group-item.active{margin-top:0}.list-group-horizontal-sm>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-sm>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media (min-width: 
768px){.list-group-horizontal-md{flex-direction:row}.list-group-horizontal-md>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-md>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-md>.list-group-item.active{margin-top:0}.list-group-horizontal-md>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-md>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media (min-width: 992px){.list-group-horizontal-lg{flex-direction:row}.list-group-horizontal-lg>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-lg>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-lg>.list-group-item.active{margin-top:0}.list-group-horizontal-lg>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-lg>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media (min-width: 1200px){.list-group-horizontal-xl{flex-direction:row}.list-group-horizontal-xl>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-xl>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-xl>.list-group-item.active{margin-top:0}.list-group-horizontal-xl>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-xl>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media (min-width: 1400px){.list-group-horizontal-xxl{flex-direction:row}.list-group-horizontal-xxl>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-xxl>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-xxl>.list-group-item.active{margin-top:0}.list-group-horizontal-xxl>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-xxl>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}.list-group-flush{border-radius:0}.list-group-flush>.list-group-item{border-width:0 0 
1px}.list-group-flush>.list-group-item:last-child{border-bottom-width:0}.list-group-item-primary{color:#760307;background-color:#f3cdce}.list-group-item-primary.list-group-item-action:hover,.list-group-item-primary.list-group-item-action:focus{color:#760307;background-color:#dbb9b9}.list-group-item-primary.list-group-item-action.active{color:#fff;background-color:#760307;border-color:#760307}.list-group-item-secondary{color:#41464b;background-color:#e2e3e5}.list-group-item-secondary.list-group-item-action:hover,.list-group-item-secondary.list-group-item-action:focus{color:#41464b;background-color:#cbccce}.list-group-item-secondary.list-group-item-action.active{color:#fff;background-color:#41464b;border-color:#41464b}.list-group-item-success{color:#0f5132;background-color:#d1e7dd}.list-group-item-success.list-group-item-action:hover,.list-group-item-success.list-group-item-action:focus{color:#0f5132;background-color:#bcd0c7}.list-group-item-success.list-group-item-action.active{color:#fff;background-color:#0f5132;border-color:#0f5132}.list-group-item-info{color:#055160;background-color:#cff4fc}.list-group-item-info.list-group-item-action:hover,.list-group-item-info.list-group-item-action:focus{color:#055160;background-color:#badce3}.list-group-item-info.list-group-item-action.active{color:#fff;background-color:#055160;border-color:#055160}.list-group-item-warning{color:#664d03;background-color:#fff3cd}.list-group-item-warning.list-group-item-action:hover,.list-group-item-warning.list-group-item-action:focus{color:#664d03;background-color:#e6dbb9}.list-group-item-warning.list-group-item-action.active{color:#fff;background-color:#664d03;border-color:#664d03}.list-group-item-danger{color:#842029;background-color:#f8d7da}.list-group-item-danger.list-group-item-action:hover,.list-group-item-danger.list-group-item-action:focus{color:#842029;background-color:#dfc2c4}.list-group-item-danger.list-group-item-action.active{color:#fff;background-color:#842029;border-color:#842029}.list-group-item-light{color:#636464;background-color:#fefefe}.list-group-item-light.list-group-item-action:hover,.list-group-item-light.list-group-item-action:focus{color:#636464;background-color:#e5e5e5}.list-group-item-light.list-group-item-action.active{color:#fff;background-color:#636464;border-color:#636464}.list-group-item-dark{color:#141619;background-color:#d3d3d4}.list-group-item-dark.list-group-item-action:hover,.list-group-item-dark.list-group-item-action:focus{color:#141619;background-color:#bebebf}.list-group-item-dark.list-group-item-action.active{color:#fff;background-color:#141619;border-color:#141619}.btn-close{box-sizing:content-box;width:1em;height:1em;padding:.25em .25em;color:#000;background:transparent url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23000'%3e%3cpath d='M.293.293a1 1 0 011.414 0L8 6.586 14.293.293a1 1 0 111.414 1.414L9.414 8l6.293 6.293a1 1 0 01-1.414 1.414L8 9.414l-6.293 6.293a1 1 0 01-1.414-1.414L6.586 8 .293 1.707a1 1 0 010-1.414z'/%3e%3c/svg%3e") center/1em auto no-repeat;border:0;border-radius:.25rem;opacity:.5}.btn-close:hover{color:#000;text-decoration:none;opacity:.75}.btn-close:focus{outline:0;box-shadow:0 0 0 .25rem rgba(197,5,12,0.25);opacity:1}.btn-close:disabled,.btn-close.disabled{pointer-events:none;user-select:none;opacity:.25}.btn-close-white{filter:invert(1) grayscale(100%) 
brightness(200%)}.toast{width:350px;max-width:100%;font-size:.875rem;pointer-events:auto;background-color:rgba(255,255,255,0.85);background-clip:padding-box;border:1px solid rgba(0,0,0,0.1);box-shadow:0 0.5rem 1rem rgba(0,0,0,0.15);border-radius:.25rem}.toast:not(.showing):not(.show){opacity:0}.toast.hide{display:none}.toast-container{width:max-content;max-width:100%;pointer-events:none}.toast-container>:not(:last-child){margin-bottom:.75rem}.toast-header{display:flex;align-items:center;padding:.5rem .75rem;color:#6c757d;background-color:rgba(255,255,255,0.85);background-clip:padding-box;border-bottom:1px solid rgba(0,0,0,0.05);border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.toast-header .btn-close{margin-right:-.375rem;margin-left:.75rem}.toast-body{padding:.75rem;word-wrap:break-word}.modal{position:fixed;top:0;left:0;z-index:1060;display:none;width:100%;height:100%;overflow-x:hidden;overflow-y:auto;outline:0}.modal-dialog{position:relative;width:auto;margin:.5rem;pointer-events:none}.modal.fade .modal-dialog{transition:transform 0.3s ease-out;transform:translate(0, -50px)}@media (prefers-reduced-motion: reduce){.modal.fade .modal-dialog{transition:none}}.modal.show .modal-dialog{transform:none}.modal.modal-static .modal-dialog{transform:scale(1.02)}.modal-dialog-scrollable{height:calc(100% - 1rem)}.modal-dialog-scrollable .modal-content{max-height:100%;overflow:hidden}.modal-dialog-scrollable .modal-body{overflow-y:auto}.modal-dialog-centered{display:flex;align-items:center;min-height:calc(100% - 1rem)}.modal-content{position:relative;display:flex;flex-direction:column;width:100%;pointer-events:auto;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,0.2);border-radius:.3rem;outline:0}.modal-backdrop{position:fixed;top:0;left:0;z-index:1040;width:100vw;height:100vh;background-color:#000}.modal-backdrop.fade{opacity:0}.modal-backdrop.show{opacity:.5}.modal-header{display:flex;flex-shrink:0;align-items:center;justify-content:space-between;padding:1rem 1rem;border-bottom:1px solid #dee2e6;border-top-left-radius:calc(.3rem - 1px);border-top-right-radius:calc(.3rem - 1px)}.modal-header .btn-close{padding:.5rem .5rem;margin:-.5rem -.5rem -.5rem auto}.modal-title{margin-bottom:0;line-height:1.5}.modal-body{position:relative;flex:1 1 auto;padding:1rem}.modal-footer{display:flex;flex-wrap:wrap;flex-shrink:0;align-items:center;justify-content:flex-end;padding:.75rem;border-top:1px solid #dee2e6;border-bottom-right-radius:calc(.3rem - 1px);border-bottom-left-radius:calc(.3rem - 1px)}.modal-footer>*{margin:.25rem}@media (min-width: 576px){.modal-dialog{max-width:500px;margin:1.75rem auto}.modal-dialog-scrollable{height:calc(100% - 3.5rem)}.modal-dialog-centered{min-height:calc(100% - 3.5rem)}.modal-sm{max-width:300px}}@media (min-width: 992px){.modal-lg,.modal-xl{max-width:800px}}@media (min-width: 1200px){.modal-xl{max-width:1140px}}.modal-fullscreen{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen .modal-header{border-radius:0}.modal-fullscreen .modal-body{overflow-y:auto}.modal-fullscreen .modal-footer{border-radius:0}@media (max-width: 575.98px){.modal-fullscreen-sm-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-sm-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-sm-down .modal-header{border-radius:0}.modal-fullscreen-sm-down .modal-body{overflow-y:auto}.modal-fullscreen-sm-down 
.modal-footer{border-radius:0}}@media (max-width: 767.98px){.modal-fullscreen-md-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-md-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-md-down .modal-header{border-radius:0}.modal-fullscreen-md-down .modal-body{overflow-y:auto}.modal-fullscreen-md-down .modal-footer{border-radius:0}}@media (max-width: 991.98px){.modal-fullscreen-lg-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-lg-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-lg-down .modal-header{border-radius:0}.modal-fullscreen-lg-down .modal-body{overflow-y:auto}.modal-fullscreen-lg-down .modal-footer{border-radius:0}}@media (max-width: 1199.98px){.modal-fullscreen-xl-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-xl-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-xl-down .modal-header{border-radius:0}.modal-fullscreen-xl-down .modal-body{overflow-y:auto}.modal-fullscreen-xl-down .modal-footer{border-radius:0}}@media (max-width: 1399.98px){.modal-fullscreen-xxl-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-xxl-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-xxl-down .modal-header{border-radius:0}.modal-fullscreen-xxl-down .modal-body{overflow-y:auto}.modal-fullscreen-xxl-down .modal-footer{border-radius:0}}.tooltip{position:absolute;z-index:1080;display:block;margin:0;font-family:var(--bs-font-sans-serif);font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:.875rem;word-wrap:break-word;opacity:0}.tooltip.show{opacity:.9}.tooltip .tooltip-arrow{position:absolute;display:block;width:.8rem;height:.4rem}.tooltip .tooltip-arrow::before{position:absolute;content:"";border-color:transparent;border-style:solid}.bs-tooltip-top,.bs-tooltip-auto[data-popper-placement^="top"]{padding:.4rem 0}.bs-tooltip-top .tooltip-arrow,.bs-tooltip-auto[data-popper-placement^="top"] .tooltip-arrow{bottom:0}.bs-tooltip-top .tooltip-arrow::before,.bs-tooltip-auto[data-popper-placement^="top"] .tooltip-arrow::before{top:-1px;border-width:.4rem .4rem 0;border-top-color:#000}.bs-tooltip-end,.bs-tooltip-auto[data-popper-placement^="right"]{padding:0 .4rem}.bs-tooltip-end .tooltip-arrow,.bs-tooltip-auto[data-popper-placement^="right"] .tooltip-arrow{left:0;width:.4rem;height:.8rem}.bs-tooltip-end .tooltip-arrow::before,.bs-tooltip-auto[data-popper-placement^="right"] .tooltip-arrow::before{right:-1px;border-width:.4rem .4rem .4rem 0;border-right-color:#000}.bs-tooltip-bottom,.bs-tooltip-auto[data-popper-placement^="bottom"]{padding:.4rem 0}.bs-tooltip-bottom .tooltip-arrow,.bs-tooltip-auto[data-popper-placement^="bottom"] .tooltip-arrow{top:0}.bs-tooltip-bottom .tooltip-arrow::before,.bs-tooltip-auto[data-popper-placement^="bottom"] .tooltip-arrow::before{bottom:-1px;border-width:0 .4rem .4rem;border-bottom-color:#000}.bs-tooltip-start,.bs-tooltip-auto[data-popper-placement^="left"]{padding:0 .4rem}.bs-tooltip-start .tooltip-arrow,.bs-tooltip-auto[data-popper-placement^="left"] .tooltip-arrow{right:0;width:.4rem;height:.8rem}.bs-tooltip-start .tooltip-arrow::before,.bs-tooltip-auto[data-popper-placement^="left"] .tooltip-arrow::before{left:-1px;border-width:.4rem 0 .4rem 
.4rem;border-left-color:#000}.tooltip-inner{max-width:200px;padding:.25rem .5rem;color:#fff;text-align:center;background-color:#000;border-radius:.25rem}.popover{position:absolute;top:0;left:0 /* rtl:ignore */;z-index:1070;display:block;max-width:276px;font-family:var(--bs-font-sans-serif);font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:.875rem;word-wrap:break-word;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,0.2);border-radius:.3rem}.popover .popover-arrow{position:absolute;display:block;width:1rem;height:.5rem}.popover .popover-arrow::before,.popover .popover-arrow::after{position:absolute;display:block;content:"";border-color:transparent;border-style:solid}.bs-popover-top>.popover-arrow,.bs-popover-auto[data-popper-placement^="top"]>.popover-arrow{bottom:calc(-.5rem - 1px)}.bs-popover-top>.popover-arrow::before,.bs-popover-auto[data-popper-placement^="top"]>.popover-arrow::before{bottom:0;border-width:.5rem .5rem 0;border-top-color:rgba(0,0,0,0.25)}.bs-popover-top>.popover-arrow::after,.bs-popover-auto[data-popper-placement^="top"]>.popover-arrow::after{bottom:1px;border-width:.5rem .5rem 0;border-top-color:#fff}.bs-popover-end>.popover-arrow,.bs-popover-auto[data-popper-placement^="right"]>.popover-arrow{left:calc(-.5rem - 1px);width:.5rem;height:1rem}.bs-popover-end>.popover-arrow::before,.bs-popover-auto[data-popper-placement^="right"]>.popover-arrow::before{left:0;border-width:.5rem .5rem .5rem 0;border-right-color:rgba(0,0,0,0.25)}.bs-popover-end>.popover-arrow::after,.bs-popover-auto[data-popper-placement^="right"]>.popover-arrow::after{left:1px;border-width:.5rem .5rem .5rem 0;border-right-color:#fff}.bs-popover-bottom>.popover-arrow,.bs-popover-auto[data-popper-placement^="bottom"]>.popover-arrow{top:calc(-.5rem - 1px)}.bs-popover-bottom>.popover-arrow::before,.bs-popover-auto[data-popper-placement^="bottom"]>.popover-arrow::before{top:0;border-width:0 .5rem .5rem .5rem;border-bottom-color:rgba(0,0,0,0.25)}.bs-popover-bottom>.popover-arrow::after,.bs-popover-auto[data-popper-placement^="bottom"]>.popover-arrow::after{top:1px;border-width:0 .5rem .5rem .5rem;border-bottom-color:#fff}.bs-popover-bottom .popover-header::before,.bs-popover-auto[data-popper-placement^="bottom"] .popover-header::before{position:absolute;top:0;left:50%;display:block;width:1rem;margin-left:-.5rem;content:"";border-bottom:1px solid #f0f0f0}.bs-popover-start>.popover-arrow,.bs-popover-auto[data-popper-placement^="left"]>.popover-arrow{right:calc(-.5rem - 1px);width:.5rem;height:1rem}.bs-popover-start>.popover-arrow::before,.bs-popover-auto[data-popper-placement^="left"]>.popover-arrow::before{right:0;border-width:.5rem 0 .5rem .5rem;border-left-color:rgba(0,0,0,0.25)}.bs-popover-start>.popover-arrow::after,.bs-popover-auto[data-popper-placement^="left"]>.popover-arrow::after{right:1px;border-width:.5rem 0 .5rem .5rem;border-left-color:#fff}.popover-header{padding:.5rem 1rem;margin-bottom:0;font-size:1rem;background-color:#f0f0f0;border-bottom:1px solid rgba(0,0,0,0.2);border-top-left-radius:calc(.3rem - 1px);border-top-right-radius:calc(.3rem - 1px)}.popover-header:empty{display:none}.popover-body{padding:1rem 
1rem;color:#212529}.carousel{position:relative}.carousel.pointer-event{touch-action:pan-y}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner::after{display:block;clear:both;content:""}.carousel-item{position:relative;display:none;float:left;width:100%;margin-right:-100%;backface-visibility:hidden;transition:transform .6s ease-in-out}@media (prefers-reduced-motion: reduce){.carousel-item{transition:none}}.carousel-item.active,.carousel-item-next,.carousel-item-prev{display:block}.carousel-item-next:not(.carousel-item-start),.active.carousel-item-end{transform:translateX(100%)}.carousel-item-prev:not(.carousel-item-end),.active.carousel-item-start{transform:translateX(-100%)}.carousel-fade .carousel-item{opacity:0;transition-property:opacity;transform:none}.carousel-fade .carousel-item.active,.carousel-fade .carousel-item-next.carousel-item-start,.carousel-fade .carousel-item-prev.carousel-item-end{z-index:1;opacity:1}.carousel-fade .active.carousel-item-start,.carousel-fade .active.carousel-item-end{z-index:0;opacity:0;transition:opacity 0s .6s}@media (prefers-reduced-motion: reduce){.carousel-fade .active.carousel-item-start,.carousel-fade .active.carousel-item-end{transition:none}}.carousel-control-prev,.carousel-control-next{position:absolute;top:0;bottom:0;z-index:1;display:flex;align-items:center;justify-content:center;width:15%;padding:0;color:#fff;text-align:center;background:none;border:0;opacity:.5;transition:opacity 0.15s ease}@media (prefers-reduced-motion: reduce){.carousel-control-prev,.carousel-control-next{transition:none}}.carousel-control-prev:hover,.carousel-control-prev:focus,.carousel-control-next:hover,.carousel-control-next:focus{color:#fff;text-decoration:none;outline:0;opacity:.9}.carousel-control-prev{left:0}.carousel-control-next{right:0}.carousel-control-prev-icon,.carousel-control-next-icon{display:inline-block;width:2rem;height:2rem;background-repeat:no-repeat;background-position:50%;background-size:100% 100%}.carousel-control-prev-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0z'/%3e%3c/svg%3e")}.carousel-control-next-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M4.646 1.646a.5.5 0 0 1 .708 0l6 6a.5.5 0 0 1 0 .708l-6 6a.5.5 0 0 1-.708-.708L10.293 8 4.646 2.354a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e")}.carousel-indicators{position:absolute;right:0;bottom:0;left:0;z-index:2;display:flex;justify-content:center;padding:0;margin-right:15%;margin-bottom:1rem;margin-left:15%;list-style:none}.carousel-indicators [data-bs-target]{box-sizing:content-box;flex:0 1 auto;width:30px;height:3px;padding:0;margin-right:3px;margin-left:3px;text-indent:-999px;cursor:pointer;background-color:#fff;background-clip:padding-box;border:0;border-top:10px solid transparent;border-bottom:10px solid transparent;opacity:.5;transition:opacity 0.6s ease}@media (prefers-reduced-motion: reduce){.carousel-indicators [data-bs-target]{transition:none}}.carousel-indicators .active{opacity:1}.carousel-caption{position:absolute;right:15%;bottom:1.25rem;left:15%;padding-top:1.25rem;padding-bottom:1.25rem;color:#fff;text-align:center}.carousel-dark .carousel-control-prev-icon,.carousel-dark .carousel-control-next-icon{filter:invert(1) grayscale(100)}.carousel-dark .carousel-indicators 
[data-bs-target]{background-color:#000}.carousel-dark .carousel-caption{color:#000}@keyframes spinner-border{to{transform:rotate(360deg) /* rtl:ignore */}}.spinner-border{display:inline-block;width:2rem;height:2rem;vertical-align:-.125em;border:.25em solid currentColor;border-right-color:transparent;border-radius:50%;animation:.75s linear infinite spinner-border}.spinner-border-sm{width:1rem;height:1rem;border-width:.2em}@keyframes spinner-grow{0%{transform:scale(0)}50%{opacity:1;transform:none}}.spinner-grow{display:inline-block;width:2rem;height:2rem;vertical-align:-.125em;background-color:currentColor;border-radius:50%;opacity:0;animation:.75s linear infinite spinner-grow}.spinner-grow-sm{width:1rem;height:1rem}@media (prefers-reduced-motion: reduce){.spinner-border,.spinner-grow{animation-duration:1.5s}}.offcanvas{position:fixed;bottom:0;z-index:1050;display:flex;flex-direction:column;max-width:100%;visibility:hidden;background-color:#fff;background-clip:padding-box;outline:0;transition:transform .3s ease-in-out}@media (prefers-reduced-motion: reduce){.offcanvas{transition:none}}.offcanvas-header{display:flex;align-items:center;justify-content:space-between;padding:1rem 1rem}.offcanvas-header .btn-close{padding:.5rem .5rem;margin-top:-.5rem;margin-right:-.5rem;margin-bottom:-.5rem}.offcanvas-title{margin-bottom:0;line-height:1.5}.offcanvas-body{flex-grow:1;padding:1rem 1rem;overflow-y:auto}.offcanvas-start{top:0;left:0;width:400px;border-right:1px solid rgba(0,0,0,0.2);transform:translateX(-100%)}.offcanvas-end{top:0;right:0;width:400px;border-left:1px solid rgba(0,0,0,0.2);transform:translateX(100%)}.offcanvas-top{top:0;right:0;left:0;height:30vh;max-height:100%;border-bottom:1px solid rgba(0,0,0,0.2);transform:translateY(-100%)}.offcanvas-bottom{right:0;left:0;height:30vh;max-height:100%;border-top:1px solid rgba(0,0,0,0.2);transform:translateY(100%)}.offcanvas.show{transform:none}.clearfix::after{display:block;clear:both;content:""}.link-primary{color:#c5050c}.link-primary:hover,.link-primary:focus{color:#9e040a}.link-secondary{color:#6c757d}.link-secondary:hover,.link-secondary:focus{color:#565e64}.link-success{color:#198754}.link-success:hover,.link-success:focus{color:#146c43}.link-info{color:#0dcaf0}.link-info:hover,.link-info:focus{color:#3dd5f3}.link-warning{color:#ffc107}.link-warning:hover,.link-warning:focus{color:#ffcd39}.link-danger{color:#dc3545}.link-danger:hover,.link-danger:focus{color:#b02a37}.link-light{color:#f8f9fa}.link-light:hover,.link-light:focus{color:#f9fafb}.link-dark{color:#212529}.link-dark:hover,.link-dark:focus{color:#1a1e21}.ratio{position:relative;width:100%}.ratio::before{display:block;padding-top:var(--bs-aspect-ratio);content:""}.ratio>*{position:absolute;top:0;left:0;width:100%;height:100%}.ratio-1x1{--bs-aspect-ratio: 100%}.ratio-4x3{--bs-aspect-ratio: calc(3 / 4 * 100%)}.ratio-16x9{--bs-aspect-ratio: calc(9 / 16 * 100%)}.ratio-21x9{--bs-aspect-ratio: calc(9 / 21 * 100%)}.fixed-top{position:fixed;top:0;right:0;left:0;z-index:1030}.fixed-bottom{position:fixed;right:0;bottom:0;left:0;z-index:1030}.sticky-top{position:sticky;top:0;z-index:1020}@media (min-width: 576px){.sticky-sm-top{position:sticky;top:0;z-index:1020}}@media (min-width: 768px){.sticky-md-top{position:sticky;top:0;z-index:1020}}@media (min-width: 992px){.sticky-lg-top{position:sticky;top:0;z-index:1020}}@media (min-width: 1200px){.sticky-xl-top{position:sticky;top:0;z-index:1020}}@media (min-width: 
1400px){.sticky-xxl-top{position:sticky;top:0;z-index:1020}}.visually-hidden,.visually-hidden-focusable:not(:focus):not(:focus-within){position:absolute !important;width:1px !important;height:1px !important;padding:0 !important;margin:-1px !important;overflow:hidden !important;clip:rect(0, 0, 0, 0) !important;white-space:nowrap !important;border:0 !important}.stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;content:""}.text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.align-baseline{vertical-align:baseline !important}.align-top{vertical-align:top !important}.align-middle{vertical-align:middle !important}.align-bottom{vertical-align:bottom !important}.align-text-bottom{vertical-align:text-bottom !important}.align-text-top{vertical-align:text-top !important}.float-start{float:left !important}.float-end{float:right !important}.float-none{float:none !important}.overflow-auto{overflow:auto !important}.overflow-hidden{overflow:hidden !important}.overflow-visible{overflow:visible !important}.overflow-scroll{overflow:scroll !important}.d-inline{display:inline !important}.d-inline-block{display:inline-block !important}.d-block{display:block !important}.d-grid{display:grid !important}.d-table{display:table !important}.d-table-row{display:table-row !important}.d-table-cell{display:table-cell !important}.d-flex{display:flex !important}.d-inline-flex{display:inline-flex !important}.d-none{display:none !important}.shadow{box-shadow:0 0.5rem 1rem rgba(0,0,0,0.15) !important}.shadow-sm{box-shadow:0 0.125rem 0.25rem rgba(0,0,0,0.075) !important}.shadow-lg{box-shadow:0 1rem 3rem rgba(0,0,0,0.175) !important}.shadow-none{box-shadow:none !important}.position-static{position:static !important}.position-relative{position:relative !important}.position-absolute{position:absolute !important}.position-fixed{position:fixed !important}.position-sticky{position:sticky !important}.top-0{top:0 !important}.top-50{top:50% !important}.top-100{top:100% !important}.bottom-0{bottom:0 !important}.bottom-50{bottom:50% !important}.bottom-100{bottom:100% !important}.start-0{left:0 !important}.start-50{left:50% !important}.start-100{left:100% !important}.end-0{right:0 !important}.end-50{right:50% !important}.end-100{right:100% !important}.translate-middle{transform:translate(-50%, -50%) !important}.translate-middle-x{transform:translateX(-50%) !important}.translate-middle-y{transform:translateY(-50%) !important}.border{border:1px solid #dee2e6 !important}.border-0{border:0 !important}.border-top{border-top:1px solid #dee2e6 !important}.border-top-0{border-top:0 !important}.border-end{border-right:1px solid #dee2e6 !important}.border-end-0{border-right:0 !important}.border-bottom{border-bottom:1px solid #dee2e6 !important}.border-bottom-0{border-bottom:0 !important}.border-start{border-left:1px solid #dee2e6 !important}.border-start-0{border-left:0 !important}.border-primary{border-color:#c5050c !important}.border-secondary{border-color:#6c757d !important}.border-success{border-color:#198754 !important}.border-info{border-color:#0dcaf0 !important}.border-warning{border-color:#ffc107 !important}.border-danger{border-color:#dc3545 !important}.border-light{border-color:#f8f9fa !important}.border-dark{border-color:#212529 !important}.border-white{border-color:#fff !important}.border-1{border-width:1px !important}.border-2{border-width:2px !important}.border-3{border-width:3px !important}.border-4{border-width:4px !important}.border-5{border-width:5px !important}.w-25{width:25% 
!important}.w-50{width:50% !important}.w-75{width:75% !important}.w-100{width:100% !important}.w-auto{width:auto !important}.mw-100{max-width:100% !important}.vw-100{width:100vw !important}.min-vw-100{min-width:100vw !important}.h-25{height:25% !important}.h-50{height:50% !important}.h-75{height:75% !important}.h-100{height:100% !important}.h-auto{height:auto !important}.mh-100{max-height:100% !important}.vh-100{height:100vh !important}.min-vh-100{min-height:100vh !important}.flex-fill{flex:1 1 auto !important}.flex-row{flex-direction:row !important}.flex-column{flex-direction:column !important}.flex-row-reverse{flex-direction:row-reverse !important}.flex-column-reverse{flex-direction:column-reverse !important}.flex-grow-0{flex-grow:0 !important}.flex-grow-1{flex-grow:1 !important}.flex-shrink-0{flex-shrink:0 !important}.flex-shrink-1{flex-shrink:1 !important}.flex-wrap{flex-wrap:wrap !important}.flex-nowrap{flex-wrap:nowrap !important}.flex-wrap-reverse{flex-wrap:wrap-reverse !important}.gap-0{gap:0 !important}.gap-1{gap:.25rem !important}.gap-2{gap:.5rem !important}.gap-3{gap:1rem !important}.gap-4{gap:1.5rem !important}.gap-5{gap:3rem !important}.justify-content-start{justify-content:flex-start !important}.justify-content-end{justify-content:flex-end !important}.justify-content-center{justify-content:center !important}.justify-content-between{justify-content:space-between !important}.justify-content-around{justify-content:space-around !important}.justify-content-evenly{justify-content:space-evenly !important}.align-items-start{align-items:flex-start !important}.align-items-end{align-items:flex-end !important}.align-items-center{align-items:center !important}.align-items-baseline{align-items:baseline !important}.align-items-stretch{align-items:stretch !important}.align-content-start{align-content:flex-start !important}.align-content-end{align-content:flex-end !important}.align-content-center{align-content:center !important}.align-content-between{align-content:space-between !important}.align-content-around{align-content:space-around !important}.align-content-stretch{align-content:stretch !important}.align-self-auto{align-self:auto !important}.align-self-start{align-self:flex-start !important}.align-self-end{align-self:flex-end !important}.align-self-center{align-self:center !important}.align-self-baseline{align-self:baseline !important}.align-self-stretch{align-self:stretch !important}.order-first{order:-1 !important}.order-0{order:0 !important}.order-1{order:1 !important}.order-2{order:2 !important}.order-3{order:3 !important}.order-4{order:4 !important}.order-5{order:5 !important}.order-last{order:6 !important}.m-0{margin:0 !important}.m-1{margin:.25rem !important}.m-2{margin:.5rem !important}.m-3{margin:1rem !important}.m-4{margin:1.5rem !important}.m-5{margin:3rem !important}.m-auto{margin:auto !important}.mx-0{margin-right:0 !important;margin-left:0 !important}.mx-1{margin-right:.25rem !important;margin-left:.25rem !important}.mx-2{margin-right:.5rem !important;margin-left:.5rem !important}.mx-3{margin-right:1rem !important;margin-left:1rem !important}.mx-4{margin-right:1.5rem !important;margin-left:1.5rem !important}.mx-5{margin-right:3rem !important;margin-left:3rem !important}.mx-auto{margin-right:auto !important;margin-left:auto !important}.my-0{margin-top:0 !important;margin-bottom:0 !important}.my-1{margin-top:.25rem !important;margin-bottom:.25rem !important}.my-2{margin-top:.5rem !important;margin-bottom:.5rem !important}.my-3{margin-top:1rem !important;margin-bottom:1rem 
!important}.my-4{margin-top:1.5rem !important;margin-bottom:1.5rem !important}.my-5{margin-top:3rem !important;margin-bottom:3rem !important}.my-auto{margin-top:auto !important;margin-bottom:auto !important}.mt-0{margin-top:0 !important}.mt-1{margin-top:.25rem !important}.mt-2{margin-top:.5rem !important}.mt-3{margin-top:1rem !important}.mt-4{margin-top:1.5rem !important}.mt-5{margin-top:3rem !important}.mt-auto{margin-top:auto !important}.me-0{margin-right:0 !important}.me-1{margin-right:.25rem !important}.me-2{margin-right:.5rem !important}.me-3{margin-right:1rem !important}.me-4{margin-right:1.5rem !important}.me-5{margin-right:3rem !important}.me-auto{margin-right:auto !important}.mb-0{margin-bottom:0 !important}.mb-1{margin-bottom:.25rem !important}.mb-2{margin-bottom:.5rem !important}.mb-3{margin-bottom:1rem !important}.mb-4{margin-bottom:1.5rem !important}.mb-5{margin-bottom:3rem !important}.mb-auto{margin-bottom:auto !important}.ms-0{margin-left:0 !important}.ms-1{margin-left:.25rem !important}.ms-2{margin-left:.5rem !important}.ms-3{margin-left:1rem !important}.ms-4{margin-left:1.5rem !important}.ms-5{margin-left:3rem !important}.ms-auto{margin-left:auto !important}.p-0{padding:0 !important}.p-1{padding:.25rem !important}.p-2{padding:.5rem !important}.p-3{padding:1rem !important}.p-4{padding:1.5rem !important}.p-5{padding:3rem !important}.px-0{padding-right:0 !important;padding-left:0 !important}.px-1{padding-right:.25rem !important;padding-left:.25rem !important}.px-2{padding-right:.5rem !important;padding-left:.5rem !important}.px-3{padding-right:1rem !important;padding-left:1rem !important}.px-4{padding-right:1.5rem !important;padding-left:1.5rem !important}.px-5{padding-right:3rem !important;padding-left:3rem !important}.py-0{padding-top:0 !important;padding-bottom:0 !important}.py-1{padding-top:.25rem !important;padding-bottom:.25rem !important}.py-2{padding-top:.5rem !important;padding-bottom:.5rem !important}.py-3{padding-top:1rem !important;padding-bottom:1rem !important}.py-4{padding-top:1.5rem !important;padding-bottom:1.5rem !important}.py-5{padding-top:3rem !important;padding-bottom:3rem !important}.pt-0{padding-top:0 !important}.pt-1{padding-top:.25rem !important}.pt-2{padding-top:.5rem !important}.pt-3{padding-top:1rem !important}.pt-4{padding-top:1.5rem !important}.pt-5{padding-top:3rem !important}.pe-0{padding-right:0 !important}.pe-1{padding-right:.25rem !important}.pe-2{padding-right:.5rem !important}.pe-3{padding-right:1rem !important}.pe-4{padding-right:1.5rem !important}.pe-5{padding-right:3rem !important}.pb-0{padding-bottom:0 !important}.pb-1{padding-bottom:.25rem !important}.pb-2{padding-bottom:.5rem !important}.pb-3{padding-bottom:1rem !important}.pb-4{padding-bottom:1.5rem !important}.pb-5{padding-bottom:3rem !important}.ps-0{padding-left:0 !important}.ps-1{padding-left:.25rem !important}.ps-2{padding-left:.5rem !important}.ps-3{padding-left:1rem !important}.ps-4{padding-left:1.5rem !important}.ps-5{padding-left:3rem !important}.font-monospace{font-family:var(--bs-font-monospace) !important}.fs-1{font-size:calc(1.375rem + 1.5vw) !important}.fs-2{font-size:calc(1.325rem + .9vw) !important}.fs-3{font-size:calc(1.3rem + .6vw) !important}.fs-4{font-size:calc(1.275rem + .3vw) !important}.fs-5{font-size:1.25rem !important}.fs-6{font-size:1rem !important}.fst-italic{font-style:italic !important}.fst-normal{font-style:normal !important}.fw-light{font-weight:300 !important}.fw-lighter{font-weight:lighter !important}.fw-normal{font-weight:400 
!important}.fw-bold{font-weight:700 !important}.fw-bolder{font-weight:bolder !important}.lh-1{line-height:1 !important}.lh-sm{line-height:1.25 !important}.lh-base{line-height:1.5 !important}.lh-lg{line-height:2 !important}.text-start{text-align:left !important}.text-end{text-align:right !important}.text-center{text-align:center !important}.text-decoration-none{text-decoration:none !important}.text-decoration-underline{text-decoration:underline !important}.text-decoration-line-through{text-decoration:line-through !important}.text-lowercase{text-transform:lowercase !important}.text-uppercase{text-transform:uppercase !important}.text-capitalize{text-transform:capitalize !important}.text-wrap{white-space:normal !important}.text-nowrap{white-space:nowrap !important}.text-break{word-wrap:break-word !important;word-break:break-word !important}.text-primary{color:#c5050c !important}.text-secondary{color:#6c757d !important}.text-success{color:#198754 !important}.text-info{color:#0dcaf0 !important}.text-warning{color:#ffc107 !important}.text-danger{color:#dc3545 !important}.text-light{color:#f8f9fa !important}.text-dark{color:#212529 !important}.text-white{color:#fff !important}.text-body{color:#212529 !important}.text-muted{color:#6c757d !important}.text-black-50{color:rgba(0,0,0,0.5) !important}.text-white-50{color:rgba(255,255,255,0.5) !important}.text-reset{color:inherit !important}.bg-primary{background-color:#c5050c !important}.bg-secondary{background-color:#6c757d !important}.bg-success{background-color:#198754 !important}.bg-info{background-color:#0dcaf0 !important}.bg-warning{background-color:#ffc107 !important}.bg-danger{background-color:#dc3545 !important}.bg-light{background-color:#f8f9fa !important}.bg-dark{background-color:#212529 !important}.bg-body{background-color:#fff !important}.bg-white{background-color:#fff !important}.bg-transparent{background-color:rgba(0,0,0,0) !important}.bg-gradient{background-image:var(--bs-gradient) !important}.user-select-all{user-select:all !important}.user-select-auto{user-select:auto !important}.user-select-none{user-select:none !important}.pe-none{pointer-events:none !important}.pe-auto{pointer-events:auto !important}.rounded{border-radius:.25rem !important}.rounded-0{border-radius:0 !important}.rounded-1{border-radius:.2rem !important}.rounded-2{border-radius:.25rem !important}.rounded-3{border-radius:.3rem !important}.rounded-circle{border-radius:50% !important}.rounded-pill{border-radius:50rem !important}.rounded-top{border-top-left-radius:.25rem !important;border-top-right-radius:.25rem !important}.rounded-end{border-top-right-radius:.25rem !important;border-bottom-right-radius:.25rem !important}.rounded-bottom{border-bottom-right-radius:.25rem !important;border-bottom-left-radius:.25rem !important}.rounded-start{border-bottom-left-radius:.25rem !important;border-top-left-radius:.25rem !important}.visible{visibility:visible !important}.invisible{visibility:hidden !important}@media (min-width: 576px){.float-sm-start{float:left !important}.float-sm-end{float:right !important}.float-sm-none{float:none !important}.d-sm-inline{display:inline !important}.d-sm-inline-block{display:inline-block !important}.d-sm-block{display:block !important}.d-sm-grid{display:grid !important}.d-sm-table{display:table !important}.d-sm-table-row{display:table-row !important}.d-sm-table-cell{display:table-cell !important}.d-sm-flex{display:flex !important}.d-sm-inline-flex{display:inline-flex !important}.d-sm-none{display:none !important}.flex-sm-fill{flex:1 1 auto 
!important}.flex-sm-row{flex-direction:row !important}.flex-sm-column{flex-direction:column !important}.flex-sm-row-reverse{flex-direction:row-reverse !important}.flex-sm-column-reverse{flex-direction:column-reverse !important}.flex-sm-grow-0{flex-grow:0 !important}.flex-sm-grow-1{flex-grow:1 !important}.flex-sm-shrink-0{flex-shrink:0 !important}.flex-sm-shrink-1{flex-shrink:1 !important}.flex-sm-wrap{flex-wrap:wrap !important}.flex-sm-nowrap{flex-wrap:nowrap !important}.flex-sm-wrap-reverse{flex-wrap:wrap-reverse !important}.gap-sm-0{gap:0 !important}.gap-sm-1{gap:.25rem !important}.gap-sm-2{gap:.5rem !important}.gap-sm-3{gap:1rem !important}.gap-sm-4{gap:1.5rem !important}.gap-sm-5{gap:3rem !important}.justify-content-sm-start{justify-content:flex-start !important}.justify-content-sm-end{justify-content:flex-end !important}.justify-content-sm-center{justify-content:center !important}.justify-content-sm-between{justify-content:space-between !important}.justify-content-sm-around{justify-content:space-around !important}.justify-content-sm-evenly{justify-content:space-evenly !important}.align-items-sm-start{align-items:flex-start !important}.align-items-sm-end{align-items:flex-end !important}.align-items-sm-center{align-items:center !important}.align-items-sm-baseline{align-items:baseline !important}.align-items-sm-stretch{align-items:stretch !important}.align-content-sm-start{align-content:flex-start !important}.align-content-sm-end{align-content:flex-end !important}.align-content-sm-center{align-content:center !important}.align-content-sm-between{align-content:space-between !important}.align-content-sm-around{align-content:space-around !important}.align-content-sm-stretch{align-content:stretch !important}.align-self-sm-auto{align-self:auto !important}.align-self-sm-start{align-self:flex-start !important}.align-self-sm-end{align-self:flex-end !important}.align-self-sm-center{align-self:center !important}.align-self-sm-baseline{align-self:baseline !important}.align-self-sm-stretch{align-self:stretch !important}.order-sm-first{order:-1 !important}.order-sm-0{order:0 !important}.order-sm-1{order:1 !important}.order-sm-2{order:2 !important}.order-sm-3{order:3 !important}.order-sm-4{order:4 !important}.order-sm-5{order:5 !important}.order-sm-last{order:6 !important}.m-sm-0{margin:0 !important}.m-sm-1{margin:.25rem !important}.m-sm-2{margin:.5rem !important}.m-sm-3{margin:1rem !important}.m-sm-4{margin:1.5rem !important}.m-sm-5{margin:3rem !important}.m-sm-auto{margin:auto !important}.mx-sm-0{margin-right:0 !important;margin-left:0 !important}.mx-sm-1{margin-right:.25rem !important;margin-left:.25rem !important}.mx-sm-2{margin-right:.5rem !important;margin-left:.5rem !important}.mx-sm-3{margin-right:1rem !important;margin-left:1rem !important}.mx-sm-4{margin-right:1.5rem !important;margin-left:1.5rem !important}.mx-sm-5{margin-right:3rem !important;margin-left:3rem !important}.mx-sm-auto{margin-right:auto !important;margin-left:auto !important}.my-sm-0{margin-top:0 !important;margin-bottom:0 !important}.my-sm-1{margin-top:.25rem !important;margin-bottom:.25rem !important}.my-sm-2{margin-top:.5rem !important;margin-bottom:.5rem !important}.my-sm-3{margin-top:1rem !important;margin-bottom:1rem !important}.my-sm-4{margin-top:1.5rem !important;margin-bottom:1.5rem !important}.my-sm-5{margin-top:3rem !important;margin-bottom:3rem !important}.my-sm-auto{margin-top:auto !important;margin-bottom:auto !important}.mt-sm-0{margin-top:0 !important}.mt-sm-1{margin-top:.25rem 
!important}.mt-sm-2{margin-top:.5rem !important}.mt-sm-3{margin-top:1rem !important}.mt-sm-4{margin-top:1.5rem !important}.mt-sm-5{margin-top:3rem !important}.mt-sm-auto{margin-top:auto !important}.me-sm-0{margin-right:0 !important}.me-sm-1{margin-right:.25rem !important}.me-sm-2{margin-right:.5rem !important}.me-sm-3{margin-right:1rem !important}.me-sm-4{margin-right:1.5rem !important}.me-sm-5{margin-right:3rem !important}.me-sm-auto{margin-right:auto !important}.mb-sm-0{margin-bottom:0 !important}.mb-sm-1{margin-bottom:.25rem !important}.mb-sm-2{margin-bottom:.5rem !important}.mb-sm-3{margin-bottom:1rem !important}.mb-sm-4{margin-bottom:1.5rem !important}.mb-sm-5{margin-bottom:3rem !important}.mb-sm-auto{margin-bottom:auto !important}.ms-sm-0{margin-left:0 !important}.ms-sm-1{margin-left:.25rem !important}.ms-sm-2{margin-left:.5rem !important}.ms-sm-3{margin-left:1rem !important}.ms-sm-4{margin-left:1.5rem !important}.ms-sm-5{margin-left:3rem !important}.ms-sm-auto{margin-left:auto !important}.p-sm-0{padding:0 !important}.p-sm-1{padding:.25rem !important}.p-sm-2{padding:.5rem !important}.p-sm-3{padding:1rem !important}.p-sm-4{padding:1.5rem !important}.p-sm-5{padding:3rem !important}.px-sm-0{padding-right:0 !important;padding-left:0 !important}.px-sm-1{padding-right:.25rem !important;padding-left:.25rem !important}.px-sm-2{padding-right:.5rem !important;padding-left:.5rem !important}.px-sm-3{padding-right:1rem !important;padding-left:1rem !important}.px-sm-4{padding-right:1.5rem !important;padding-left:1.5rem !important}.px-sm-5{padding-right:3rem !important;padding-left:3rem !important}.py-sm-0{padding-top:0 !important;padding-bottom:0 !important}.py-sm-1{padding-top:.25rem !important;padding-bottom:.25rem !important}.py-sm-2{padding-top:.5rem !important;padding-bottom:.5rem !important}.py-sm-3{padding-top:1rem !important;padding-bottom:1rem !important}.py-sm-4{padding-top:1.5rem !important;padding-bottom:1.5rem !important}.py-sm-5{padding-top:3rem !important;padding-bottom:3rem !important}.pt-sm-0{padding-top:0 !important}.pt-sm-1{padding-top:.25rem !important}.pt-sm-2{padding-top:.5rem !important}.pt-sm-3{padding-top:1rem !important}.pt-sm-4{padding-top:1.5rem !important}.pt-sm-5{padding-top:3rem !important}.pe-sm-0{padding-right:0 !important}.pe-sm-1{padding-right:.25rem !important}.pe-sm-2{padding-right:.5rem !important}.pe-sm-3{padding-right:1rem !important}.pe-sm-4{padding-right:1.5rem !important}.pe-sm-5{padding-right:3rem !important}.pb-sm-0{padding-bottom:0 !important}.pb-sm-1{padding-bottom:.25rem !important}.pb-sm-2{padding-bottom:.5rem !important}.pb-sm-3{padding-bottom:1rem !important}.pb-sm-4{padding-bottom:1.5rem !important}.pb-sm-5{padding-bottom:3rem !important}.ps-sm-0{padding-left:0 !important}.ps-sm-1{padding-left:.25rem !important}.ps-sm-2{padding-left:.5rem !important}.ps-sm-3{padding-left:1rem !important}.ps-sm-4{padding-left:1.5rem !important}.ps-sm-5{padding-left:3rem !important}.text-sm-start{text-align:left !important}.text-sm-end{text-align:right !important}.text-sm-center{text-align:center !important}}@media (min-width: 768px){.float-md-start{float:left !important}.float-md-end{float:right !important}.float-md-none{float:none !important}.d-md-inline{display:inline !important}.d-md-inline-block{display:inline-block !important}.d-md-block{display:block !important}.d-md-grid{display:grid !important}.d-md-table{display:table !important}.d-md-table-row{display:table-row !important}.d-md-table-cell{display:table-cell !important}.d-md-flex{display:flex 
!important}.d-md-inline-flex{display:inline-flex !important}.d-md-none{display:none !important}.flex-md-fill{flex:1 1 auto !important}.flex-md-row{flex-direction:row !important}.flex-md-column{flex-direction:column !important}.flex-md-row-reverse{flex-direction:row-reverse !important}.flex-md-column-reverse{flex-direction:column-reverse !important}.flex-md-grow-0{flex-grow:0 !important}.flex-md-grow-1{flex-grow:1 !important}.flex-md-shrink-0{flex-shrink:0 !important}.flex-md-shrink-1{flex-shrink:1 !important}.flex-md-wrap{flex-wrap:wrap !important}.flex-md-nowrap{flex-wrap:nowrap !important}.flex-md-wrap-reverse{flex-wrap:wrap-reverse !important}.gap-md-0{gap:0 !important}.gap-md-1{gap:.25rem !important}.gap-md-2{gap:.5rem !important}.gap-md-3{gap:1rem !important}.gap-md-4{gap:1.5rem !important}.gap-md-5{gap:3rem !important}.justify-content-md-start{justify-content:flex-start !important}.justify-content-md-end{justify-content:flex-end !important}.justify-content-md-center{justify-content:center !important}.justify-content-md-between{justify-content:space-between !important}.justify-content-md-around{justify-content:space-around !important}.justify-content-md-evenly{justify-content:space-evenly !important}.align-items-md-start{align-items:flex-start !important}.align-items-md-end{align-items:flex-end !important}.align-items-md-center{align-items:center !important}.align-items-md-baseline{align-items:baseline !important}.align-items-md-stretch{align-items:stretch !important}.align-content-md-start{align-content:flex-start !important}.align-content-md-end{align-content:flex-end !important}.align-content-md-center{align-content:center !important}.align-content-md-between{align-content:space-between !important}.align-content-md-around{align-content:space-around !important}.align-content-md-stretch{align-content:stretch !important}.align-self-md-auto{align-self:auto !important}.align-self-md-start{align-self:flex-start !important}.align-self-md-end{align-self:flex-end !important}.align-self-md-center{align-self:center !important}.align-self-md-baseline{align-self:baseline !important}.align-self-md-stretch{align-self:stretch !important}.order-md-first{order:-1 !important}.order-md-0{order:0 !important}.order-md-1{order:1 !important}.order-md-2{order:2 !important}.order-md-3{order:3 !important}.order-md-4{order:4 !important}.order-md-5{order:5 !important}.order-md-last{order:6 !important}.m-md-0{margin:0 !important}.m-md-1{margin:.25rem !important}.m-md-2{margin:.5rem !important}.m-md-3{margin:1rem !important}.m-md-4{margin:1.5rem !important}.m-md-5{margin:3rem !important}.m-md-auto{margin:auto !important}.mx-md-0{margin-right:0 !important;margin-left:0 !important}.mx-md-1{margin-right:.25rem !important;margin-left:.25rem !important}.mx-md-2{margin-right:.5rem !important;margin-left:.5rem !important}.mx-md-3{margin-right:1rem !important;margin-left:1rem !important}.mx-md-4{margin-right:1.5rem !important;margin-left:1.5rem !important}.mx-md-5{margin-right:3rem !important;margin-left:3rem !important}.mx-md-auto{margin-right:auto !important;margin-left:auto !important}.my-md-0{margin-top:0 !important;margin-bottom:0 !important}.my-md-1{margin-top:.25rem !important;margin-bottom:.25rem !important}.my-md-2{margin-top:.5rem !important;margin-bottom:.5rem !important}.my-md-3{margin-top:1rem !important;margin-bottom:1rem !important}.my-md-4{margin-top:1.5rem !important;margin-bottom:1.5rem !important}.my-md-5{margin-top:3rem !important;margin-bottom:3rem !important}.my-md-auto{margin-top:auto 
!important;margin-bottom:auto !important}.mt-md-0{margin-top:0 !important}.mt-md-1{margin-top:.25rem !important}.mt-md-2{margin-top:.5rem !important}.mt-md-3{margin-top:1rem !important}.mt-md-4{margin-top:1.5rem !important}.mt-md-5{margin-top:3rem !important}.mt-md-auto{margin-top:auto !important}.me-md-0{margin-right:0 !important}.me-md-1{margin-right:.25rem !important}.me-md-2{margin-right:.5rem !important}.me-md-3{margin-right:1rem !important}.me-md-4{margin-right:1.5rem !important}.me-md-5{margin-right:3rem !important}.me-md-auto{margin-right:auto !important}.mb-md-0{margin-bottom:0 !important}.mb-md-1{margin-bottom:.25rem !important}.mb-md-2{margin-bottom:.5rem !important}.mb-md-3{margin-bottom:1rem !important}.mb-md-4{margin-bottom:1.5rem !important}.mb-md-5{margin-bottom:3rem !important}.mb-md-auto{margin-bottom:auto !important}.ms-md-0{margin-left:0 !important}.ms-md-1{margin-left:.25rem !important}.ms-md-2{margin-left:.5rem !important}.ms-md-3{margin-left:1rem !important}.ms-md-4{margin-left:1.5rem !important}.ms-md-5{margin-left:3rem !important}.ms-md-auto{margin-left:auto !important}.p-md-0{padding:0 !important}.p-md-1{padding:.25rem !important}.p-md-2{padding:.5rem !important}.p-md-3{padding:1rem !important}.p-md-4{padding:1.5rem !important}.p-md-5{padding:3rem !important}.px-md-0{padding-right:0 !important;padding-left:0 !important}.px-md-1{padding-right:.25rem !important;padding-left:.25rem !important}.px-md-2{padding-right:.5rem !important;padding-left:.5rem !important}.px-md-3{padding-right:1rem !important;padding-left:1rem !important}.px-md-4{padding-right:1.5rem !important;padding-left:1.5rem !important}.px-md-5{padding-right:3rem !important;padding-left:3rem !important}.py-md-0{padding-top:0 !important;padding-bottom:0 !important}.py-md-1{padding-top:.25rem !important;padding-bottom:.25rem !important}.py-md-2{padding-top:.5rem !important;padding-bottom:.5rem !important}.py-md-3{padding-top:1rem !important;padding-bottom:1rem !important}.py-md-4{padding-top:1.5rem !important;padding-bottom:1.5rem !important}.py-md-5{padding-top:3rem !important;padding-bottom:3rem !important}.pt-md-0{padding-top:0 !important}.pt-md-1{padding-top:.25rem !important}.pt-md-2{padding-top:.5rem !important}.pt-md-3{padding-top:1rem !important}.pt-md-4{padding-top:1.5rem !important}.pt-md-5{padding-top:3rem !important}.pe-md-0{padding-right:0 !important}.pe-md-1{padding-right:.25rem !important}.pe-md-2{padding-right:.5rem !important}.pe-md-3{padding-right:1rem !important}.pe-md-4{padding-right:1.5rem !important}.pe-md-5{padding-right:3rem !important}.pb-md-0{padding-bottom:0 !important}.pb-md-1{padding-bottom:.25rem !important}.pb-md-2{padding-bottom:.5rem !important}.pb-md-3{padding-bottom:1rem !important}.pb-md-4{padding-bottom:1.5rem !important}.pb-md-5{padding-bottom:3rem !important}.ps-md-0{padding-left:0 !important}.ps-md-1{padding-left:.25rem !important}.ps-md-2{padding-left:.5rem !important}.ps-md-3{padding-left:1rem !important}.ps-md-4{padding-left:1.5rem !important}.ps-md-5{padding-left:3rem !important}.text-md-start{text-align:left !important}.text-md-end{text-align:right !important}.text-md-center{text-align:center !important}}@media (min-width: 992px){.float-lg-start{float:left !important}.float-lg-end{float:right !important}.float-lg-none{float:none !important}.d-lg-inline{display:inline !important}.d-lg-inline-block{display:inline-block !important}.d-lg-block{display:block !important}.d-lg-grid{display:grid !important}.d-lg-table{display:table 
!important}.d-lg-table-row{display:table-row !important}.d-lg-table-cell{display:table-cell !important}.d-lg-flex{display:flex !important}.d-lg-inline-flex{display:inline-flex !important}.d-lg-none{display:none !important}.flex-lg-fill{flex:1 1 auto !important}.flex-lg-row{flex-direction:row !important}.flex-lg-column{flex-direction:column !important}.flex-lg-row-reverse{flex-direction:row-reverse !important}.flex-lg-column-reverse{flex-direction:column-reverse !important}.flex-lg-grow-0{flex-grow:0 !important}.flex-lg-grow-1{flex-grow:1 !important}.flex-lg-shrink-0{flex-shrink:0 !important}.flex-lg-shrink-1{flex-shrink:1 !important}.flex-lg-wrap{flex-wrap:wrap !important}.flex-lg-nowrap{flex-wrap:nowrap !important}.flex-lg-wrap-reverse{flex-wrap:wrap-reverse !important}.gap-lg-0{gap:0 !important}.gap-lg-1{gap:.25rem !important}.gap-lg-2{gap:.5rem !important}.gap-lg-3{gap:1rem !important}.gap-lg-4{gap:1.5rem !important}.gap-lg-5{gap:3rem !important}.justify-content-lg-start{justify-content:flex-start !important}.justify-content-lg-end{justify-content:flex-end !important}.justify-content-lg-center{justify-content:center !important}.justify-content-lg-between{justify-content:space-between !important}.justify-content-lg-around{justify-content:space-around !important}.justify-content-lg-evenly{justify-content:space-evenly !important}.align-items-lg-start{align-items:flex-start !important}.align-items-lg-end{align-items:flex-end !important}.align-items-lg-center{align-items:center !important}.align-items-lg-baseline{align-items:baseline !important}.align-items-lg-stretch{align-items:stretch !important}.align-content-lg-start{align-content:flex-start !important}.align-content-lg-end{align-content:flex-end !important}.align-content-lg-center{align-content:center !important}.align-content-lg-between{align-content:space-between !important}.align-content-lg-around{align-content:space-around !important}.align-content-lg-stretch{align-content:stretch !important}.align-self-lg-auto{align-self:auto !important}.align-self-lg-start{align-self:flex-start !important}.align-self-lg-end{align-self:flex-end !important}.align-self-lg-center{align-self:center !important}.align-self-lg-baseline{align-self:baseline !important}.align-self-lg-stretch{align-self:stretch !important}.order-lg-first{order:-1 !important}.order-lg-0{order:0 !important}.order-lg-1{order:1 !important}.order-lg-2{order:2 !important}.order-lg-3{order:3 !important}.order-lg-4{order:4 !important}.order-lg-5{order:5 !important}.order-lg-last{order:6 !important}.m-lg-0{margin:0 !important}.m-lg-1{margin:.25rem !important}.m-lg-2{margin:.5rem !important}.m-lg-3{margin:1rem !important}.m-lg-4{margin:1.5rem !important}.m-lg-5{margin:3rem !important}.m-lg-auto{margin:auto !important}.mx-lg-0{margin-right:0 !important;margin-left:0 !important}.mx-lg-1{margin-right:.25rem !important;margin-left:.25rem !important}.mx-lg-2{margin-right:.5rem !important;margin-left:.5rem !important}.mx-lg-3{margin-right:1rem !important;margin-left:1rem !important}.mx-lg-4{margin-right:1.5rem !important;margin-left:1.5rem !important}.mx-lg-5{margin-right:3rem !important;margin-left:3rem !important}.mx-lg-auto{margin-right:auto !important;margin-left:auto !important}.my-lg-0{margin-top:0 !important;margin-bottom:0 !important}.my-lg-1{margin-top:.25rem !important;margin-bottom:.25rem !important}.my-lg-2{margin-top:.5rem !important;margin-bottom:.5rem !important}.my-lg-3{margin-top:1rem !important;margin-bottom:1rem !important}.my-lg-4{margin-top:1.5rem 
!important;margin-bottom:1.5rem !important}.my-lg-5{margin-top:3rem !important;margin-bottom:3rem !important}.my-lg-auto{margin-top:auto !important;margin-bottom:auto !important}.mt-lg-0{margin-top:0 !important}.mt-lg-1{margin-top:.25rem !important}.mt-lg-2{margin-top:.5rem !important}.mt-lg-3{margin-top:1rem !important}.mt-lg-4{margin-top:1.5rem !important}.mt-lg-5{margin-top:3rem !important}.mt-lg-auto{margin-top:auto !important}.me-lg-0{margin-right:0 !important}.me-lg-1{margin-right:.25rem !important}.me-lg-2{margin-right:.5rem !important}.me-lg-3{margin-right:1rem !important}.me-lg-4{margin-right:1.5rem !important}.me-lg-5{margin-right:3rem !important}.me-lg-auto{margin-right:auto !important}.mb-lg-0{margin-bottom:0 !important}.mb-lg-1{margin-bottom:.25rem !important}.mb-lg-2{margin-bottom:.5rem !important}.mb-lg-3{margin-bottom:1rem !important}.mb-lg-4{margin-bottom:1.5rem !important}.mb-lg-5{margin-bottom:3rem !important}.mb-lg-auto{margin-bottom:auto !important}.ms-lg-0{margin-left:0 !important}.ms-lg-1{margin-left:.25rem !important}.ms-lg-2{margin-left:.5rem !important}.ms-lg-3{margin-left:1rem !important}.ms-lg-4{margin-left:1.5rem !important}.ms-lg-5{margin-left:3rem !important}.ms-lg-auto{margin-left:auto !important}.p-lg-0{padding:0 !important}.p-lg-1{padding:.25rem !important}.p-lg-2{padding:.5rem !important}.p-lg-3{padding:1rem !important}.p-lg-4{padding:1.5rem !important}.p-lg-5{padding:3rem !important}.px-lg-0{padding-right:0 !important;padding-left:0 !important}.px-lg-1{padding-right:.25rem !important;padding-left:.25rem !important}.px-lg-2{padding-right:.5rem !important;padding-left:.5rem !important}.px-lg-3{padding-right:1rem !important;padding-left:1rem !important}.px-lg-4{padding-right:1.5rem !important;padding-left:1.5rem !important}.px-lg-5{padding-right:3rem !important;padding-left:3rem !important}.py-lg-0{padding-top:0 !important;padding-bottom:0 !important}.py-lg-1{padding-top:.25rem !important;padding-bottom:.25rem !important}.py-lg-2{padding-top:.5rem !important;padding-bottom:.5rem !important}.py-lg-3{padding-top:1rem !important;padding-bottom:1rem !important}.py-lg-4{padding-top:1.5rem !important;padding-bottom:1.5rem !important}.py-lg-5{padding-top:3rem !important;padding-bottom:3rem !important}.pt-lg-0{padding-top:0 !important}.pt-lg-1{padding-top:.25rem !important}.pt-lg-2{padding-top:.5rem !important}.pt-lg-3{padding-top:1rem !important}.pt-lg-4{padding-top:1.5rem !important}.pt-lg-5{padding-top:3rem !important}.pe-lg-0{padding-right:0 !important}.pe-lg-1{padding-right:.25rem !important}.pe-lg-2{padding-right:.5rem !important}.pe-lg-3{padding-right:1rem !important}.pe-lg-4{padding-right:1.5rem !important}.pe-lg-5{padding-right:3rem !important}.pb-lg-0{padding-bottom:0 !important}.pb-lg-1{padding-bottom:.25rem !important}.pb-lg-2{padding-bottom:.5rem !important}.pb-lg-3{padding-bottom:1rem !important}.pb-lg-4{padding-bottom:1.5rem !important}.pb-lg-5{padding-bottom:3rem !important}.ps-lg-0{padding-left:0 !important}.ps-lg-1{padding-left:.25rem !important}.ps-lg-2{padding-left:.5rem !important}.ps-lg-3{padding-left:1rem !important}.ps-lg-4{padding-left:1.5rem !important}.ps-lg-5{padding-left:3rem !important}.text-lg-start{text-align:left !important}.text-lg-end{text-align:right !important}.text-lg-center{text-align:center !important}}@media (min-width: 1200px){.float-xl-start{float:left !important}.float-xl-end{float:right !important}.float-xl-none{float:none !important}.d-xl-inline{display:inline !important}.d-xl-inline-block{display:inline-block 
!important}.d-xl-block{display:block !important}.d-xl-grid{display:grid !important}.d-xl-table{display:table !important}.d-xl-table-row{display:table-row !important}.d-xl-table-cell{display:table-cell !important}.d-xl-flex{display:flex !important}.d-xl-inline-flex{display:inline-flex !important}.d-xl-none{display:none !important}.flex-xl-fill{flex:1 1 auto !important}.flex-xl-row{flex-direction:row !important}.flex-xl-column{flex-direction:column !important}.flex-xl-row-reverse{flex-direction:row-reverse !important}.flex-xl-column-reverse{flex-direction:column-reverse !important}.flex-xl-grow-0{flex-grow:0 !important}.flex-xl-grow-1{flex-grow:1 !important}.flex-xl-shrink-0{flex-shrink:0 !important}.flex-xl-shrink-1{flex-shrink:1 !important}.flex-xl-wrap{flex-wrap:wrap !important}.flex-xl-nowrap{flex-wrap:nowrap !important}.flex-xl-wrap-reverse{flex-wrap:wrap-reverse !important}.gap-xl-0{gap:0 !important}.gap-xl-1{gap:.25rem !important}.gap-xl-2{gap:.5rem !important}.gap-xl-3{gap:1rem !important}.gap-xl-4{gap:1.5rem !important}.gap-xl-5{gap:3rem !important}.justify-content-xl-start{justify-content:flex-start !important}.justify-content-xl-end{justify-content:flex-end !important}.justify-content-xl-center{justify-content:center !important}.justify-content-xl-between{justify-content:space-between !important}.justify-content-xl-around{justify-content:space-around !important}.justify-content-xl-evenly{justify-content:space-evenly !important}.align-items-xl-start{align-items:flex-start !important}.align-items-xl-end{align-items:flex-end !important}.align-items-xl-center{align-items:center !important}.align-items-xl-baseline{align-items:baseline !important}.align-items-xl-stretch{align-items:stretch !important}.align-content-xl-start{align-content:flex-start !important}.align-content-xl-end{align-content:flex-end !important}.align-content-xl-center{align-content:center !important}.align-content-xl-between{align-content:space-between !important}.align-content-xl-around{align-content:space-around !important}.align-content-xl-stretch{align-content:stretch !important}.align-self-xl-auto{align-self:auto !important}.align-self-xl-start{align-self:flex-start !important}.align-self-xl-end{align-self:flex-end !important}.align-self-xl-center{align-self:center !important}.align-self-xl-baseline{align-self:baseline !important}.align-self-xl-stretch{align-self:stretch !important}.order-xl-first{order:-1 !important}.order-xl-0{order:0 !important}.order-xl-1{order:1 !important}.order-xl-2{order:2 !important}.order-xl-3{order:3 !important}.order-xl-4{order:4 !important}.order-xl-5{order:5 !important}.order-xl-last{order:6 !important}.m-xl-0{margin:0 !important}.m-xl-1{margin:.25rem !important}.m-xl-2{margin:.5rem !important}.m-xl-3{margin:1rem !important}.m-xl-4{margin:1.5rem !important}.m-xl-5{margin:3rem !important}.m-xl-auto{margin:auto !important}.mx-xl-0{margin-right:0 !important;margin-left:0 !important}.mx-xl-1{margin-right:.25rem !important;margin-left:.25rem !important}.mx-xl-2{margin-right:.5rem !important;margin-left:.5rem !important}.mx-xl-3{margin-right:1rem !important;margin-left:1rem !important}.mx-xl-4{margin-right:1.5rem !important;margin-left:1.5rem !important}.mx-xl-5{margin-right:3rem !important;margin-left:3rem !important}.mx-xl-auto{margin-right:auto !important;margin-left:auto !important}.my-xl-0{margin-top:0 !important;margin-bottom:0 !important}.my-xl-1{margin-top:.25rem !important;margin-bottom:.25rem !important}.my-xl-2{margin-top:.5rem !important;margin-bottom:.5rem 
!important}.my-xl-3{margin-top:1rem !important;margin-bottom:1rem !important}.my-xl-4{margin-top:1.5rem !important;margin-bottom:1.5rem !important}.my-xl-5{margin-top:3rem !important;margin-bottom:3rem !important}.my-xl-auto{margin-top:auto !important;margin-bottom:auto !important}.mt-xl-0{margin-top:0 !important}.mt-xl-1{margin-top:.25rem !important}.mt-xl-2{margin-top:.5rem !important}.mt-xl-3{margin-top:1rem !important}.mt-xl-4{margin-top:1.5rem !important}.mt-xl-5{margin-top:3rem !important}.mt-xl-auto{margin-top:auto !important}.me-xl-0{margin-right:0 !important}.me-xl-1{margin-right:.25rem !important}.me-xl-2{margin-right:.5rem !important}.me-xl-3{margin-right:1rem !important}.me-xl-4{margin-right:1.5rem !important}.me-xl-5{margin-right:3rem !important}.me-xl-auto{margin-right:auto !important}.mb-xl-0{margin-bottom:0 !important}.mb-xl-1{margin-bottom:.25rem !important}.mb-xl-2{margin-bottom:.5rem !important}.mb-xl-3{margin-bottom:1rem !important}.mb-xl-4{margin-bottom:1.5rem !important}.mb-xl-5{margin-bottom:3rem !important}.mb-xl-auto{margin-bottom:auto !important}.ms-xl-0{margin-left:0 !important}.ms-xl-1{margin-left:.25rem !important}.ms-xl-2{margin-left:.5rem !important}.ms-xl-3{margin-left:1rem !important}.ms-xl-4{margin-left:1.5rem !important}.ms-xl-5{margin-left:3rem !important}.ms-xl-auto{margin-left:auto !important}.p-xl-0{padding:0 !important}.p-xl-1{padding:.25rem !important}.p-xl-2{padding:.5rem !important}.p-xl-3{padding:1rem !important}.p-xl-4{padding:1.5rem !important}.p-xl-5{padding:3rem !important}.px-xl-0{padding-right:0 !important;padding-left:0 !important}.px-xl-1{padding-right:.25rem !important;padding-left:.25rem !important}.px-xl-2{padding-right:.5rem !important;padding-left:.5rem !important}.px-xl-3{padding-right:1rem !important;padding-left:1rem !important}.px-xl-4{padding-right:1.5rem !important;padding-left:1.5rem !important}.px-xl-5{padding-right:3rem !important;padding-left:3rem !important}.py-xl-0{padding-top:0 !important;padding-bottom:0 !important}.py-xl-1{padding-top:.25rem !important;padding-bottom:.25rem !important}.py-xl-2{padding-top:.5rem !important;padding-bottom:.5rem !important}.py-xl-3{padding-top:1rem !important;padding-bottom:1rem !important}.py-xl-4{padding-top:1.5rem !important;padding-bottom:1.5rem !important}.py-xl-5{padding-top:3rem !important;padding-bottom:3rem !important}.pt-xl-0{padding-top:0 !important}.pt-xl-1{padding-top:.25rem !important}.pt-xl-2{padding-top:.5rem !important}.pt-xl-3{padding-top:1rem !important}.pt-xl-4{padding-top:1.5rem !important}.pt-xl-5{padding-top:3rem !important}.pe-xl-0{padding-right:0 !important}.pe-xl-1{padding-right:.25rem !important}.pe-xl-2{padding-right:.5rem !important}.pe-xl-3{padding-right:1rem !important}.pe-xl-4{padding-right:1.5rem !important}.pe-xl-5{padding-right:3rem !important}.pb-xl-0{padding-bottom:0 !important}.pb-xl-1{padding-bottom:.25rem !important}.pb-xl-2{padding-bottom:.5rem !important}.pb-xl-3{padding-bottom:1rem !important}.pb-xl-4{padding-bottom:1.5rem !important}.pb-xl-5{padding-bottom:3rem !important}.ps-xl-0{padding-left:0 !important}.ps-xl-1{padding-left:.25rem !important}.ps-xl-2{padding-left:.5rem !important}.ps-xl-3{padding-left:1rem !important}.ps-xl-4{padding-left:1.5rem !important}.ps-xl-5{padding-left:3rem !important}.text-xl-start{text-align:left !important}.text-xl-end{text-align:right !important}.text-xl-center{text-align:center !important}}@media (min-width: 1400px){.float-xxl-start{float:left !important}.float-xxl-end{float:right 
!important}.float-xxl-none{float:none !important}.d-xxl-inline{display:inline !important}.d-xxl-inline-block{display:inline-block !important}.d-xxl-block{display:block !important}.d-xxl-grid{display:grid !important}.d-xxl-table{display:table !important}.d-xxl-table-row{display:table-row !important}.d-xxl-table-cell{display:table-cell !important}.d-xxl-flex{display:flex !important}.d-xxl-inline-flex{display:inline-flex !important}.d-xxl-none{display:none !important}.flex-xxl-fill{flex:1 1 auto !important}.flex-xxl-row{flex-direction:row !important}.flex-xxl-column{flex-direction:column !important}.flex-xxl-row-reverse{flex-direction:row-reverse !important}.flex-xxl-column-reverse{flex-direction:column-reverse !important}.flex-xxl-grow-0{flex-grow:0 !important}.flex-xxl-grow-1{flex-grow:1 !important}.flex-xxl-shrink-0{flex-shrink:0 !important}.flex-xxl-shrink-1{flex-shrink:1 !important}.flex-xxl-wrap{flex-wrap:wrap !important}.flex-xxl-nowrap{flex-wrap:nowrap !important}.flex-xxl-wrap-reverse{flex-wrap:wrap-reverse !important}.gap-xxl-0{gap:0 !important}.gap-xxl-1{gap:.25rem !important}.gap-xxl-2{gap:.5rem !important}.gap-xxl-3{gap:1rem !important}.gap-xxl-4{gap:1.5rem !important}.gap-xxl-5{gap:3rem !important}.justify-content-xxl-start{justify-content:flex-start !important}.justify-content-xxl-end{justify-content:flex-end !important}.justify-content-xxl-center{justify-content:center !important}.justify-content-xxl-between{justify-content:space-between !important}.justify-content-xxl-around{justify-content:space-around !important}.justify-content-xxl-evenly{justify-content:space-evenly !important}.align-items-xxl-start{align-items:flex-start !important}.align-items-xxl-end{align-items:flex-end !important}.align-items-xxl-center{align-items:center !important}.align-items-xxl-baseline{align-items:baseline !important}.align-items-xxl-stretch{align-items:stretch !important}.align-content-xxl-start{align-content:flex-start !important}.align-content-xxl-end{align-content:flex-end !important}.align-content-xxl-center{align-content:center !important}.align-content-xxl-between{align-content:space-between !important}.align-content-xxl-around{align-content:space-around !important}.align-content-xxl-stretch{align-content:stretch !important}.align-self-xxl-auto{align-self:auto !important}.align-self-xxl-start{align-self:flex-start !important}.align-self-xxl-end{align-self:flex-end !important}.align-self-xxl-center{align-self:center !important}.align-self-xxl-baseline{align-self:baseline !important}.align-self-xxl-stretch{align-self:stretch !important}.order-xxl-first{order:-1 !important}.order-xxl-0{order:0 !important}.order-xxl-1{order:1 !important}.order-xxl-2{order:2 !important}.order-xxl-3{order:3 !important}.order-xxl-4{order:4 !important}.order-xxl-5{order:5 !important}.order-xxl-last{order:6 !important}.m-xxl-0{margin:0 !important}.m-xxl-1{margin:.25rem !important}.m-xxl-2{margin:.5rem !important}.m-xxl-3{margin:1rem !important}.m-xxl-4{margin:1.5rem !important}.m-xxl-5{margin:3rem !important}.m-xxl-auto{margin:auto !important}.mx-xxl-0{margin-right:0 !important;margin-left:0 !important}.mx-xxl-1{margin-right:.25rem !important;margin-left:.25rem !important}.mx-xxl-2{margin-right:.5rem !important;margin-left:.5rem !important}.mx-xxl-3{margin-right:1rem !important;margin-left:1rem !important}.mx-xxl-4{margin-right:1.5rem !important;margin-left:1.5rem !important}.mx-xxl-5{margin-right:3rem !important;margin-left:3rem !important}.mx-xxl-auto{margin-right:auto !important;margin-left:auto 
!important}.my-xxl-0{margin-top:0 !important;margin-bottom:0 !important}.my-xxl-1{margin-top:.25rem !important;margin-bottom:.25rem !important}.my-xxl-2{margin-top:.5rem !important;margin-bottom:.5rem !important}.my-xxl-3{margin-top:1rem !important;margin-bottom:1rem !important}.my-xxl-4{margin-top:1.5rem !important;margin-bottom:1.5rem !important}.my-xxl-5{margin-top:3rem !important;margin-bottom:3rem !important}.my-xxl-auto{margin-top:auto !important;margin-bottom:auto !important}.mt-xxl-0{margin-top:0 !important}.mt-xxl-1{margin-top:.25rem !important}.mt-xxl-2{margin-top:.5rem !important}.mt-xxl-3{margin-top:1rem !important}.mt-xxl-4{margin-top:1.5rem !important}.mt-xxl-5{margin-top:3rem !important}.mt-xxl-auto{margin-top:auto !important}.me-xxl-0{margin-right:0 !important}.me-xxl-1{margin-right:.25rem !important}.me-xxl-2{margin-right:.5rem !important}.me-xxl-3{margin-right:1rem !important}.me-xxl-4{margin-right:1.5rem !important}.me-xxl-5{margin-right:3rem !important}.me-xxl-auto{margin-right:auto !important}.mb-xxl-0{margin-bottom:0 !important}.mb-xxl-1{margin-bottom:.25rem !important}.mb-xxl-2{margin-bottom:.5rem !important}.mb-xxl-3{margin-bottom:1rem !important}.mb-xxl-4{margin-bottom:1.5rem !important}.mb-xxl-5{margin-bottom:3rem !important}.mb-xxl-auto{margin-bottom:auto !important}.ms-xxl-0{margin-left:0 !important}.ms-xxl-1{margin-left:.25rem !important}.ms-xxl-2{margin-left:.5rem !important}.ms-xxl-3{margin-left:1rem !important}.ms-xxl-4{margin-left:1.5rem !important}.ms-xxl-5{margin-left:3rem !important}.ms-xxl-auto{margin-left:auto !important}.p-xxl-0{padding:0 !important}.p-xxl-1{padding:.25rem !important}.p-xxl-2{padding:.5rem !important}.p-xxl-3{padding:1rem !important}.p-xxl-4{padding:1.5rem !important}.p-xxl-5{padding:3rem !important}.px-xxl-0{padding-right:0 !important;padding-left:0 !important}.px-xxl-1{padding-right:.25rem !important;padding-left:.25rem !important}.px-xxl-2{padding-right:.5rem !important;padding-left:.5rem !important}.px-xxl-3{padding-right:1rem !important;padding-left:1rem !important}.px-xxl-4{padding-right:1.5rem !important;padding-left:1.5rem !important}.px-xxl-5{padding-right:3rem !important;padding-left:3rem !important}.py-xxl-0{padding-top:0 !important;padding-bottom:0 !important}.py-xxl-1{padding-top:.25rem !important;padding-bottom:.25rem !important}.py-xxl-2{padding-top:.5rem !important;padding-bottom:.5rem !important}.py-xxl-3{padding-top:1rem !important;padding-bottom:1rem !important}.py-xxl-4{padding-top:1.5rem !important;padding-bottom:1.5rem !important}.py-xxl-5{padding-top:3rem !important;padding-bottom:3rem !important}.pt-xxl-0{padding-top:0 !important}.pt-xxl-1{padding-top:.25rem !important}.pt-xxl-2{padding-top:.5rem !important}.pt-xxl-3{padding-top:1rem !important}.pt-xxl-4{padding-top:1.5rem !important}.pt-xxl-5{padding-top:3rem !important}.pe-xxl-0{padding-right:0 !important}.pe-xxl-1{padding-right:.25rem !important}.pe-xxl-2{padding-right:.5rem !important}.pe-xxl-3{padding-right:1rem !important}.pe-xxl-4{padding-right:1.5rem !important}.pe-xxl-5{padding-right:3rem !important}.pb-xxl-0{padding-bottom:0 !important}.pb-xxl-1{padding-bottom:.25rem !important}.pb-xxl-2{padding-bottom:.5rem !important}.pb-xxl-3{padding-bottom:1rem !important}.pb-xxl-4{padding-bottom:1.5rem !important}.pb-xxl-5{padding-bottom:3rem !important}.ps-xxl-0{padding-left:0 !important}.ps-xxl-1{padding-left:.25rem !important}.ps-xxl-2{padding-left:.5rem !important}.ps-xxl-3{padding-left:1rem !important}.ps-xxl-4{padding-left:1.5rem 
!important}.ps-xxl-5{padding-left:3rem !important}.text-xxl-start{text-align:left !important}.text-xxl-end{text-align:right !important}.text-xxl-center{text-align:center !important}}@media (min-width: 1200px){.fs-1{font-size:2.5rem !important}.fs-2{font-size:2rem !important}.fs-3{font-size:1.75rem !important}.fs-4{font-size:1.5rem !important}}@media print{.d-print-inline{display:inline !important}.d-print-inline-block{display:inline-block !important}.d-print-block{display:block !important}.d-print-grid{display:grid !important}.d-print-table{display:table !important}.d-print-table-row{display:table-row !important}.d-print-table-cell{display:table-cell !important}.d-print-flex{display:flex !important}.d-print-inline-flex{display:inline-flex !important}.d-print-none{display:none !important}}html,body,div,span,applet,object,iframe,h1,.h1,h2,.h2,h3,.h3,h4,.h4,h5,.h5,h6,.h6,p,blockquote,pre,a,abbr,acronym,address,big,cite,code,del,dfn,em,font,ins,kbd,q,s,samp,small,.small,strike,strong,sub,sup,tt,var,dl,dt,dd,ol,ul,li,fieldset,form,label,legend,button,table,caption,tbody,tfoot,thead,tr,th,td{border:0;font-family:inherit;font-size:100%;font-style:inherit;margin:0;padding:0;vertical-align:baseline}body{line-height:1;-webkit-font-smoothing:antialiased}ol,ul{list-style:none}table{border-collapse:separate;border-spacing:0}caption,th,td{font-weight:normal;text-align:left}blockquote:before,blockquote:after,q:before,q:after{content:""}blockquote,q{quotes:"" ""}a img{border:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,menu,nav,section,main,summary{display:block}button{cursor:pointer}img{max-width:100%;height:auto}.spell-out{speak-as:spell-out}*{box-sizing:border-box}:root{--uwSiteTitleFont: Red Hat Display,sans-serif;--uwSiteTaglineFont: Red Hat Display,sans-serif;--uwTextFont: Red Hat Text,sans-serif;--uwCopyFont: Red Hat Text,sans-serif;--uwDisplayFont: Red Hat Display,sans-serif;--uwButtonFont: Red Hat Text,sans-serif;--uwCaptionFont: Red Hat Text,sans-serif;--uwBlockquoteFont: Red Hat Display,sans-serif}/*! Copyright 2021 The Red Hat Project Authors (https://github.com/RedHatOfficial/RedHatFont) +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
This license is available with a FAQ at: https://scripts.sil.org/OFL */@font-face{font-family:'Red Hat Display';font-style:italic;font-weight:300 900;font-display:swap;src:url("/assets/fonts/redhat-display-italic-latin.v14.woff2") format("woff2");unicode-range:U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, U+2193, U+2212, U+2215, U+FEFF, U+FFFD}@font-face{font-family:'Red Hat Display';font-style:italic;font-weight:300 900;font-display:swap;src:url("/assets/fonts/redhat-display-italic-latin-ext.v14.woff2") format("woff2");unicode-range:U+0100-024F, U+0259, U+1E00-1EFF, U+2020, U+20A0-20AB, U+20AD-20CF, U+2113, U+2C60-2C7F, U+A720-A7FF}@font-face{font-family:'Red Hat Display';font-style:normal;font-weight:300 900;font-display:swap;src:url("/assets/fonts/redhat-display-latin.v14.woff2") format("woff2");unicode-range:U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, U+2193, U+2212, U+2215, U+FEFF, U+FFFD}@font-face{font-family:'Red Hat Display';font-style:normal;font-weight:300 900;font-display:swap;src:url("/assets/fonts/redhat-display-latin-ext.v14.woff2") format("woff2");unicode-range:U+0100-024F, U+0259, U+1E00-1EFF, U+2020, U+20A0-20AB, U+20AD-20CF, U+2113, U+2C60-2C7F, U+A720-A7FF}@font-face{font-family:'Red Hat Text';font-style:italic;font-weight:300 625;font-display:swap;src:url("/assets/fonts/redhat-text-italic-latin.v13.woff2") format("woff2");unicode-range:U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, U+2193, U+2212, U+2215, U+FEFF, U+FFFD}@font-face{font-family:'Red Hat Text';font-style:italic;font-weight:300 625;font-display:swap;src:url("/assets/fonts/redhat-text-italic-latin-ext.v13.woff2") format("woff2");unicode-range:U+0100-024F, U+0259, U+1E00-1EFF, U+2020, U+20A0-20AB, U+20AD-20CF, U+2113, U+2C60-2C7F, U+A720-A7FF}@font-face{font-family:'Red Hat Text';font-style:normal;font-weight:300 625;font-display:swap;src:url("/assets/fonts/redhat-text-latin.v13.woff2") format("woff2");unicode-range:U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, U+2193, U+2212, U+2215, U+FEFF, U+FFFD}@font-face{font-family:'Red Hat Text';font-style:normal;font-weight:300 625;font-display:swap;src:url("/assets/fonts/redhat-text-latin-ext.v13.woff2") format("woff2");unicode-range:U+0100-024F, U+0259, U+1E00-1EFF, U+2020, U+20A0-20AB, U+20AD-20CF, U+2113, U+2C60-2C7F, U+A720-A7FF}body{color:#333;font-family:var(--uwTextFont);font-weight:400;line-height:1.625;position:relative;background-color:#fff}body.uw-white-bg{background-color:#fff}body.uw-light-gray-bg{background-color:#f7f7f7}abbr[title],acronym[title]{border-bottom:none;text-decoration:none;color:currentColor}p,li,dd,dt{font-size:1.125rem;margin-bottom:1.5rem}main p,main li,main dd,main dt{-webkit-font-smoothing:auto}.uw-body p,.uw-body li,.uw-body dd,.uw-body dt{font-family:var(--uwCopyFont)}.uw-body p a,.uw-body li a,.uw-body dd a,.uw-body dt
a{text-decoration:underline}a{color:#0479a8;text-decoration:none}.uw-no-case-transform{text-transform:none}h1,.h1,.uw-h1{font-weight:630;font-size:1.5rem;font-family:var(--uwDisplayFont);line-height:1.3}h2,.h2,.uw-h2{font-size:1.3rem;font-weight:675;font-family:var(--uwDisplayFont);margin-top:32px;margin-top:2rem;margin-bottom:16px;margin-bottom:1rem;line-height:1.4}h3,.h3,.uw-h3{font-size:1.125rem;font-family:var(--uwDisplayFont);font-weight:675;margin-top:28px;margin-top:1.8rem;margin-bottom:14px;margin-bottom:.9rem;line-height:1.4}h4,.h4,.uw-h4,h5,.h5,h6,.h6{font-weight:675;font-size:1rem;font-family:var(--uwDisplayFont);margin-top:28px;margin-top:1.8rem;line-height:1.4}h5,.h5{font-size:.925rem}h6,.h6{font-size:.85rem}@media screen and (min-width: 40em){h1,.h1,.uw-h1{font-size:2.25rem}h2,.h2,.uw-h2{font-size:1.75rem}h3,.h3,.uw-h3{font-size:1.375rem}h4,.h4,.uw-h4{font-size:1.125rem}h5,.h5{font-size:1rem}h6,.h6{font-size:0.875rem}}h2+ul,.h2+ul{margin-top:16px;margin-top:1rem}ul,ol{margin-left:1.2rem}ul li ul,ul li ol,ol li ul,ol li ol{margin-top:1.5rem;margin-left:2.4rem}ul{list-style:disc}ol{list-style:decimal}ol ol{list-style:lower-alpha}ol ol ol{list-style:lower-roman}ol ol ol ol{list-style:decimal}.uw-list-tight{margin-top:-1rem}.uw-list-tight li{margin-bottom:0}.uw-list-no_bullets{margin-left:0;list-style:none}.uw-list-multi_column{display:flex;flex-wrap:wrap}.uw-list-multi_column li{flex-basis:100%;max-width:100%}@media screen and (min-width: 37.5em){.uw-list-multi_column li{flex-basis:50%;max-width:50%;padding-right:3rem}}@media screen and (min-width: 56.25em){.uw-list-multi_column li{flex-basis:33.333333%;max-width:33.333333%}}@media screen and (min-width: 75em){.uw-list-multi_column li{flex-basis:25%;max-width:25%}}.uw-list-inline{display:inline;list-style:none;margin-left:0}.uw-list-inline li{display:inline}.uw-list-inline li:before{content:"\00b7";padding:0 0.5rem}.uw-list-inline li:first-child:before{content:"";padding:0}dl{margin:0}dl dd,dl dt{font-size:1.0625rem}dl dt{font-weight:625}dl dd{margin-bottom:1.5rem}cite,em,i{font-style:italic}b,strong{font-weight:625}td,th,caption{font-size:1.125rem}blockquote{font-family:var(--uwBlockquoteFont);-webkit-font-smoothing:auto;font-style:italic;font-size:1.25rem;font-weight:450;letter-spacing:0.02rem;padding-left:1.5rem;margin-bottom:2rem;border-left:0.25rem solid #c5050c}blockquote cite,blockquote+cite{font-style:normal;font-family:var(--uwTextFont);font-size:1rem;font-weight:400;margin:1rem 0 0 1.5rem;display:block}blockquote cite:before,blockquote+cite:before{content:"\2014"}blockquote cite{margin-left:0}blockquote p{font-size:1.25rem}blockquote.uw-stylized-quote{font-family:var(--uwBlockquoteFont);margin-bottom:0;border-left:none;padding-left:0;text-align:center}blockquote.uw-stylized-quote p{font-family:var(--uwBlockquoteFont);font-size:1.25rem}blockquote.uw-stylized-quote p:last-of-type{margin-bottom:.5rem}blockquote.uw-stylized-quote cite{text-align:center}blockquote.uw-mini-bar-center{margin-top:2rem}blockquote.uw-mini-bar-center:before{margin-top:-1rem;height:6px;width:3rem}@media screen and (min-width: 0em) and (max-width: 39.9375em){blockquote.uw-mini-bar-center{margin-top:2rem}}blockquote em,blockquote i,blockquote cite{font-style:normal}address{display:block;margin:0 0 1.625em}pre{background:#f4f4f4;font:1rem "Courier 10 Pitch", Courier, monospace;line-height:1.5;margin-bottom:1.625em;overflow:auto;padding:0.75em 1.625em}code,kbd,samp,var{font:1rem Monaco, Consolas, "Andale Mono", "DejaVu Sans Mono", 
monospace}ins{background:#fff9c0;text-decoration:none}sup,sub{font-size:.9rem;height:0;line-height:1;position:relative;vertical-align:baseline}sup{bottom:1ex}sub{top:.5ex}q:before{content:"\201C"}q:after{content:"\201D"}.uw-double-size-text{font-size:1.25rem;line-height:1.5}.uw-small-text{font-size:.9rem}.uw-smaller-text{font-size:.8rem}@media screen and (min-width: 40em){.uw-double-size-text{font-size:1.375rem;line-height:1.625}}.uw-more-link{text-transform:uppercase;font-size:0.875rem;font-weight:650}.uw-more-link.uw-more-link-black{color:#282728}.uw-more-link svg{width:.75rem;height:.75rem;vertical-align:-0.05rem;margin-left:-.1rem;fill:#c5050c}.uw-gray-med-bg{background-color:#8e8e92}.uw-greyblue-bg{background-color:#dadfe1}.uw-white-bg{background-color:white}.uw-light-gray-bg{background-color:#f7f7f7}.uw-red-bg{background-color:#c5050c}.uw-red-bg,.uw-gray-med-bg{color:white}.uw-red-bg p,.uw-red-bg li,.uw-red-bg blockquote,.uw-red-bg a,.uw-red-bg a.uw-more-link,.uw-red-bg dd,.uw-red-bg dt,.uw-red-bg abbr,.uw-red-bg acronym,.uw-red-bg cite,.uw-red-bg strong,.uw-gray-med-bg p,.uw-gray-med-bg li,.uw-gray-med-bg blockquote,.uw-gray-med-bg a,.uw-gray-med-bg a.uw-more-link,.uw-gray-med-bg dd,.uw-gray-med-bg dt,.uw-gray-med-bg abbr,.uw-gray-med-bg acronym,.uw-gray-med-bg cite,.uw-gray-med-bg strong{color:white !important}.uw-red-bg svg,.uw-gray-med-bg svg{fill:white}.uw-red-bg blockquote,.uw-gray-med-bg blockquote{font-weight:635}.uw-red-bg a.uw-more-link,.uw-gray-med-bg a.uw-more-link{color:white}.uw-nowrap{white-space:nowrap}.uw-text-center{text-align:center}.uw-text-left{text-align:left}.uw-text-right{text-align:right}@media screen and (min-width: 40em){.uw-pad-xs{padding:.25rem}.uw-pad-t-xs{padding-top:.25rem}.uw-pad-b-xs{padding-bottom:.25rem}.uw-pad-l-xs{padding-left:.25rem}.uw-pad-r-xs{padding-right:.25rem}.uw-pad-tb-xs{padding-top:.25rem;padding-bottom:.25rem}.uw-mg-xs{margin:.25rem}.uw-mg-t-xs{margin-top:.25rem}.uw-mg-b-xs{margin-bottom:.25rem}.uw-mg-l-xs{margin-left:.25rem}.uw-mg-r-xs{margin-right:.25rem}.uw-mg-tb-xs{margin-top:.25rem;margin-bottom:.25rem}}@media screen and (min-width: 40em){.uw-pad-s{padding:.5rem}.uw-pad-t-s{padding-top:.5rem}.uw-pad-b-s{padding-bottom:.5rem}.uw-pad-l-s{padding-left:.5rem}.uw-pad-r-s{padding-right:.5rem}.uw-pad-tb-s{padding-top:.5rem;padding-bottom:.5rem}.uw-mg-s{margin:.5rem}.uw-mg-t-s{margin-top:.5rem}.uw-mg-b-s{margin-bottom:.5rem}.uw-mg-l-s{margin-left:.5rem}.uw-mg-r-s{margin-right:.5rem}.uw-mg-tb-s{margin-top:.5rem;margin-bottom:.5rem}}@media screen and (min-width: 40em){.uw-pad-m{padding:1rem}.uw-pad-t-m{padding-top:1rem}.uw-pad-b-m{padding-bottom:1rem}.uw-pad-l-m{padding-left:1rem}.uw-pad-r-m{padding-right:1rem}.uw-pad-tb-m{padding-top:1rem;padding-bottom:1rem}.uw-mg-m{margin:1rem}.uw-mg-t-m{margin-top:1rem}.uw-mg-b-m{margin-bottom:1rem}.uw-mg-l-m{margin-left:1rem}.uw-mg-r-m{margin-right:1rem}.uw-mg-tb-m{margin-top:1rem;margin-bottom:1rem}}.uw-pad-l{padding:1rem}.uw-pad-t-l{padding-top:1rem}.uw-pad-b-l{padding-bottom:1rem}.uw-pad-l-l{padding-left:1rem}.uw-pad-r-l{padding-right:1rem}.uw-pad-tb-l{padding-top:1rem;padding-bottom:1rem}.uw-mg-l{margin:1rem}.uw-mg-t-l{margin-top:1rem}.uw-mg-b-l{margin-bottom:1rem}.uw-mg-l-l{margin-left:1rem}.uw-mg-r-l{margin-right:1rem}.uw-mg-tb-l{margin-top:1rem;margin-bottom:1rem}@media screen and (min-width:
40em){.uw-pad-l{padding:2rem}.uw-pad-t-l{padding-top:2rem}.uw-pad-b-l{padding-bottom:2rem}.uw-pad-l-l{padding-left:2rem}.uw-pad-r-l{padding-right:2rem}.uw-pad-tb-l{padding-top:2rem;padding-bottom:2rem}.uw-mg-l{margin:2rem}.uw-mg-t-l{margin-top:2rem}.uw-mg-b-l{margin-bottom:2rem}.uw-mg-l-l{margin-left:2rem}.uw-mg-r-l{margin-right:2rem}.uw-mg-tb-l{margin-top:2rem;margin-bottom:2rem}}.uw-pad-xl{padding:2rem}.uw-pad-t-xl{padding-top:2rem}.uw-pad-b-xl{padding-bottom:2rem}.uw-pad-l-xl{padding-left:2rem}.uw-pad-r-xl{padding-right:2rem}.uw-pad-tb-xl{padding-top:2rem;padding-bottom:2rem}.uw-mg-xl{margin:2rem}.uw-mg-t-xl{margin-top:2rem}.uw-mg-b-xl{margin-bottom:2rem}.uw-mg-l-xl{margin-left:2rem}.uw-mg-r-xl{margin-right:2rem}.uw-mg-tb-xl{margin-top:2rem;margin-bottom:2rem}@media screen and (min-width: 40em){.uw-pad-xl{padding:4rem}.uw-pad-t-xl{padding-top:4rem}.uw-pad-b-xl{padding-bottom:4rem}.uw-pad-l-xl{padding-left:4rem}.uw-pad-r-xl{padding-right:4rem}.uw-pad-tb-xl{padding-top:4rem;padding-bottom:4rem}.uw-mg-xl{margin:4rem}.uw-mg-t-xl{margin-top:4rem}.uw-mg-b-xl{margin-bottom:4rem}.uw-mg-l-xl{margin-left:4rem}.uw-mg-r-xl{margin-right:4rem}.uw-mg-tb-xl{margin-top:4rem;margin-bottom:4rem}}.uw-pad-xxl{padding:4rem}.uw-pad-t-xxl{padding-top:4rem}.uw-pad-b-xxl{padding-bottom:4rem}.uw-pad-l-xxl{padding-left:4rem}.uw-pad-r-xxl{padding-right:4rem}.uw-pad-tb-xxl{padding-top:4rem;padding-bottom:4rem}.uw-mg-xxl{margin:4rem}.uw-mg-t-xxl{margin-top:4rem}.uw-mg-b-xxl{margin-bottom:4rem}.uw-mg-l-xxl{margin-left:4rem}.uw-mg-r-xxl{margin-right:4rem}.uw-mg-tb-xxl{margin-top:4rem;margin-bottom:4rem}@media screen and (min-width: 40em){.uw-pad-xxl{padding:8rem}.uw-pad-t-xxl{padding-top:8rem}.uw-pad-b-xxl{padding-bottom:8rem}.uw-pad-l-xxl{padding-left:8rem}.uw-pad-r-xxl{padding-right:8rem}.uw-pad-tb-xxl{padding-top:8rem;padding-bottom:8rem}.uw-mg-xxl{margin:8rem}.uw-mg-t-xxl{margin-top:8rem}.uw-mg-b-xxl{margin-bottom:8rem}.uw-mg-l-xxl{margin-left:8rem}.uw-mg-r-xxl{margin-right:8rem}.uw-mg-tb-xxl{margin-top:8rem;margin-bottom:8rem}}@media screen and (max-width: 1320px){.uw-pad-tb-none-sm{padding-top:0;padding-bottom:0}.uw-pad-t-none-sm{padding-top:0}.uw-pad-b-none-sm{padding-bottom:0}}.uw-show-for-sr-only,.uw-sr-only{position:absolute !important;width:1px;height:1px;overflow:hidden;clip:rect(0, 0, 0, 0)}.uw-show-on-focus{position:absolute;width:1px;height:1px;overflow:hidden;clip:rect(0, 0, 0, 0)}.uw-show-on-focus:focus{position:static;height:auto;width:auto;overflow:visible;clip:auto}#uw-skip-link{position:absolute !important;z-index:40;color:#0479a8;padding:.3rem;background-color:#fff}.uw-row{max-width:1320px;margin-left:auto;margin-right:auto;display:flex;flex-wrap:wrap;position:relative}.uw-full-row-has-bg-img{background-repeat:no-repeat;background-size:cover}.uw-row-page-title{padding-bottom:2rem}.uw-col{flex-basis:100%;max-width:100%;padding:0 1rem}@media screen and (min-width: 40em){.uw-col{flex-basis:100%;max-width:100%}}.uw-flex-reverse{flex-direction:row-reverse}.uw-clearfix::before,.uw-clearfix::after{content:' ';display:table}.uw-clearfix::after{clear:both}.uw-hero{line-height:.5}.uw-hero img{width:100%}@media screen and (min-width: 75em){.uw-hero.uw-hero-constrained-height img{object-fit:cover;object-position:100% 100%;max-height:400px}}.uw-float-right{float:right}.uw-float-left{float:left}figure{margin:0 auto 1rem}figure.uw-float-right,figure.uw-float-left{float:none}@media screen and (min-width: 31.25em){figure{margin:0}figure.uw-float-right{float:right;margin:0.5rem 0 2rem 
2rem}figure.uw-float-left{float:left;margin:0.5rem 2rem 2rem 0}figure.uw-float-25{width:25%}figure.uw-float-33{width:33.3333%}figure.uw-float-50{width:50%}}figure img{border:1px solid #cfcfcf}figure img.uw-no-border{border:none}figure figcaption{font-size:1rem;font-family:var(--uwCaptionFont);line-height:1.4}.uw-credit{text-transform:uppercase;font-family:var(--uwCaptionFont);font-size:0.725rem}.uw-content-box{background-color:#fff;border-bottom:4px solid #c5050c;padding:1.5rem 2rem 1.5rem 2rem;background-color:#f7f7f7}@media screen and (min-width: 40em){.uw-content-box{border-right:1px solid #cfcfcf}}.uw-content-box.uw-content-box-bleed{padding:0 0 1.5rem 0}.uw-content-box.uw-content-box-bleed>*{padding-left:2rem;padding-right:2rem}.uw-content-box.uw-content-box-bleed>.bleed{padding-left:0;padding-right:0}.uw-content-box+.uw-content-box{margin-top:3rem}body.uw-light-gray-bg .uw-content-box{background-color:#fff}.uw-mini-bar{position:relative;margin-top:2.2rem}.uw-mini-bar:before{position:absolute;left:0;height:4px;content:'';width:2rem;top:-12px;background-color:#c5050c}h1.uw-mini-bar:before,.uw-mini-bar.h1:before{height:5px;width:3rem}.uw-mini-bar-center{position:relative;margin-top:2.2rem}.uw-mini-bar-center:before{position:absolute;left:0;height:4px;content:'';width:2rem;top:-12px;background-color:#c5050c;right:0;margin:0 auto}.uw-mini-bar-white{position:relative;margin-top:2.2rem}.uw-mini-bar-white:before{position:absolute;left:0;height:4px;content:'';width:2rem;top:-12px;background-color:#fff}.uw-mini-bar-white-center{position:relative;margin-top:2.2rem}.uw-mini-bar-white-center:before{position:absolute;left:0;height:4px;content:'';width:2rem;top:-12px;background-color:#fff;right:0;margin:0 auto}.uw-global-bar{background-color:#c5050c;color:white;padding:0 1rem;display:flex;justify-content:space-between;text-transform:uppercase;font-size:.825rem;font-weight:620;font-family:var(--uwDisplayFont)}.uw-global-bar.uw-global-bar-inverse{background-color:white;border-bottom:1px solid #cfcfcf;position:relative;z-index:10}.uw-global-bar.uw-global-bar-inverse a,.uw-global-bar.uw-global-bar-inverse a:visited,.uw-global-bar.uw-global-bar-inverse a:active,.uw-global-bar.uw-global-bar-inverse a:hover{color:#282728}.uw-global-name-link{color:white;line-height:2.2rem}.uw-global-name-link:hover,.uw-global-name-link:visited,.uw-global-name-link:active{text-decoration:none}.uw-global-name-link span{display:none}.uw-global-name-link .uw-of{text-transform:none}@media screen and (min-width: 17.5em){.uw-global-name-link span{display:inline}}.uw-header{display:flex;justify-content:center;background-color:white}.uw-header.uw-has-search .uw-header-crest-title{width:auto;width:100%}@media screen and (min-width: 31.25em){.uw-header.uw-has-search .uw-header-crest-title{width:70%;padding-right:5%}}@media screen and (min-width: 40em){.uw-header.uw-has-search .uw-header-crest-title{width:75%;padding-right:5%}}.uw-header.uw-has-search .uw-header-search{padding-right:1rem}@media screen and (min-width: 31.25em){.uw-header.uw-has-search .uw-header-search{width:30%}}@media screen and (min-width: 40em){.uw-header.uw-has-search .uw-header-search{width:25%}}.uw-header-container{padding:1rem 0;flex:0 0 100%;max-width:100%;display:flex;justify-content:space-between;position:relative}@media screen and (min-width: 
40em){.uw-header-container{max-width:1320px}}.uw-header-crest-title{display:flex;justify-content:space-between;align-items:center;width:100%;padding-left:1rem;padding-right:1rem}.uw-header-crest{flex-basis:auto;margin-right:.5rem}.uw-crest-svg{width:10rem}.uw-title-tagline{flex-basis:100%}.uw-site-title{font-weight:800;font-family:var(--uwSiteTitleFont);font-size:1.25rem;line-height:1.1;margin-bottom:.1rem}.uw-site-title a{color:#c5050c}.uw-site-title a:hover{text-decoration:none}.uw-site-tagline{font-family:var(--uwSiteTaglineFont);font-size:.95rem;font-weight:600;color:#333;text-transform:none;margin:.2rem 0 0 .1rem;line-height:1.2}.uw-search-form{display:flex}.uw-search-input[type="text"]{flex:1 1 80%;max-width:80%}.uw-search-submit{width:2.5rem;height:2.5rem;color:white;background-color:#c5050c;flex:0 0 auto}.uw-search-submit svg{width:1.2rem;height:1.2rem;fill:white}.uw-header-search .uw-search-form{margin-top:0.5rem}li.uw-search-list-item form{margin-top:0;margin-bottom:0;padding:0.75rem 1rem;border-bottom:1px solid #cfcfcf;text-align:center;justify-content:center}li.uw-search-list-item .uw-search-input[type="text"]{float:none;width:100%;max-width:400px;transition:none}@media screen and (min-width: 27.5em){.uw-site-title{font-size:1.5rem}}@media screen and (min-width: 40em){.uw-header-crest{margin-right:.8rem}.uw-header-crest img{width:10rem}.uw-site-title{font-size:1.7rem}.uw-site-tagline{margin:-.1rem 0 0 .1rem}}@media screen and (min-width: 64em){.uw-header-crest img{width:10rem}.uw-site-title{font-size:1.8rem}.uw-header-search .uw-search-form{margin-top:1rem}}@media screen and (min-width: 75em){.uw-header-crest img{width:10rem}.uw-site-title{font-size:2rem}}.uw-mobile-menu-button-bar{display:none;width:100%;background-color:#c5050c;border:0;padding:1.1rem 1rem 1.1rem 1rem;color:#fff;font-weight:600;font-size:1rem;line-height:1rem}.uw-mobile-menu-button-bar.uw-is-visible{display:block}.uw-mobile-menu-button-bar:focus{outline:none}.uw-mobile-menu-button-bar svg{width:1.2rem;height:1.2rem;vertical-align:middle;margin-top:-.3rem;margin-left:.7rem;fill:#fff}.uw-mobile-menu-button-bar svg:last-child{display:inline}.uw-mobile-menu-button-bar[aria-expanded="true"] svg{display:none}.uw-mobile-menu-button-bar[aria-expanded="true"] svg:last-child{display:inline}.uw-mobile-menu-button-bar[aria-expanded="false"] svg{display:inline}.uw-mobile-menu-button-bar[aria-expanded="false"] svg:last-child{display:none}.uw-mobile-menu-button-bar.uw-mobile-menu-button-bar-reversed{background-color:#fff;color:#282728;border-top:1px solid #cfcfcf;border-bottom:1px solid #cfcfcf}.uw-mobile-menu-button-bar.uw-mobile-menu-button-bar-reversed svg{fill:#282728}.no-js #uw-top-menus.uw-is-visible.uw-hidden{display:block;visibility:visible}#uw-top-menus.uw-display-none{display:none}#uw-top-menus.uw-is-visible{display:block}#uw-top-menus.uw-is-visible.uw-hidden{visibility:hidden}#uw-top-menus .uw-nav-menu a{display:inline-block;font-family:var(--uwDisplayFont);font-size:1rem;font-weight:600;line-height:1.25;color:white;border-bottom:0.2rem solid rgba(0,0,0,0)}#uw-top-menus .uw-nav-menu a:hover{text-decoration:none;border-bottom-color:white}#uw-top-menus .uw-nav-menu a:active,#uw-top-menus .uw-nav-menu a:focus{text-decoration:none}#uw-top-menus .uw-nav-menu li{list-style:none}#uw-top-menus .uw-nav-menu li.uw-search-form{margin-top:16px;margin-top:1rem}#uw-top-menus .uw-nav-menu ul{padding:0 16px;padding:0 1rem;line-height:1.625}#uw-top-menus .uw-nav-menu ul ul{margin:0}#uw-top-menus .uw-nav-menu ul ul 
a{font-size:.9rem;font-weight:600;text-transform:none}#uw-top-menus .uw-nav-menu li{margin-bottom:0}#uw-top-menus .uw-nav-menu ul ul{display:none;transform:scaleY(0);transform-origin:top;opacity:0;transition:transform .2s ease-in-out, opacity .2s ease-in-out;padding-bottom:.5rem}#uw-top-menus .uw-nav-menu ul li.uw-dropdown.uw-is-active>ul{display:block;opacity:1.0;transform:scaleY(1);animation:reveal .2s ease-in-out}#uw-top-menus .uw-nav-menu ul ul ul{display:none;left:100%;top:0}#uw-top-menus .uw-nav-menu svg.uw-caret{width:1rem;height:1rem;fill:white;vertical-align:-3px;display:inline;margin-right:-1rem}#uw-top-menus .uw-nav-menu svg.uw-caret:last-child{display:none}#uw-top-menus .uw-nav-menu .uw-is-active svg.uw-caret{display:none}#uw-top-menus .uw-nav-menu .uw-is-active svg.uw-caret:last-child{display:inline}#uw-top-menus .uw-nav-menu.uw-nav-menu-reverse svg.uw-caret,#uw-top-menus .uw-nav-menu.uw-nav-menu-secondary-reverse svg.uw-caret{fill:#c5050c}#uw-top-menus.uw-horizontal .uw-nav-menu{clear:both;margin:0 auto;width:100%;background-color:#c5050c}#uw-top-menus.uw-horizontal .uw-nav-menu.uw-nav-menu-reverse{background-color:white}#uw-top-menus.uw-horizontal .uw-nav-menu.uw-nav-menu-secondary{position:absolute;top:0;right:0;z-index:20;margin-bottom:0;background-color:transparent;width:auto;display:inline-block}#uw-top-menus.uw-horizontal .uw-nav-menu.uw-nav-menu-secondary>ul>li{float:right}#uw-top-menus.uw-horizontal .uw-nav-menu.uw-nav-menu-secondary>ul>li>a{font-weight:620;font-size:.825rem;font-family:var(--uwDisplayFont);-webkit-font-smoothing:antialiased;line-height:2.2rem;text-transform:uppercase;border-bottom:none;padding-top:0;padding-bottom:0;margin-bottom:0;background-color:#c5050c}#uw-top-menus.uw-horizontal .uw-nav-menu.uw-nav-menu-secondary>ul>li>a:hover{border-bottom:none}#uw-top-menus.uw-horizontal .uw-nav-menu.uw-nav-menu-secondary.uw-nav-menu-secondary-reverse a{color:#282728;background-color:#fff}#uw-top-menus.uw-horizontal .uw-nav-menu.uw-nav-menu-secondary.uw-nav-menu-secondary-reverse ul ul a:hover{border-bottom:0.2rem solid #c5050c}#uw-top-menus.uw-horizontal .uw-nav-menu ul{display:flex;align-items:center;margin:0 auto;max-width:1320px}#uw-top-menus.uw-horizontal .uw-nav-menu li{display:table-cell;position:relative;vertical-align:middle;padding:0 16px;padding:0 .95rem}#uw-top-menus.uw-horizontal .uw-nav-menu li li{display:block;padding:8px;padding:.5rem .5rem;line-height:1;text-align:left}#uw-top-menus.uw-horizontal .uw-nav-menu>ul>li{text-align:center}#uw-top-menus.uw-horizontal .uw-nav-menu>ul>li>a{padding:17px 0 1px;padding:1.05rem 0 0.1rem;margin-bottom:13px;margin-bottom:.8rem}#uw-top-menus.uw-horizontal .uw-nav-menu ul li.uw-dropdown>ul{background-color:#c5050c}#uw-top-menus.uw-horizontal .uw-nav-menu-reverse ul li.uw-dropdown>ul,#uw-top-menus.uw-horizontal .uw-nav-menu-reverse .uw-nav-menu-secondary-reverse ul li.uw-dropdown>ul,#uw-top-menus.uw-horizontal .uw-nav-menu-secondary-reverse ul li.uw-dropdown>ul,#uw-top-menus.uw-horizontal .uw-nav-menu-secondary-reverse .uw-nav-menu-secondary-reverse ul li.uw-dropdown>ul
a{color:#282728;background-color:transparent}#uw-top-menus.uw-horizontal .uw-nav-menu-reverse ul li.uw-dropdown>ul{box-shadow:0 2px 4px rgba(0,0,0,0.1)}#uw-top-menus.uw-horizontal .uw-nav-menu-reverse-flat ul li.uw-dropdown>ul,#uw-top-menus.uw-horizontal .uw-nav-menu-secondary-reverse ul li.uw-dropdown>ul{box-shadow:none;border-left:1px solid #cfcfcf;border-right:1px solid #cfcfcf;border-bottom:1px solid #cfcfcf}#uw-top-menus.uw-horizontal .uw-nav-menu.uw-nav-menu-reverse ul ul a{font-weight:650;font-size:0.9rem}#uw-top-menus.uw-horizontal .uw-nav-menu.uw-nav-menu-reverse ul ul .uw-dropdown>a{color:#c5050c}#uw-top-menus.uw-horizontal .uw-nav-menu>ul>li.uw-dropdown>a,#uw-top-menus.uw-horizontal .uw-nav-menu.uw-nav-menu-reverse>ul>li.uw-dropdown>a{padding-right:1rem}#uw-top-menus.uw-horizontal .uw-nav-menu>ul>li.uw-dropdown.uw-is-active>ul{display:block}#uw-top-menus.uw-horizontal .uw-nav-menu.uw-nav-menu-reverse a{color:#282728;font-weight:650}#uw-top-menus.uw-horizontal .uw-nav-menu ul ul{position:absolute;left:0;width:188px;z-index:99999;display:none}#uw-top-menus.uw-horizontal .uw-nav-menu>ul>li:first-child{padding-left:0}#uw-top-menus.uw-horizontal .uw-nav-menu>ul>li.current-menu-item>a,#uw-top-menus.uw-horizontal .uw-nav-menu>ul>li.current-menu-parent>a{border-bottom-color:white}#uw-top-menus.uw-horizontal .uw-nav-menu.uw-nav-menu-reverse{border-top:1px solid #cfcfcf;border-bottom:1px solid #cfcfcf}#uw-top-menus.uw-horizontal .uw-nav-menu.uw-nav-menu-reverse>ul>li:first-child{padding-left:0}#uw-top-menus.uw-horizontal .uw-nav-menu.uw-nav-menu-reverse>ul>li>a{padding:.3rem 0 .1rem;margin-bottom:.8rem;margin-top:.8rem;border-bottom:.2rem solid transparent}#uw-top-menus.uw-horizontal .uw-nav-menu.uw-nav-menu-reverse>ul>li>a:hover{border-bottom:0.2rem solid #c5050c}#uw-top-menus.uw-horizontal .uw-nav-menu.uw-nav-menu-reverse>ul>li>a+ul>li>a:hover{border-bottom:0.2rem solid #c5050c}#uw-top-menus.uw-horizontal .uw-nav-menu.uw-nav-menu-reverse>ul>li.current-menu-item>a,#uw-top-menus.uw-horizontal .uw-nav-menu.uw-nav-menu-reverse>ul>li.current-menu-parent>a{border-bottom:0.2rem solid #c5050c}#uw-top-menus.uw-horizontal .uw-nav-menu ul ul a{display:inline;margin-bottom:0;padding-bottom:0;height:auto;width:168px}#uw-top-menus.uw-stacked .uw-nav-menu{background-color:#f2f2f2}#uw-top-menus.uw-stacked .uw-nav-menu a{color:#282728}#uw-top-menus.uw-stacked .uw-nav-menu a:hover{border-bottom-color:#cfcfcf;background-color:#e8e8e8}#uw-top-menus.uw-stacked .uw-nav-menu ul{margin:0;padding-top:.5rem;padding-bottom:.5rem;padding:0;display:block}#uw-top-menus.uw-stacked .uw-nav-menu ul ul{position:static;padding:0;display:none}#uw-top-menus.uw-stacked .uw-nav-menu ul ul a{font-weight:400}#uw-top-menus.uw-stacked .uw-nav-menu ul ul ul{margin-top:0}#uw-top-menus.uw-stacked .uw-nav-menu>ul>li{display:block;text-align:left;padding:0}#uw-top-menus.uw-stacked .uw-nav-menu>ul>li li a{padding-left:2rem}#uw-top-menus.uw-stacked .uw-nav-menu>ul>li a{padding:1rem;margin-bottom:0;margin-top:0;display:block;border:none;border-bottom:1px solid #cfcfcf}#uw-top-menus.uw-stacked .uw-nav-menu.uw-nav-menu-secondary{border-top:2px solid #cfcfcf}#uw-top-menus.uw-stacked .uw-nav-menu.uw-nav-menu-secondary li a{text-transform:none;font-weight:400}#uw-top-menus.uw-stacked .uw-nav-menu 
svg.uw-caret{color:#c5050c;fill:#c5050c;width:1rem;height:1rem;vertical-align:-.2rem}#test-get-computed-style-width{width:100px;padding:10px;display:inline-block;position:absolute;bottom:0}.uw-footer{clear:both;background-color:#282728;border-top:5px solid #c5050c;color:#adadad}.uw-footer a{color:#adadad}.uw-footer a:hover{color:#f7f7f7}.uw-footer-content{padding-top:2rem;display:flex;flex-wrap:wrap;justify-content:space-around;max-width:1200px;margin:0 auto}.uw-footer-content>div{flex:1 0 100%;max-width:100%;padding:0 2rem;text-align:center}.uw-footer-content>div.uw-logo{padding-bottom:2rem;text-align:center}.uw-footer-content ul{margin-left:0;margin-bottom:2rem}.uw-footer-content p,.uw-footer-content li{font-size:.9rem;margin:0 0 .5rem;line-height:1.25;list-style:none}@media screen and (min-width: 40em){.uw-footer-content>div{flex:1 0 33.333%;max-width:33.333%;padding:0 2rem;text-align:center}.uw-footer-content>div.uw-logo:nth-last-child(2):nth-child(1){flex-basis:50%;max-width:50%;text-align:right}.uw-footer-content>div:nth-last-child(1):nth-child(2){flex-basis:50%;max-width:50%;text-align:left}.uw-footer-content>div.uw-logo:nth-last-child(3):nth-child(1){flex-basis:33.333333%;max-width:33.333333%;text-align:right}.uw-footer-content>div:nth-last-child(2):nth-child(2){flex-basis:33.333333%;max-width:33.333333%;text-align:left}.uw-footer-content>div:nth-last-child(1):nth-child(3){flex-basis:33.333333%;max-width:33.333333%;text-align:left}.uw-footer-content .uw-logo{flex:0 1 100%;max-width:100%;padding-bottom:2rem}.uw-footer-content p,.uw-footer-content li{font-size:1rem;margin-bottom:.75rem}}@media screen and (min-width: 60.75em){.uw-footer-content{padding-top:4rem}.uw-footer-content>div{flex:1 0 25%;max-width:25%;padding:0 2rem;text-align:left}.uw-footer-content .uw-logo{flex-basis:25%;max-width:25%}}.uw-footer-menu ul ul{margin:.75rem 0}.uw-logo{padding-bottom:1.625rem}.uw-logo a svg{width:200px;height:150px;min-width:175px;fill:#adadad;color:#adadad}.uw-logo a:hover svg{fill:#f7f7f7;color:#f7f7f7}.uw-footer-header{color:#adadad;font-family:"Red Hat Text",sans-serif;font-weight:625;font-size:.9rem;line-height:1.25;margin:0 0 1rem;text-transform:uppercase}@media screen and (min-width: 60.75em){.uw-footer-header{font-size:1.125rem}}.uw-footer-contact{text-align:center}.uw-contact-list{margin-left:0}.uw-contact-item{list-style:none;margin-bottom:.75rem;line-height:1.25}[class="uw-contact-item"]>a{text-decoration:underline}.uw-map-marker,.uw-footer-icon{width:1rem;height:1rem;vertical-align:-2px;fill:#adadad}.uw-map-marker:hover,.uw-footer-icon:hover{fill:#f7f7f7}.uw-copyright{padding:2rem 0.5em 1rem;text-align:center}.uw-copyright p{margin-bottom:.2rem;font-size:.925rem}.uw-copyright p a{text-decoration:underline}.uw-social-icons{margin-top:1rem;margin-left:0}.uw-social-icons .uw-social-icon{display:inline-block;margin:0 .5rem}.uw-social-icons .uw-social-icon:first-child{margin-left:0}.uw-social-icons a{display:inline-block;background-color:#adadad;padding:0.4rem;font-size:1.3rem;line-height:1.3rem;border-radius:1.05rem}.uw-social-icons a:hover{background-color:#f7f7f7}.uw-social-icons svg{display:inline-block;vertical-align:top;width:1.3rem;height:1.3rem;margin:0;padding:0;fill:#282728}.uw-button,.button,.button-cta{display:inline-block;text-align:center;font-family:var(--uwButtonFont);font-weight:620;font-size:1rem;line-height:1;padding:0.75rem 1.25rem;margin:0;background:#0479a8;color:#fff;box-shadow:0 1.5px 4px rgba(0,0,0,0.24),0 1.5px 6px 
rgba(0,0,0,0.12);position:relative;cursor:pointer;-webkit-appearance:none;transition:all 0.25s ease-out;vertical-align:middle;border:2px solid #0479a8;border-radius:2px;text-decoration:none !important}.uw-button:hover,.uw-button:focus,.button:hover,.button:focus,.button-cta:hover,.button-cta:focus{color:#0479a8;background-color:#ffffff;text-decoration:none;transition:all 0.25s ease-in-out;outline:none}.uw-button:hover:after,.uw-button:focus:after,.button:hover:after,.button:focus:after,.button-cta:hover:after,.button-cta:focus:after{opacity:1}.uw-button.uw-button-large{padding:0.625em 1.625rem;font-size:1.125rem}.uw-button.uw-button-expanded{display:block;width:100%}.uw-button.uw-button-reverse{background-color:white;border:2px solid #0479a8;color:#0479a8}.uw-button.uw-button-reverse:hover,.uw-button.uw-button-reverse:focus{background-color:#0479a8;border:2px solid #0479a8;color:#fff}.uw-button.uw-button-transparent{background-color:rgba(0,0,0,0.5);border:2px solid #ffffff}.uw-button.uw-button-transparent:hover,.uw-button.uw-button-transparent:focus{background-color:#ffffff;color:#333}.uw-button.uw-button-red{background-color:#c5050c;color:#fff;border:2px solid #c5050c}.uw-button.uw-button-red:hover,.uw-button.uw-button-red:focus{background-color:#fff;color:#c5050c}.uw-button.uw-button-red-reverse{background-color:#fff;color:#c5050c;border:2px solid #c5050c}.uw-button.uw-button-red-reverse:hover,.uw-button.uw-button-red-reverse:focus{background-color:#c5050c;border:2px solid #c5050c;color:#fff}.uw-button-cta,.button-cta{text-transform:uppercase}.button-cta.button-cta-reverse{background-color:#fff;color:#c5050c;border:2px solid #c5050c}.button-cta.button-cta-reverse:hover,.button-cta.button-cta-reverse:focus{background-color:#c5050c;border:2px solid #0479a8;color:#fff}.row-dark-background .button,.row-dark-background .uw-button,.row-dark-background .uw-button.uw-button-reverse,.row-dark-background .uw-button.uw-button-red-reverse,.row-dark-background .button-cta,.has_background-image .button,.has_background-image .uw-button,.has_background-image .uw-button.uw-button-reverse,.has_background-image .uw-button.uw-button-red-reverse,.has_background-image .button-cta,.carousel-content .button,.carousel-content .uw-button,.carousel-content .uw-button.uw-button-reverse,.carousel-content .uw-button.uw-button-red-reverse,.carousel-content .button-cta{border:2px solid #fff}.row-dark-background .uw-drop-shadow .button,.row-dark-background .uw-drop-shadow .uw-button,.row-dark-background .uw-drop-shadow .button-cta,.row-dark-background .uw-content-box .button,.row-dark-background .uw-content-box .uw-button,.row-dark-background .uw-content-box .button-cta,.row-dark-background .tabs-content .button,.row-dark-background .tabs-content .uw-button,.row-dark-background .tabs-content .button-cta,.row-dark-background .uw-accordion-panel-inner .button,.row-dark-background .uw-accordion-panel-inner .uw-button,.row-dark-background .uw-accordion-panel-inner .button-cta,.row-dark-background .faculty-member-content .button,.row-dark-background .faculty-member-content .uw-button,.row-dark-background .faculty-member-content .button-cta,.row-dark-background .alternating-content-box .button,.row-dark-background .alternating-content-box .uw-button,.row-dark-background .alternating-content-box .button-cta,.has_background-image .uw-drop-shadow .button,.has_background-image .uw-drop-shadow .uw-button,.has_background-image .uw-drop-shadow .button-cta,.has_background-image .uw-content-box .button,.has_background-image 
.uw-content-box .uw-button,.has_background-image .uw-content-box .button-cta,.has_background-image .tabs-content .button,.has_background-image .tabs-content .uw-button,.has_background-image .tabs-content .button-cta,.has_background-image .uw-accordion-panel-inner .button,.has_background-image .uw-accordion-panel-inner .uw-button,.has_background-image .uw-accordion-panel-inner .button-cta,.has_background-image .faculty-member-content .button,.has_background-image .faculty-member-content .uw-button,.has_background-image .faculty-member-content .button-cta,.has_background-image .alternating-content-box .button,.has_background-image .alternating-content-box .uw-button,.has_background-image .alternating-content-box .button-cta{border-color:#0479a8}.row-dark-background .uw-drop-shadow .uw-button.uw-button-red,.row-dark-background .uw-content-box .uw-button.uw-button-red,.row-dark-background .tabs-content .uw-button.uw-button-red,.row-dark-background .uw-accordion-panel-inner .uw-button.uw-button-red,.row-dark-background .faculty-member-content .uw-button.uw-button-red,.row-dark-background .alternating-content-box .uw-button.uw-button-red,.has_background-image .uw-drop-shadow .uw-button.uw-button-red,.has_background-image .uw-content-box .uw-button.uw-button-red,.has_background-image .tabs-content .uw-button.uw-button-red,.has_background-image .uw-accordion-panel-inner .uw-button.uw-button-red,.has_background-image .faculty-member-content .uw-button.uw-button-red,.has_background-image .alternating-content-box .uw-button.uw-button-red{border-color:#c5050c}.row-dark-background .uw-drop-shadow .uw-button.uw-button-reverse,.row-dark-background .uw-content-box .uw-button.uw-button-reverse,.row-dark-background .tabs-content .uw-button.uw-button-reverse,.row-dark-background .uw-accordion-panel-inner .uw-button.uw-button-reverse,.row-dark-background .faculty-member-content .uw-button.uw-button-reverse,.row-dark-background .alternating-content-box .uw-button.uw-button-reverse,.has_background-image .uw-drop-shadow .uw-button.uw-button-reverse,.has_background-image .uw-content-box .uw-button.uw-button-reverse,.has_background-image .tabs-content .uw-button.uw-button-reverse,.has_background-image .uw-accordion-panel-inner .uw-button.uw-button-reverse,.has_background-image .faculty-member-content .uw-button.uw-button-reverse,.has_background-image .alternating-content-box .uw-button.uw-button-reverse{border-color:#0479a8}.row-dark-background .uw-drop-shadow .uw-button.uw-button-red-reverse,.row-dark-background .uw-content-box .uw-button.uw-button-red-reverse,.row-dark-background .tabs-content .uw-button.uw-button-red-reverse,.row-dark-background .uw-accordion-panel-inner .uw-button.uw-button-red-reverse,.row-dark-background .faculty-member-content .uw-button.uw-button-red-reverse,.row-dark-background .alternating-content-box .uw-button.uw-button-red-reverse,.has_background-image .uw-drop-shadow .uw-button.uw-button-red-reverse,.has_background-image .uw-content-box .uw-button.uw-button-red-reverse,.has_background-image .tabs-content .uw-button.uw-button-red-reverse,.has_background-image .uw-accordion-panel-inner .uw-button.uw-button-red-reverse,.has_background-image .faculty-member-content .uw-button.uw-button-red-reverse,.has_background-image .alternating-content-box .uw-button.uw-button-red-reverse{border-color:#c5050c}.uw-pe-text_block li .uw-button,.uw-pe-text_block li .button,.uw-pe-text_block li .button-cta,.uw-pe-text_block p .uw-button,.uw-pe-text_block p .button,.uw-pe-text_block p 
.button-cta{-webkit-font-smoothing:antialiased}.gform_wrapper .gform_footer input.uw-button[type=submit]{font-size:0.925rem}.uw-pagination{background-color:#f7f7f7;padding:0 1rem}.uw-pagination-prev-next{display:flex;justify-content:space-between;align-items:center}.uw-pagination-prev-next>[class*="uw-pagination"]{display:inline-block}.uw-pagination-menu ul{background-color:transparent;padding-left:0;padding-right:0;display:flex;justify-content:center}.uw-pagination-menu ul>li{list-style:none}.uw-pagination-menu ul>li>a{background-color:#f7f7f7;display:inline-block;padding:0.75rem 1rem 1rem;border-right:1px solid #dedede;border-top:1px solid #dedede;border-bottom:1px solid #dedede;font-size:0.875rem;line-height:16px;height:2.5rem}.uw-pagination-menu ul>li>a.uw-page-previous,.uw-pagination-menu ul>li>a.uw-page-next{font-size:1rem}.uw-pagination-menu ul>li>a.uw-page-previous svg,.uw-pagination-menu ul>li>a.uw-page-next svg{width:0.65rem;height:auto;vertical-align:-2px}.uw-pagination-menu ul>li>a:first-child{border-left:1px solid #dedede}.uw-pagination-menu ul>li>a[aria-disabled=true]{pointer-events:none;color:#717171}.uw-pagination-menu ul>li>a:hover,.uw-pagination-menu ul>li>a:focus{background-color:#0479a8;color:#fff}.uw-breadcrumbs{max-width:1320px;margin-left:auto;margin-right:auto;display:flex;flex-flow:row wrap}.uw-breadcrumbs ul,.uw-breadcrumbs ol{padding-top:2rem;padding-left:1rem;margin-left:0;margin-bottom:1rem;list-style:none}.uw-breadcrumbs li{float:left;color:black;display:flex;align-items:center;font-size:.875rem;font-family:var(--uwDisplayFont);font-weight:500;line-height:2.1;margin-bottom:0;-webkit-font-smoothing:antialiased}.uw-breadcrumbs li a[aria-current=page]{color:#282728}.uw-breadcrumbs li:not(:last-child)::after{color:#646569;content:"/";margin:0 .5rem;position:relative}input,textarea,select,option,optgroup,legend,fieldset{font-size:1rem;color:#333;vertical-align:top;display:block;margin:0}datalist{font-size:1rem}label{display:block;font-weight:625;margin:0}fieldset label{font-weight:400}.uw-input-row{margin:0 0 1rem 0}input[type="text"],input[type="email"],input[type="password"],input[type="search"],input[type="color"],input[type="date"],input[type="datetime-local"],input[type="month"],input[type="number"],input[type="tel"],input[type="time"],input[type="url"],input[type="week"],input[list],input[type="file"],select,textarea{width:auto;max-width:100%;padding:.5rem;background-color:#fff;border-radius:0px;border:1px solid #c8c8c8}input[type="text"],input[type="email"],input[type="password"],input[type="search"],input[type="color"],input[type="date"],input[type="datetime-local"],input[type="month"],input[type="number"],input[type="tel"],input[type="time"],input[type="url"],input[type="week"],input[list]{height:2.5rem}textarea{overflow:auto}input[type="range"]{height:2.5rem;width:100%;max-width:100%}input[type="file"]{min-height:2.5rem}input[type="search"]{-webkit-appearance:none;height:2.5rem}input[type="checkbox"],input[type="radio"]{display:inline-block;vertical-align:-0.05rem;margin:0 .1rem}input::file-selector-button{display:inline-block;text-align:center;font-family:var(--uwButtonFont);font-weight:620;font-size:1rem;line-height:1;padding:0.75rem 1.25rem;margin:0;background:#0479a8;color:#fff;box-shadow:0 1.5px 4px rgba(0,0,0,0.24),0 1.5px 6px rgba(0,0,0,0.12);position:relative;cursor:pointer;-webkit-appearance:none;transition:all 0.25s ease-out;vertical-align:middle;border:2px solid #0479a8;border-radius:2px;text-decoration:none 
!important;margin-right:1rem;color:#0479a8;background-color:white;padding:0.5rem}input::file-selector-button:hover,input::file-selector-button:focus{color:#0479a8;background-color:#ffffff;text-decoration:none;transition:all 0.25s ease-in-out;outline:none}input::file-selector-button:hover:after,input::file-selector-button:focus:after{opacity:1}select{height:2.5rem}select[multiple]{height:auto;min-height:2.5rem;padding:0}select[multiple] option{margin:0;padding:.5rem}fieldset{padding:10px 25px;border-radius:0px;border:1px solid #c8c8c8;margin-bottom:1rem}legend{padding:0 5px;font-weight:625}input[type="button"],input[type="submit"],input[type="reset"],input[type="image"]{display:inline-block;text-align:center;font-family:var(--uwButtonFont);font-weight:620;font-size:1rem;line-height:1;padding:0.75rem 1.25rem;margin:0;background:#0479a8;color:#fff;box-shadow:0 1.5px 4px rgba(0,0,0,0.24),0 1.5px 6px rgba(0,0,0,0.12);position:relative;cursor:pointer;-webkit-appearance:none;transition:all 0.25s ease-out;vertical-align:middle;border:2px solid #0479a8;border-radius:2px;text-decoration:none !important;width:auto;max-width:inherit;background-color:#0479a8;cursor:pointer;color:#fff;font-weight:620;font-family:var(--uwTextFont);border-radius:0px;border:1px solid #c8c8c8;text-transform:uppercase;-webkit-font-smoothing:antialiased}input[type="button"]:hover,input[type="button"]:focus,input[type="submit"]:hover,input[type="submit"]:focus,input[type="reset"]:hover,input[type="reset"]:focus,input[type="image"]:hover,input[type="image"]:focus{color:#0479a8;background-color:#ffffff;text-decoration:none;transition:all 0.25s ease-in-out;outline:none}input[type="button"]:hover:after,input[type="button"]:focus:after,input[type="submit"]:hover:after,input[type="submit"]:focus:after,input[type="reset"]:hover:after,input[type="reset"]:focus:after,input[type="image"]:hover:after,input[type="image"]:focus:after{opacity:1}input[type="image"]{text-align:center;padding:.5rem}input[disabled],textarea[disabled],select[disabled],option[disabled]{cursor:not-allowed}input:focus,textarea:focus,select:focus,option:focus{background-color:inherit;border-color:#c8c8c8}input[type="checkbox"]:focus,input[type="radio"]:focus{outline:#c8c8c8 solid 2px}input[type="button"]:hover,input[type="submit"]:hover,input[type="reset"]:hover,input[type="button"]:focus,input[type="submit"]:focus,input[type="reset"]:focus{background-color:#03678f;color:#fff}table{width:100%;margin-bottom:2rem;border-collapse:separate;border-spacing:0;border:1px solid #dbdbdb}tfoot,thead{background:#f7f7f7;color:#333;border:1px solid #e4e4e4}caption{font-weight:650;text-align:center;margin-top:1rem;margin-bottom:0.5rem}tbody{border:1px solid #e4e4e4;background-color:#fff}tr{background-color:transparent}table tbody tr:nth-child(even){background-color:#f7f7f7}th,td{padding:.4rem;line-height:1.35}th p,th li,th dd,th dt,td p,td li,td dd,td dt{font-size:1rem;line-height:1.6}th{font-weight:625;font-size:1rem}td{font-size:1rem}.uw-side-nav{background-color:#fff;border-bottom:4px solid #c5050c;padding:1.5rem 2rem 1.5rem 2rem;background-color:#f7f7f7;padding:0;border-left:none;border-right:none !important}@media screen and (min-width: 40em){.uw-side-nav{border-right:1px solid #cfcfcf}}.uw-side-nav+div{margin-top:2rem}@media screen and (min-width: 40em){.uw-side-nav+div{margin-top:4rem}}.uw-side-nav ul{margin-left:0}.uw-side-nav li{position:relative;margin-bottom:0;list-style:none;border-bottom:1px solid #e4e4e4;transition:.25s border-left 
ease-in;font-size:1rem;font-weight:600;line-height:1.35}.uw-side-nav li a{display:block;padding:1rem;position:relative}.uw-side-nav li ul{margin-top:0;margin-left:2rem;margin-bottom:.5rem}.uw-side-nav li ul li{font-weight:400;border-bottom:1px solid transparent}.uw-side-nav li ul li a{padding:0.5rem 1rem;line-height:1}.uw-side-nav li ul li a.uw-current-menu-item,.uw-side-nav li ul li a:hover{color:#c5050c}.uw-side-nav li ul li:first-child>a{padding-top:0}.uw-side-nav>ul>li>a{border-left:.25rem solid transparent}.uw-side-nav>ul>li>a.uw-current-menu-item,.uw-side-nav>ul>li>a:hover{border-left:0.25rem solid #c5050c;color:#c5050c}body.uw-light-gray-bg .uw-side-nav{background-color:#fff;border:1px solid #e4e4e4}.uw-side-nav-ui{text-align:right}.uw-side-nav-button{display:none;background-color:transparent;border-bottom:0;padding:6px 6px 6px;position:absolute;left:1rem;top:-0.75rem;width:calc(100% - 2rem);text-align:left;font-size:.7rem;font-family:verdana;line-height:1}.uw-side-nav-button .uw-side-nav{display:none}.uw-side-nav-button svg{width:1rem;height:1rem;fill:#0479a8;vertical-align:-4px}.uw-side-nav-button{display:none}.uw-side-nav-button:focus{outline:none}.uw-side-nav-is-hidden .uw-side-nav{max-height:0;overflow:hidden;transition:0.25s max-height ease-in;border-bottom:none}.uw-side-nav-is-hidden .uw-side-nav-button{background-color:#f7f7f7;box-shadow:none;transition:.25s box-shadow ease-in;border:1px solid #aec2ca}.uw-side-nav-is-hidden .uw-side-nav-button svg{transform:rotate(0deg);transition:0.25s transform ease-in}.uw-side-nav-is-hidden .uw-side-nav-button:focus{border-color:#0479a8;box-shadow:0 0 4px rgba(0,0,154,0.5)}.uw-overlay{position:absolute;background-color:transparent;width:100%;height:100%;top:0;left:0;z-index:-1;transition:0.25s background-color ease-in}.uw-overlay.uw-is-active{position:absolute;z-index:1;background-color:rgba(0,0,0,0.3)}@media (max-width: 40rem){.uw-side-nav{margin-top:1.7rem;position:absolute;width:calc(100% - 2rem);top:2px;left:1rem;max-height:800px;transition:0.25s max-height ease-in;border-bottom:0.25rem solid #c5050c;z-index:20;box-shadow:0 0 4px rgba(0,0,0,0.3)}body.uw-light-gray-bg .uw-side-nav{border:none}.uw-side-nav-button{display:block;z-index:10;top:0.25rem;box-shadow:0 0 4px rgba(0,0,0,0.3);background-color:#f7f7f7;border:1px solid transparent}.uw-side-nav-button svg{transform:rotate(180deg);transition:0.25s transform ease-in}.uw-side-nav-button:focus{border-color:#0479a8;box-shadow:0 0 4px rgba(0,0,154,0.5)}.uw-sidebar{margin-bottom:0;order:2}.uw-body.uw-body{padding-top:3rem}}.uw-card{padding:.5rem;display:flex}.uw-card .uw-card-content{background-color:#fff;border-radius:5px}.uw-card .uw-card-content img{width:100%}.uw-card .uw-card-content h2,.uw-card .uw-card-content .h2{font-size:1.375rem;font-weight:650;margin-bottom:.225rem}.uw-card .uw-card-copy{padding:0 1rem 1rem}.uw-card .uw-card-copy p,.uw-card .uw-card-copy li{line-height:1.6}.uw-card .uw-card-copy p:last-child,.uw-card .uw-card-copy li:last-child{margin-bottom:0}.uw-body{flex-basis:100%;max-width:100%;margin-bottom:3rem}@media screen and (min-width: 40em){.uw-body{flex-basis:66.6667%;max-width:66.6667%}}.uw-body p,.uw-body li,.uw-body dd,.uw-body dt{font-family:var(--uwCopyFont)}.uw-body:first-child:last-child{flex-basis:100%;max-width:100%;padding-top:0}@media screen and (min-width: 40em){.uw-body:first-child:last-child{flex-basis:100%;max-width:100%}}.uw-body h2.uw-mini-bar,.uw-body 
.uw-mini-bar.h2{margin-top:3.2rem}.uw-sidebar{flex-basis:100%;max-width:100%;margin-bottom:3rem}@media screen and (min-width: 40em){.uw-sidebar{flex-basis:33.3333%;max-width:33.3333%}}.uw-sidebar-box{background-color:#fff;border-bottom:4px solid #c5050c;padding:1rem 1.25rem}@media screen and (min-width: 40em){.uw-sidebar-box{border-right:1px solid #cfcfcf}}.uw-sidebar-box>h3,.uw-sidebar-box>.h3{position:relative;margin-top:2.2rem}.uw-sidebar-box>h3:before,.uw-sidebar-box>.h3:before{position:absolute;left:0;height:4px;content:'';width:2rem;top:-12px;background-color:#c5050c}.uw-sidebar-box>p,.uw-sidebar-box li{font-size:1rem;margin-bottom:1.35rem;line-height:1.35}@media screen and (min-width: 40em){.uw-sidebar-box{border-top:1px solid #cfcfcf;border-left:1px solid #cfcfcf}}html{font-size:14px}@media (min-width: 576px){html{font-size:16px}}@media (min-width: 1200px){html{font-size:18px}}.search-bar{display:flex;flex-direction:column;margin:auto;color:#6c757d;padding:.5rem}@media (min-width: 768px){.search-bar{padding-left:0px;padding-right:0px}}.search-bar .result{box-shadow:0px 5px 5px #0000001c}.search-bar .results-dropdown{height:0px;z-index:100}#uw-sub-menus nav.uw-nav-menu{background-color:#f7f7f7 !important}#branding-sub{background-color:#f7f7f7 !important}.table-container{overflow-x:scroll;margin-bottom:2rem}.table-container table{margin-bottom:0px}.uw-card.promoted{flex-basis:100%;max-width:100%;padding:1rem;display:flex;flex-direction:row}@media screen and (min-width: 40em){.uw-card.promoted{flex-basis:50%;max-width:50%}}.uw-card.promoted .uw-card-content{background-color:#fff;flex-direction:row}.uw-card.promoted .uw-card-content img{width:100%}.uw-card.promoted .uw-card-copy{padding:0 1rem 1rem}.team-card .uw-card-content{padding:15px;display:flex;flex-direction:column}.team-card .uw-card-copy{display:flex;flex-direction:column;justify-content:space-between;flex:1}.team-card img{aspect-ratio:1/1.2;width:100%;object-fit:cover}.team-card .institution{color:grey}.news-post:hover{text-decoration:none}.news-post:hover>div{transition:background-color .5s, transform 1s;background-color:#dfdfdf;box-shadow:5px 0px 5px #ffffff}.news-post .img-container{height:180px}.news-post img{height:100%;width:100%;object-fit:cover}.mw-1000{max-width:1000px}#hero-image-ultrawide{height:50vw;max-height:350px;width:auto;max-width:none;margin-left:50%;transform:translateX(-50%)}.stat-card{padding:20px;background-color:#f7f7f7;margin-bottom:1.5rem}.circuit-background{background:url(/web-preview/preview-fall2024-info/images/circuit_board_light.svg) no-repeat center;-webkit-background-size:cover;-moz-background-size:cover;-o-background-size:cover;background-size:cover;box-shadow:inset 0px 0px 10px white}#guide-sidebar{top:1rem;position:sticky;line-height:1rem}#guide-sidebar .accordion-button{transition:padding .2s;transition-timing-function:ease-out}#guide-sidebar .accordion-button:focus{padding-left:0.4rem;padding-right:0.4rem}#guide-sidebar .accordion-button:not(.collapsed){padding-left:0.4rem;padding-right:0.4rem}#guide-sidebar .document-link-wrapper{border-bottom:1px solid rgba(0,0,0,0.125);padding:.4rem;background-color:#fffcfc}#guide-sidebar button{font-size:1rem}#guide-sidebar a{font-size:.8rem;color:black}#guide-sidebar a:hover{color:#c5050c}.primary-callout{background-color:#c5050c;padding:0.25rem 1.5rem;margin-bottom:1rem;border-radius:.25rem;color:white;box-shadow:0 0.5rem 1rem rgba(0,0,0,0.15)}.primary-callout a{color:#bcecff;text-decoration:underline}.primary-callout 
a:hover{color:#4ec0ed}.btn-guide{display:inline-block;width:97%;box-sizing:border-box;color:#212529;background-color:#f8f9fa;border-color:#e2e6ea;border-width:2px;text-align:center;white-space:nowrap;overflow:hidden;text-overflow:ellipsis;transition:background-color 0.3s, border-color 0.3s}.side-divider{border-left:5px solid #B1050B;display:none}.btn-guide-highlighted{background-color:#F9E6E7;border-color:#B1050B}.img-btn-guide{padding-right:10px;padding-bottom:5px;max-width:100%;height:80px}.no-gutters{padding-right:0;padding-left:0;margin-bottom:5px}.news-bar{width:auto;height:200px;padding:0%} + +/*# sourceMappingURL=style-v10.css.map */ \ No newline at end of file diff --git a/preview-fall2024-info/assets/css/style-v10.css.map b/preview-fall2024-info/assets/css/style-v10.css.map new file mode 100644 index 000000000..02c177f86 --- /dev/null +++ b/preview-fall2024-info/assets/css/style-v10.css.map @@ -0,0 +1,214 @@ +{ + "version": 3, + "file": "style-v10.css", + "sources": [ + "style-v10.scss", + "_sass/bootstrap/bootstrap.scss", + "_sass/bootstrap/_functions.scss", + "_sass/bootstrap/_variables.scss", + "_sass/bootstrap/_mixins.scss", + "_sass/bootstrap/vendor/_rfs.scss", + "_sass/bootstrap/mixins/_deprecate.scss", + "_sass/bootstrap/mixins/_breakpoints.scss", + "_sass/bootstrap/mixins/_color-scheme.scss", + "_sass/bootstrap/mixins/_image.scss", + "_sass/bootstrap/mixins/_resize.scss", + "_sass/bootstrap/mixins/_visually-hidden.scss", + "_sass/bootstrap/mixins/_reset-text.scss", + "_sass/bootstrap/mixins/_text-truncate.scss", + "_sass/bootstrap/mixins/_utilities.scss", + "_sass/bootstrap/mixins/_alert.scss", + "_sass/bootstrap/mixins/_buttons.scss", + "_sass/bootstrap/mixins/_caret.scss", + "_sass/bootstrap/mixins/_pagination.scss", + "_sass/bootstrap/mixins/_lists.scss", + "_sass/bootstrap/mixins/_list-group.scss", + "_sass/bootstrap/mixins/_forms.scss", + "_sass/bootstrap/mixins/_table-variants.scss", + "_sass/bootstrap/mixins/_border-radius.scss", + "_sass/bootstrap/mixins/_box-shadow.scss", + "_sass/bootstrap/mixins/_gradients.scss", + "_sass/bootstrap/mixins/_transition.scss", + "_sass/bootstrap/mixins/_clearfix.scss", + "_sass/bootstrap/mixins/_container.scss", + "_sass/bootstrap/mixins/_grid.scss", + "_sass/bootstrap/_utilities.scss", + "_sass/bootstrap/_root.scss", + "_sass/bootstrap/_reboot.scss", + "_sass/bootstrap/_type.scss", + "_sass/bootstrap/_images.scss", + "_sass/bootstrap/_containers.scss", + "_sass/bootstrap/_grid.scss", + "_sass/bootstrap/_tables.scss", + "_sass/bootstrap/_forms.scss", + "_sass/bootstrap/forms/_labels.scss", + "_sass/bootstrap/forms/_form-text.scss", + "_sass/bootstrap/forms/_form-control.scss", + "_sass/bootstrap/forms/_form-select.scss", + "_sass/bootstrap/forms/_form-check.scss", + "_sass/bootstrap/forms/_form-range.scss", + "_sass/bootstrap/forms/_floating-labels.scss", + "_sass/bootstrap/forms/_input-group.scss", + "_sass/bootstrap/forms/_validation.scss", + "_sass/bootstrap/_buttons.scss", + "_sass/bootstrap/_transitions.scss", + "_sass/bootstrap/_dropdown.scss", + "_sass/bootstrap/_button-group.scss", + "_sass/bootstrap/_nav.scss", + "_sass/bootstrap/_navbar.scss", + "_sass/bootstrap/_card.scss", + "_sass/bootstrap/_accordion.scss", + "_sass/bootstrap/_breadcrumb.scss", + "_sass/bootstrap/_pagination.scss", + "_sass/bootstrap/_badge.scss", + "_sass/bootstrap/_alert.scss", + "_sass/bootstrap/_progress.scss", + "_sass/bootstrap/_list-group.scss", + "_sass/bootstrap/_close.scss", + "_sass/bootstrap/_toasts.scss", + "_sass/bootstrap/_modal.scss", 
+ "_sass/bootstrap/_tooltip.scss", + "_sass/bootstrap/_popover.scss", + "_sass/bootstrap/_carousel.scss", + "_sass/bootstrap/_spinners.scss", + "_sass/bootstrap/_offcanvas.scss", + "_sass/bootstrap/_helpers.scss", + "_sass/bootstrap/helpers/_clearfix.scss", + "_sass/bootstrap/helpers/_colored-links.scss", + "_sass/bootstrap/helpers/_ratio.scss", + "_sass/bootstrap/helpers/_position.scss", + "_sass/bootstrap/helpers/_visually-hidden.scss", + "_sass/bootstrap/helpers/_stretched-link.scss", + "_sass/bootstrap/helpers/_text-truncation.scss", + "_sass/bootstrap/utilities/_api.scss", + "_sass/UW-theming/uw_style.scss", + "_sass/UW-theming/vendor/foundation/_unit.scss", + "_sass/UW-theming/vendor/foundation/_breakpoint.scss", + "_sass/UW-theming/_uw_reset.scss", + "_sass/UW-theming/_uw_mixins.scss", + "_sass/UW-theming/_uw_vars.scss", + "_sass/UW-theming/_uw_fonts.scss", + "_sass/UW-theming/_uw_typography.scss", + "_sass/UW-theming/_uw_utils.scss", + "_sass/UW-theming/_uw_content.scss", + "_sass/UW-theming/_uw_mini_bar.scss", + "_sass/UW-theming/_uw_global_bar.scss", + "_sass/UW-theming/_uw_header.scss", + "_sass/UW-theming/_uw_nav_menu.scss", + "_sass/UW-theming/_uw_footer.scss", + "_sass/UW-theming/_uw_button.scss", + "_sass/UW-theming/_uw_pagination.scss", + "_sass/UW-theming/_uw_breadcrumbs.scss", + "_sass/UW-theming/_uw_form.scss", + "_sass/UW-theming/_uw_table.scss", + "_sass/UW-theming/_uw_layouts.scss", + "_sass/UW-theming/_uw_side_nav.scss", + "_sass/UW-theming/_uw_card.scss" + ], + "sourcesContent": [ + "$grid-breakpoints: (\n xs: 0,\n sm: 576px,\n md: 768px,\n lg: 992px,\n xl: 1200px,\n xxl: 1400px\n);\n$primary: #c5050c;\n\n$uw-max-content-width: 1320px;\n\n$accordion-padding-x: 0;\n$accordion-padding-y: .5em;\n$accordion-icon-width: .9rem;\n\n$list-group-border-color: black;\n\n// Bootstrap\n@import 'bootstrap/bootstrap';\n\n// UW Theme\n@import 'UW-theming/uw_style';\n\n// Responsive Font Size\nhtml {\n font-size: 14px;\n}\n\n@include media-breakpoint-up(sm) {\n html {\n font-size: 16px;\n }\n}\n\n@include media-breakpoint-up(xl) {\n html {\n font-size: 18px;\n }\n}\n\n// Search Bar\n.search-bar {\n display: flex;\n flex-direction: column;\n margin: auto;\n color: $secondary;\n padding: map-get($spacers, 2);\n @include media-breakpoint-up(md){\n padding-left: 0px;\n padding-right: 0px;\n }\n .result {\n box-shadow: 0px 5px 5px #0000001c;\n }\n .results-dropdown {\n height: 0px;\n z-index: 100;\n }\n}\n\n// Navbar\n#uw-sub-menus {\n nav.uw-nav-menu {\n background-color: #f7f7f7 !important;\n }\n}\n\n#branding-sub {\n background-color: #f7f7f7 !important;\n}\n\n.table-container {\n table {\n margin-bottom: 0px;\n }\n overflow-x: scroll;\n margin-bottom: 2rem;\n}\n\n.uw-card.promoted {\n @include uw-flex-column(50%, map-get($breakpoints, medium));\n padding: $uw-padding*1;\n display: flex;\n flex-direction: row;\n .uw-card-content {\n background-color: $uw-white;\n flex-direction: row;\n img {\n width: 100%;\n }\n }\n .uw-card-copy {\n padding: 0 1rem 1rem;\n }\n}\n\n.team-card {\n .uw-card-content {\n padding: 15px;\n display: flex;\n flex-direction: column;\n }\n .uw-card-copy {\n display: flex;\n flex-direction: column;\n justify-content: space-between;\n flex: 1;\n }\n img{\n aspect-ratio: 1/1.2;\n width:100%;\n object-fit: cover;\n }\n .institution {\n color: grey;\n }\n}\n\n.news-post:hover {\n > div {\n transition: background-color .5s, transform 1s;\n background-color: #dfdfdf;\n box-shadow: 5px 0px 5px #ffffff;\n }\n text-decoration: none;\n}\n\n.news-post {\n 
.img-container{\n height: 180px;\n }\n img {\n height: 100%;\n width: 100%;\n object-fit: cover;\n }\n}\n\n// Homepage was designed with this applied and I don't want to redesign it at the moment\n.mw-1000 {\n max-width: 1000px;\n}\n\n#hero-image-ultrawide {\n height:50vw;\n max-height:350px;\n width: auto;\n max-width: none;\n margin-left: 50%;\n transform: translateX(-50%);\n}\n\n.stat-card {\n padding: 20px;\n background-color: #f7f7f7;\n margin-bottom: 1.5rem;\n}\n\n.circuit-background {\n background: url(/web-preview/preview-fall2024-info/images/circuit_board_light.svg) no-repeat center;\n -webkit-background-size: cover;\n -moz-background-size: cover;\n -o-background-size: cover;\n background-size: cover;\n\n box-shadow: inset 0px 0px 10px white;\n}\n\n\n#guide-sidebar {\n\n top: 1rem;\n position: sticky;\n line-height: 1rem;\n\n .accordion-button {\n transition: padding .2s;\n transition-timing-function: ease-out;\n }\n .accordion-button:focus {\n padding-left: 0.4rem;\n padding-right: 0.4rem;\n }\n .accordion-button:not(.collapsed) {\n padding-left: 0.4rem;\n padding-right: 0.4rem;\n }\n .document-link-wrapper {\n border-bottom: 1px solid rgba(0,0,0,0.125);\n padding: .4rem;\n background-color: #fffcfc;\n }\n button {\n font-size: 1rem;\n }\n a {\n font-size: .8rem;\n color: black;\n }\n a:hover {\n color: $primary;\n }\n}\n\n.primary-callout {\n background-color: $primary;\n padding: 0.25rem 1.5rem;\n margin-bottom: 1rem;\n border-radius: .25rem;\n color: white;\n box-shadow: 0 0.5rem 1rem rgba(0,0,0,0.15);\n a {\n color: #bcecff;\n text-decoration: underline;\n &:hover {\n color: #4ec0ed;\n }\n }\n}\n\n.btn-guide {\n display: inline-block;\n width: 97%;\n box-sizing: border-box;\n color: #212529;\n background-color: #f8f9fa;\n border-color: #e2e6ea;\n border-width: 2px;\n text-align: center; \n white-space: nowrap; \n overflow: hidden; \n text-overflow: ellipsis; \n transition: background-color 0.3s, border-color 0.3s;\n}\n\n.side-divider {\n border-left: 5px solid #B1050B;\n display: none;\n}\n\n.btn-guide-highlighted {\n background-color: #F9E6E7;\n border-color: #B1050B;\n}\n\n.img-btn-guide {\n padding-right: 10px;\n padding-bottom: 5px;\n max-width: 100%;\n height: 80px;\n}\n\n.no-gutters {\n padding-right: 0;\n padding-left: 0;\n margin-bottom: 5px;\n}\n\n.news-bar {\n width: auto;\n height: 200px;\n padding: 0%;\n}\n", + "/*!\n * Bootstrap v5.0.2 (https://getbootstrap.com/)\n * Copyright 2011-2021 The Bootstrap Authors\n * Copyright 2011-2021 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n */\n\n// scss-docs-start import-stack\n// Configuration\n@import \"functions\";\n@import \"variables\";\n@import \"mixins\";\n@import \"utilities\";\n\n// Layout & components\n@import \"root\";\n@import \"reboot\";\n@import \"type\";\n@import \"images\";\n@import \"containers\";\n@import \"grid\";\n@import \"tables\";\n@import \"forms\";\n@import \"buttons\";\n@import \"transitions\";\n@import \"dropdown\";\n@import \"button-group\";\n@import \"nav\";\n@import \"navbar\";\n@import \"card\";\n@import \"accordion\";\n@import \"breadcrumb\";\n@import \"pagination\";\n@import \"badge\";\n@import \"alert\";\n@import \"progress\";\n@import \"list-group\";\n@import \"close\";\n@import \"toasts\";\n@import \"modal\";\n@import \"tooltip\";\n@import \"popover\";\n@import \"carousel\";\n@import \"spinners\";\n@import \"offcanvas\";\n\n// Helpers\n@import \"helpers\";\n\n// Utilities\n@import \"utilities/api\";\n// scss-docs-end import-stack\n", + "// 
Bootstrap functions\n//\n// Utility mixins and functions for evaluating source code across our variables, maps, and mixins.\n\n// Ascending\n// Used to evaluate Sass maps like our grid breakpoints.\n@mixin _assert-ascending($map, $map-name) {\n $prev-key: null;\n $prev-num: null;\n @each $key, $num in $map {\n @if $prev-num == null or unit($num) == \"%\" or unit($prev-num) == \"%\" {\n // Do nothing\n } @else if not comparable($prev-num, $num) {\n @warn \"Potentially invalid value for #{$map-name}: This map must be in ascending order, but key '#{$key}' has value #{$num} whose unit makes it incomparable to #{$prev-num}, the value of the previous key '#{$prev-key}' !\";\n } @else if $prev-num >= $num {\n @warn \"Invalid value for #{$map-name}: This map must be in ascending order, but key '#{$key}' has value #{$num} which isn't greater than #{$prev-num}, the value of the previous key '#{$prev-key}' !\";\n }\n $prev-key: $key;\n $prev-num: $num;\n }\n}\n\n// Starts at zero\n// Used to ensure the min-width of the lowest breakpoint starts at 0.\n@mixin _assert-starts-at-zero($map, $map-name: \"$grid-breakpoints\") {\n @if length($map) > 0 {\n $values: map-values($map);\n $first-value: nth($values, 1);\n @if $first-value != 0 {\n @warn \"First breakpoint in #{$map-name} must start at 0, but starts at #{$first-value}.\";\n }\n }\n}\n\n// Internal Bootstrap function to turn maps into its negative variant.\n// It prefixes the keys with `n` and makes the value negative.\n@function negativify-map($map) {\n $result: ();\n @each $key, $value in $map {\n @if $key != 0 {\n $result: map-merge($result, (\"n\" + $key: (-$value)));\n }\n }\n @return $result;\n}\n\n// Get multiple keys from a sass map\n@function map-get-multiple($map, $values) {\n $result: ();\n @each $key, $value in $map {\n @if (index($values, $key) != null) {\n $result: map-merge($result, ($key: $value));\n }\n }\n @return $result;\n}\n\n// Replace `$search` with `$replace` in `$string`\n// Used on our SVG icon backgrounds for custom forms.\n//\n// @author Hugo Giraudel\n// @param {String} $string - Initial string\n// @param {String} $search - Substring to replace\n// @param {String} $replace ('') - New value\n// @return {String} - Updated string\n@function str-replace($string, $search, $replace: \"\") {\n $index: str-index($string, $search);\n\n @if $index {\n @return str-slice($string, 1, $index - 1) + $replace + str-replace(str-slice($string, $index + str-length($search)), $search, $replace);\n }\n\n @return $string;\n}\n\n// See https://codepen.io/kevinweber/pen/dXWoRw\n//\n// Requires the use of quotes around data URIs.\n\n@function escape-svg($string) {\n @if str-index($string, \"data:image/svg+xml\") {\n @each $char, $encoded in $escaped-characters {\n // Do not escape the url brackets\n @if str-index($string, \"url(\") == 1 {\n $string: url(\"#{str-replace(str-slice($string, 6, -3), $char, $encoded)}\");\n } @else {\n $string: str-replace($string, $char, $encoded);\n }\n }\n }\n\n @return $string;\n}\n\n// Color contrast\n// See https://github.com/twbs/bootstrap/pull/30168\n\n// A list of pre-calculated numbers of pow(divide((divide($value, 255) + .055), 1.055), 2.4). 
(from 0 to 255)\n// stylelint-disable-next-line scss/dollar-variable-default, scss/dollar-variable-pattern\n$_luminance-list: .0008 .001 .0011 .0013 .0015 .0017 .002 .0022 .0025 .0027 .003 .0033 .0037 .004 .0044 .0048 .0052 .0056 .006 .0065 .007 .0075 .008 .0086 .0091 .0097 .0103 .011 .0116 .0123 .013 .0137 .0144 .0152 .016 .0168 .0176 .0185 .0194 .0203 .0212 .0222 .0232 .0242 .0252 .0262 .0273 .0284 .0296 .0307 .0319 .0331 .0343 .0356 .0369 .0382 .0395 .0409 .0423 .0437 .0452 .0467 .0482 .0497 .0513 .0529 .0545 .0561 .0578 .0595 .0612 .063 .0648 .0666 .0685 .0704 .0723 .0742 .0762 .0782 .0802 .0823 .0844 .0865 .0887 .0908 .0931 .0953 .0976 .0999 .1022 .1046 .107 .1095 .1119 .1144 .117 .1195 .1221 .1248 .1274 .1301 .1329 .1356 .1384 .1413 .1441 .147 .15 .1529 .1559 .159 .162 .1651 .1683 .1714 .1746 .1779 .1812 .1845 .1878 .1912 .1946 .1981 .2016 .2051 .2086 .2122 .2159 .2195 .2232 .227 .2307 .2346 .2384 .2423 .2462 .2502 .2542 .2582 .2623 .2664 .2705 .2747 .2789 .2831 .2874 .2918 .2961 .3005 .305 .3095 .314 .3185 .3231 .3278 .3325 .3372 .3419 .3467 .3515 .3564 .3613 .3663 .3712 .3763 .3813 .3864 .3916 .3968 .402 .4072 .4125 .4179 .4233 .4287 .4342 .4397 .4452 .4508 .4564 .4621 .4678 .4735 .4793 .4851 .491 .4969 .5029 .5089 .5149 .521 .5271 .5333 .5395 .5457 .552 .5583 .5647 .5711 .5776 .5841 .5906 .5972 .6038 .6105 .6172 .624 .6308 .6376 .6445 .6514 .6584 .6654 .6724 .6795 .6867 .6939 .7011 .7084 .7157 .7231 .7305 .7379 .7454 .7529 .7605 .7682 .7758 .7835 .7913 .7991 .807 .8148 .8228 .8308 .8388 .8469 .855 .8632 .8714 .8796 .8879 .8963 .9047 .9131 .9216 .9301 .9387 .9473 .956 .9647 .9734 .9823 .9911 1;\n\n@function color-contrast($background, $color-contrast-dark: $color-contrast-dark, $color-contrast-light: $color-contrast-light, $min-contrast-ratio: $min-contrast-ratio) {\n $foregrounds: $color-contrast-light, $color-contrast-dark, $white, $black;\n $max-ratio: 0;\n $max-ratio-color: null;\n\n @each $color in $foregrounds {\n $contrast-ratio: contrast-ratio($background, $color);\n @if $contrast-ratio > $min-contrast-ratio {\n @return $color;\n } @else if $contrast-ratio > $max-ratio {\n $max-ratio: $contrast-ratio;\n $max-ratio-color: $color;\n }\n }\n\n @warn \"Found no color leading to #{$min-contrast-ratio}:1 contrast ratio against #{$background}...\";\n\n @return $max-ratio-color;\n}\n\n@function contrast-ratio($background, $foreground: $color-contrast-light) {\n $l1: luminance($background);\n $l2: luminance(opaque($background, $foreground));\n\n @return if($l1 > $l2, divide($l1 + .05, $l2 + .05), divide($l2 + .05, $l1 + .05));\n}\n\n// Return WCAG2.0 relative luminance\n// See https://www.w3.org/WAI/GL/wiki/Relative_luminance\n// See https://www.w3.org/TR/WCAG20-TECHS/G17.html#G17-tests\n@function luminance($color) {\n $rgb: (\n \"r\": red($color),\n \"g\": green($color),\n \"b\": blue($color)\n );\n\n @each $name, $value in $rgb {\n $value: if(divide($value, 255) < .03928, divide(divide($value, 255), 12.92), nth($_luminance-list, $value + 1));\n $rgb: map-merge($rgb, ($name: $value));\n }\n\n @return (map-get($rgb, \"r\") * .2126) + (map-get($rgb, \"g\") * .7152) + (map-get($rgb, \"b\") * .0722);\n}\n\n// Return opaque color\n// opaque(#fff, rgba(0, 0, 0, .5)) => #808080\n@function opaque($background, $foreground) {\n @return mix(rgba($foreground, 1), $background, opacity($foreground) * 100);\n}\n\n// scss-docs-start color-functions\n// Tint a color: mix a color with white\n@function tint-color($color, $weight) {\n @return mix(white, $color, $weight);\n}\n\n// Shade a color: mix a 
color with black\n@function shade-color($color, $weight) {\n @return mix(black, $color, $weight);\n}\n\n// Shade the color if the weight is positive, else tint it\n@function shift-color($color, $weight) {\n @return if($weight > 0, shade-color($color, $weight), tint-color($color, -$weight));\n}\n// scss-docs-end color-functions\n\n// Return valid calc\n@function add($value1, $value2, $return-calc: true) {\n @if $value1 == null {\n @return $value2;\n }\n\n @if $value2 == null {\n @return $value1;\n }\n\n @if type-of($value1) == number and type-of($value2) == number and comparable($value1, $value2) {\n @return $value1 + $value2;\n }\n\n @if type-of($value1) != number {\n $value1: unquote(\"(\") + $value1 + unquote(\")\");\n }\n\n @if type-of($value2) != number {\n $value2: unquote(\"(\") + $value2 + unquote(\")\");\n }\n\n @return if($return-calc == true, calc(#{$value1} + #{$value2}), $value1 + unquote(\" + \") + $value2);\n}\n\n@function subtract($value1, $value2, $return-calc: true) {\n @if $value1 == null and $value2 == null {\n @return null;\n }\n\n @if $value1 == null {\n @return -$value2;\n }\n\n @if $value2 == null {\n @return $value1;\n }\n\n @if type-of($value1) == number and type-of($value2) == number and comparable($value1, $value2) {\n @return $value1 - $value2;\n }\n\n @if type-of($value1) != number {\n $value1: unquote(\"(\") + $value1 + unquote(\")\");\n }\n\n @if type-of($value2) != number {\n $value2: unquote(\"(\") + $value2 + unquote(\")\");\n }\n\n @return if($return-calc == true, calc(#{$value1} - #{$value2}), $value1 + unquote(\" - \") + $value2);\n}\n\n@function divide($dividend, $divisor, $precision: 10) {\n $sign: if($dividend > 0 and $divisor > 0 or $dividend < 0 and $divisor < 0, 1, -1);\n $dividend: abs($dividend);\n $divisor: abs($divisor);\n @if $dividend == 0 {\n @return 0;\n }\n @if $divisor == 0 {\n @error \"Cannot divide by 0\";\n }\n $remainder: $dividend;\n $result: 0;\n $factor: 10;\n @while ($remainder > 0 and $precision >= 0) {\n $quotient: 0;\n @while ($remainder >= $divisor) {\n $remainder: $remainder - $divisor;\n $quotient: $quotient + 1;\n }\n $result: $result * 10 + $quotient;\n $factor: $factor * .1;\n $remainder: $remainder * 10;\n $precision: $precision - 1;\n @if ($precision < 0 and $remainder >= $divisor * 5) {\n $result: $result + 1;\n }\n }\n $result: $result * $factor * $sign;\n $dividend-unit: unit($dividend);\n $divisor-unit: unit($divisor);\n $unit-map: (\n \"px\": 1px,\n \"rem\": 1rem,\n \"em\": 1em,\n \"%\": 1%\n );\n @if ($dividend-unit != $divisor-unit and map-has-key($unit-map, $dividend-unit)) {\n $result: $result * map-get($unit-map, $dividend-unit);\n }\n @return $result;\n}\n", + "// Variables\n//\n// Variables should follow the `$component-state-property-size` formula for\n// consistent naming. 
Ex: $nav-link-disabled-color and $modal-content-box-shadow-xs.\n\n// Color system\n\n// scss-docs-start gray-color-variables\n$white: #fff !default;\n$gray-100: #f8f9fa !default;\n$gray-200: #e9ecef !default;\n$gray-300: #dee2e6 !default;\n$gray-400: #ced4da !default;\n$gray-500: #adb5bd !default;\n$gray-600: #6c757d !default;\n$gray-700: #495057 !default;\n$gray-800: #343a40 !default;\n$gray-900: #212529 !default;\n$black: #000 !default;\n// scss-docs-end gray-color-variables\n\n// fusv-disable\n// scss-docs-start gray-colors-map\n$grays: (\n \"100\": $gray-100,\n \"200\": $gray-200,\n \"300\": $gray-300,\n \"400\": $gray-400,\n \"500\": $gray-500,\n \"600\": $gray-600,\n \"700\": $gray-700,\n \"800\": $gray-800,\n \"900\": $gray-900\n) !default;\n// scss-docs-end gray-colors-map\n// fusv-enable\n\n// scss-docs-start color-variables\n$blue: #0d6efd !default;\n$indigo: #6610f2 !default;\n$purple: #6f42c1 !default;\n$pink: #d63384 !default;\n$red: #dc3545 !default;\n$orange: #fd7e14 !default;\n$yellow: #ffc107 !default;\n$green: #198754 !default;\n$teal: #20c997 !default;\n$cyan: #0dcaf0 !default;\n// scss-docs-end color-variables\n\n// scss-docs-start colors-map\n$colors: (\n \"blue\": $blue,\n \"indigo\": $indigo,\n \"purple\": $purple,\n \"pink\": $pink,\n \"red\": $red,\n \"orange\": $orange,\n \"yellow\": $yellow,\n \"green\": $green,\n \"teal\": $teal,\n \"cyan\": $cyan,\n \"white\": $white,\n \"gray\": $gray-600,\n \"gray-dark\": $gray-800\n) !default;\n// scss-docs-end colors-map\n\n// scss-docs-start theme-color-variables\n$primary: $blue !default;\n$secondary: $gray-600 !default;\n$success: $green !default;\n$info: $cyan !default;\n$warning: $yellow !default;\n$danger: $red !default;\n$light: $gray-100 !default;\n$dark: $gray-900 !default;\n// scss-docs-end theme-color-variables\n\n// scss-docs-start theme-colors-map\n$theme-colors: (\n \"primary\": $primary,\n \"secondary\": $secondary,\n \"success\": $success,\n \"info\": $info,\n \"warning\": $warning,\n \"danger\": $danger,\n \"light\": $light,\n \"dark\": $dark\n) !default;\n// scss-docs-end theme-colors-map\n\n// The contrast ratio to reach against white, to determine if color changes from \"light\" to \"dark\". 
Acceptable values for WCAG 2.0 are 3, 4.5 and 7.\n// See https://www.w3.org/TR/WCAG20/#visual-audio-contrast-contrast\n$min-contrast-ratio: 4.5 !default;\n\n// Customize the light and dark text colors for use in our color contrast function.\n$color-contrast-dark: $black !default;\n$color-contrast-light: $white !default;\n\n// fusv-disable\n$blue-100: tint-color($blue, 80%) !default;\n$blue-200: tint-color($blue, 60%) !default;\n$blue-300: tint-color($blue, 40%) !default;\n$blue-400: tint-color($blue, 20%) !default;\n$blue-500: $blue !default;\n$blue-600: shade-color($blue, 20%) !default;\n$blue-700: shade-color($blue, 40%) !default;\n$blue-800: shade-color($blue, 60%) !default;\n$blue-900: shade-color($blue, 80%) !default;\n\n$indigo-100: tint-color($indigo, 80%) !default;\n$indigo-200: tint-color($indigo, 60%) !default;\n$indigo-300: tint-color($indigo, 40%) !default;\n$indigo-400: tint-color($indigo, 20%) !default;\n$indigo-500: $indigo !default;\n$indigo-600: shade-color($indigo, 20%) !default;\n$indigo-700: shade-color($indigo, 40%) !default;\n$indigo-800: shade-color($indigo, 60%) !default;\n$indigo-900: shade-color($indigo, 80%) !default;\n\n$purple-100: tint-color($purple, 80%) !default;\n$purple-200: tint-color($purple, 60%) !default;\n$purple-300: tint-color($purple, 40%) !default;\n$purple-400: tint-color($purple, 20%) !default;\n$purple-500: $purple !default;\n$purple-600: shade-color($purple, 20%) !default;\n$purple-700: shade-color($purple, 40%) !default;\n$purple-800: shade-color($purple, 60%) !default;\n$purple-900: shade-color($purple, 80%) !default;\n\n$pink-100: tint-color($pink, 80%) !default;\n$pink-200: tint-color($pink, 60%) !default;\n$pink-300: tint-color($pink, 40%) !default;\n$pink-400: tint-color($pink, 20%) !default;\n$pink-500: $pink !default;\n$pink-600: shade-color($pink, 20%) !default;\n$pink-700: shade-color($pink, 40%) !default;\n$pink-800: shade-color($pink, 60%) !default;\n$pink-900: shade-color($pink, 80%) !default;\n\n$red-100: tint-color($red, 80%) !default;\n$red-200: tint-color($red, 60%) !default;\n$red-300: tint-color($red, 40%) !default;\n$red-400: tint-color($red, 20%) !default;\n$red-500: $red !default;\n$red-600: shade-color($red, 20%) !default;\n$red-700: shade-color($red, 40%) !default;\n$red-800: shade-color($red, 60%) !default;\n$red-900: shade-color($red, 80%) !default;\n\n$orange-100: tint-color($orange, 80%) !default;\n$orange-200: tint-color($orange, 60%) !default;\n$orange-300: tint-color($orange, 40%) !default;\n$orange-400: tint-color($orange, 20%) !default;\n$orange-500: $orange !default;\n$orange-600: shade-color($orange, 20%) !default;\n$orange-700: shade-color($orange, 40%) !default;\n$orange-800: shade-color($orange, 60%) !default;\n$orange-900: shade-color($orange, 80%) !default;\n\n$yellow-100: tint-color($yellow, 80%) !default;\n$yellow-200: tint-color($yellow, 60%) !default;\n$yellow-300: tint-color($yellow, 40%) !default;\n$yellow-400: tint-color($yellow, 20%) !default;\n$yellow-500: $yellow !default;\n$yellow-600: shade-color($yellow, 20%) !default;\n$yellow-700: shade-color($yellow, 40%) !default;\n$yellow-800: shade-color($yellow, 60%) !default;\n$yellow-900: shade-color($yellow, 80%) !default;\n\n$green-100: tint-color($green, 80%) !default;\n$green-200: tint-color($green, 60%) !default;\n$green-300: tint-color($green, 40%) !default;\n$green-400: tint-color($green, 20%) !default;\n$green-500: $green !default;\n$green-600: shade-color($green, 20%) !default;\n$green-700: shade-color($green, 40%) !default;\n$green-800: 
shade-color($green, 60%) !default;\n$green-900: shade-color($green, 80%) !default;\n\n$teal-100: tint-color($teal, 80%) !default;\n$teal-200: tint-color($teal, 60%) !default;\n$teal-300: tint-color($teal, 40%) !default;\n$teal-400: tint-color($teal, 20%) !default;\n$teal-500: $teal !default;\n$teal-600: shade-color($teal, 20%) !default;\n$teal-700: shade-color($teal, 40%) !default;\n$teal-800: shade-color($teal, 60%) !default;\n$teal-900: shade-color($teal, 80%) !default;\n\n$cyan-100: tint-color($cyan, 80%) !default;\n$cyan-200: tint-color($cyan, 60%) !default;\n$cyan-300: tint-color($cyan, 40%) !default;\n$cyan-400: tint-color($cyan, 20%) !default;\n$cyan-500: $cyan !default;\n$cyan-600: shade-color($cyan, 20%) !default;\n$cyan-700: shade-color($cyan, 40%) !default;\n$cyan-800: shade-color($cyan, 60%) !default;\n$cyan-900: shade-color($cyan, 80%) !default;\n// fusv-enable\n\n// Characters which are escaped by the escape-svg function\n$escaped-characters: (\n (\"<\", \"%3c\"),\n (\">\", \"%3e\"),\n (\"#\", \"%23\"),\n (\"(\", \"%28\"),\n (\")\", \"%29\"),\n) !default;\n\n// Options\n//\n// Quickly modify global styling by enabling or disabling optional features.\n\n$enable-caret: true !default;\n$enable-rounded: true !default;\n$enable-shadows: false !default;\n$enable-gradients: false !default;\n$enable-transitions: true !default;\n$enable-reduced-motion: true !default;\n$enable-smooth-scroll: true !default;\n$enable-grid-classes: true !default;\n$enable-button-pointers: true !default;\n$enable-rfs: true !default;\n$enable-validation-icons: true !default;\n$enable-negative-margins: false !default;\n$enable-deprecation-messages: true !default;\n$enable-important-utilities: true !default;\n\n// Prefix for :root CSS variables\n\n$variable-prefix: bs- !default;\n\n// Gradient\n//\n// The gradient which is added to components if `$enable-gradients` is `true`\n// This gradient is also added to elements with `.bg-gradient`\n// scss-docs-start variable-gradient\n$gradient: linear-gradient(180deg, rgba($white, .15), rgba($white, 0)) !default;\n// scss-docs-end variable-gradient\n\n// Spacing\n//\n// Control the default styling of most Bootstrap elements by modifying these\n// variables. 
Mostly focused on spacing.\n// You can add more entries to the $spacers map, should you need more variation.\n\n// scss-docs-start spacer-variables-maps\n$spacer: 1rem !default;\n$spacers: (\n 0: 0,\n 1: $spacer * .25,\n 2: $spacer * .5,\n 3: $spacer,\n 4: $spacer * 1.5,\n 5: $spacer * 3,\n) !default;\n\n$negative-spacers: if($enable-negative-margins, negativify-map($spacers), null) !default;\n// scss-docs-end spacer-variables-maps\n\n// Position\n//\n// Define the edge positioning anchors of the position utilities.\n\n// scss-docs-start position-map\n$position-values: (\n 0: 0,\n 50: 50%,\n 100: 100%\n) !default;\n// scss-docs-end position-map\n\n// Body\n//\n// Settings for the `<body>` element.\n\n$body-bg: $white !default;\n$body-color: $gray-900 !default;\n$body-text-align: null !default;\n\n\n// Links\n//\n// Style anchor elements.\n\n$link-color: $primary !default;\n$link-decoration: underline !default;\n$link-shade-percentage: 20% !default;\n$link-hover-color: shift-color($link-color, $link-shade-percentage) !default;\n$link-hover-decoration: null !default;\n\n$stretched-link-pseudo-element: after !default;\n$stretched-link-z-index: 1 !default;\n\n// Paragraphs\n//\n// Style p element.\n\n$paragraph-margin-bottom: 1rem !default;\n\n\n// Grid breakpoints\n//\n// Define the minimum dimensions at which your layout will change,\n// adapting to different screen sizes, for use in media queries.\n\n// scss-docs-start grid-breakpoints\n$grid-breakpoints: (\n xs: 0,\n sm: 576px,\n md: 768px,\n lg: 992px,\n xl: 1200px,\n xxl: 1400px\n) !default;\n// scss-docs-end grid-breakpoints\n\n@include _assert-ascending($grid-breakpoints, \"$grid-breakpoints\");\n@include _assert-starts-at-zero($grid-breakpoints, \"$grid-breakpoints\");\n\n\n// Grid containers\n//\n// Define the maximum width of `.container` for different screen sizes.\n\n// scss-docs-start container-max-widths\n$container-max-widths: (\n sm: 540px,\n md: 720px,\n lg: 960px,\n xl: 1140px,\n xxl: 1320px\n) !default;\n// scss-docs-end container-max-widths\n\n@include _assert-ascending($container-max-widths, \"$container-max-widths\");\n\n\n// Grid columns\n//\n// Set the number of columns and specify the width of the gutters.\n\n$grid-columns: 12 !default;\n$grid-gutter-width: 1.5rem !default;\n$grid-row-columns: 6 !default;\n\n$gutters: $spacers !default;\n\n// Container padding\n\n$container-padding-x: $grid-gutter-width * .5 !default;\n\n\n// Components\n//\n// Define common padding and border radius sizes and more.\n\n// scss-docs-start border-variables\n$border-width: 1px !default;\n$border-widths: (\n 1: 1px,\n 2: 2px,\n 3: 3px,\n 4: 4px,\n 5: 5px\n) !default;\n\n$border-color: $gray-300 !default;\n// scss-docs-end border-variables\n\n// scss-docs-start border-radius-variables\n$border-radius: .25rem !default;\n$border-radius-sm: .2rem !default;\n$border-radius-lg: .3rem !default;\n$border-radius-pill: 50rem !default;\n// scss-docs-end border-radius-variables\n\n// scss-docs-start box-shadow-variables\n$box-shadow: 0 .5rem 1rem rgba($black, .15) !default;\n$box-shadow-sm: 0 .125rem .25rem rgba($black, .075) !default;\n$box-shadow-lg: 0 1rem 3rem rgba($black, .175) !default;\n$box-shadow-inset: inset 0 1px 2px rgba($black, .075) !default;\n// scss-docs-end box-shadow-variables\n\n$component-active-color: $white !default;\n$component-active-bg: $primary !default;\n\n// scss-docs-start caret-variables\n$caret-width: .3em !default;\n$caret-vertical-align: $caret-width * .85 !default;\n$caret-spacing: $caret-width * .85 !default;\n//
scss-docs-end caret-variables\n\n$transition-base: all .2s ease-in-out !default;\n$transition-fade: opacity .15s linear !default;\n// scss-docs-start collapse-transition\n$transition-collapse: height .35s ease !default;\n// scss-docs-end collapse-transition\n\n// stylelint-disable function-disallowed-list\n// scss-docs-start aspect-ratios\n$aspect-ratios: (\n \"1x1\": 100%,\n \"4x3\": calc(3 / 4 * 100%),\n \"16x9\": calc(9 / 16 * 100%),\n \"21x9\": calc(9 / 21 * 100%)\n) !default;\n// scss-docs-end aspect-ratios\n// stylelint-enable function-disallowed-list\n\n// Typography\n//\n// Font, line-height, and color for body text, headings, and more.\n\n// scss-docs-start font-variables\n// stylelint-disable value-keyword-case\n$font-family-sans-serif: system-ui, -apple-system, \"Segoe UI\", Roboto, \"Helvetica Neue\", Arial, \"Noto Sans\", \"Liberation Sans\", sans-serif, \"Apple Color Emoji\", \"Segoe UI Emoji\", \"Segoe UI Symbol\", \"Noto Color Emoji\" !default;\n$font-family-monospace: SFMono-Regular, Menlo, Monaco, Consolas, \"Liberation Mono\", \"Courier New\", monospace !default;\n// stylelint-enable value-keyword-case\n$font-family-base: var(--#{$variable-prefix}font-sans-serif) !default;\n$font-family-code: var(--#{$variable-prefix}font-monospace) !default;\n\n// $font-size-root affects the value of `rem`, which is used for as well font sizes, paddings, and margins\n// $font-size-base affects the font size of the body text\n$font-size-root: null !default;\n$font-size-base: 1rem !default; // Assumes the browser default, typically `16px`\n$font-size-sm: $font-size-base * .875 !default;\n$font-size-lg: $font-size-base * 1.25 !default;\n\n$font-weight-lighter: lighter !default;\n$font-weight-light: 300 !default;\n$font-weight-normal: 400 !default;\n$font-weight-bold: 700 !default;\n$font-weight-bolder: bolder !default;\n\n$font-weight-base: $font-weight-normal !default;\n\n$line-height-base: 1.5 !default;\n$line-height-sm: 1.25 !default;\n$line-height-lg: 2 !default;\n\n$h1-font-size: $font-size-base * 2.5 !default;\n$h2-font-size: $font-size-base * 2 !default;\n$h3-font-size: $font-size-base * 1.75 !default;\n$h4-font-size: $font-size-base * 1.5 !default;\n$h5-font-size: $font-size-base * 1.25 !default;\n$h6-font-size: $font-size-base !default;\n// scss-docs-end font-variables\n\n// scss-docs-start font-sizes\n$font-sizes: (\n 1: $h1-font-size,\n 2: $h2-font-size,\n 3: $h3-font-size,\n 4: $h4-font-size,\n 5: $h5-font-size,\n 6: $h6-font-size\n) !default;\n// scss-docs-end font-sizes\n\n// scss-docs-start headings-variables\n$headings-margin-bottom: $spacer * .5 !default;\n$headings-font-family: null !default;\n$headings-font-style: null !default;\n$headings-font-weight: 500 !default;\n$headings-line-height: 1.2 !default;\n$headings-color: null !default;\n// scss-docs-end headings-variables\n\n// scss-docs-start display-headings\n$display-font-sizes: (\n 1: 5rem,\n 2: 4.5rem,\n 3: 4rem,\n 4: 3.5rem,\n 5: 3rem,\n 6: 2.5rem\n) !default;\n\n$display-font-weight: 300 !default;\n$display-line-height: $headings-line-height !default;\n// scss-docs-end display-headings\n\n// scss-docs-start type-variables\n$lead-font-size: $font-size-base * 1.25 !default;\n$lead-font-weight: 300 !default;\n\n$small-font-size: .875em !default;\n\n$sub-sup-font-size: .75em !default;\n\n$text-muted: $gray-600 !default;\n\n$initialism-font-size: $small-font-size !default;\n\n$blockquote-margin-y: $spacer !default;\n$blockquote-font-size: $font-size-base * 1.25 !default;\n$blockquote-footer-color: $gray-600 
!default;\n$blockquote-footer-font-size: $small-font-size !default;\n\n$hr-margin-y: $spacer !default;\n$hr-color: inherit !default;\n$hr-height: $border-width !default;\n$hr-opacity: .25 !default;\n\n$legend-margin-bottom: .5rem !default;\n$legend-font-size: 1.5rem !default;\n$legend-font-weight: null !default;\n\n$mark-padding: .2em !default;\n\n$dt-font-weight: $font-weight-bold !default;\n\n$nested-kbd-font-weight: $font-weight-bold !default;\n\n$list-inline-padding: .5rem !default;\n\n$mark-bg: #fcf8e3 !default;\n// scss-docs-end type-variables\n\n\n// Tables\n//\n// Customizes the `.table` component with basic values, each used across all table variations.\n\n// scss-docs-start table-variables\n$table-cell-padding-y: .5rem !default;\n$table-cell-padding-x: .5rem !default;\n$table-cell-padding-y-sm: .25rem !default;\n$table-cell-padding-x-sm: .25rem !default;\n\n$table-cell-vertical-align: top !default;\n\n$table-color: $body-color !default;\n$table-bg: transparent !default;\n$table-accent-bg: transparent !default;\n\n$table-th-font-weight: null !default;\n\n$table-striped-color: $table-color !default;\n$table-striped-bg-factor: .05 !default;\n$table-striped-bg: rgba($black, $table-striped-bg-factor) !default;\n\n$table-active-color: $table-color !default;\n$table-active-bg-factor: .1 !default;\n$table-active-bg: rgba($black, $table-active-bg-factor) !default;\n\n$table-hover-color: $table-color !default;\n$table-hover-bg-factor: .075 !default;\n$table-hover-bg: rgba($black, $table-hover-bg-factor) !default;\n\n$table-border-factor: .1 !default;\n$table-border-width: $border-width !default;\n$table-border-color: $border-color !default;\n\n$table-striped-order: odd !default;\n\n$table-group-separator-color: currentColor !default;\n\n$table-caption-color: $text-muted !default;\n\n$table-bg-scale: -80% !default;\n// scss-docs-end table-variables\n\n// scss-docs-start table-loop\n$table-variants: (\n \"primary\": shift-color($primary, $table-bg-scale),\n \"secondary\": shift-color($secondary, $table-bg-scale),\n \"success\": shift-color($success, $table-bg-scale),\n \"info\": shift-color($info, $table-bg-scale),\n \"warning\": shift-color($warning, $table-bg-scale),\n \"danger\": shift-color($danger, $table-bg-scale),\n \"light\": $light,\n \"dark\": $dark,\n) !default;\n// scss-docs-end table-loop\n\n\n// Buttons + Forms\n//\n// Shared variables that are reassigned to `$input-` and `$btn-` specific variables.\n\n// scss-docs-start input-btn-variables\n$input-btn-padding-y: .375rem !default;\n$input-btn-padding-x: .75rem !default;\n$input-btn-font-family: null !default;\n$input-btn-font-size: $font-size-base !default;\n$input-btn-line-height: $line-height-base !default;\n\n$input-btn-focus-width: .25rem !default;\n$input-btn-focus-color-opacity: .25 !default;\n$input-btn-focus-color: rgba($component-active-bg, $input-btn-focus-color-opacity) !default;\n$input-btn-focus-blur: 0 !default;\n$input-btn-focus-box-shadow: 0 0 $input-btn-focus-blur $input-btn-focus-width $input-btn-focus-color !default;\n\n$input-btn-padding-y-sm: .25rem !default;\n$input-btn-padding-x-sm: .5rem !default;\n$input-btn-font-size-sm: $font-size-sm !default;\n\n$input-btn-padding-y-lg: .5rem !default;\n$input-btn-padding-x-lg: 1rem !default;\n$input-btn-font-size-lg: $font-size-lg !default;\n\n$input-btn-border-width: $border-width !default;\n// scss-docs-end input-btn-variables\n\n\n// Buttons\n//\n// For each of Bootstrap's buttons, define text, background, and border color.\n\n// scss-docs-start 
btn-variables\n$btn-padding-y: $input-btn-padding-y !default;\n$btn-padding-x: $input-btn-padding-x !default;\n$btn-font-family: $input-btn-font-family !default;\n$btn-font-size: $input-btn-font-size !default;\n$btn-line-height: $input-btn-line-height !default;\n$btn-white-space: null !default; // Set to `nowrap` to prevent text wrapping\n\n$btn-padding-y-sm: $input-btn-padding-y-sm !default;\n$btn-padding-x-sm: $input-btn-padding-x-sm !default;\n$btn-font-size-sm: $input-btn-font-size-sm !default;\n\n$btn-padding-y-lg: $input-btn-padding-y-lg !default;\n$btn-padding-x-lg: $input-btn-padding-x-lg !default;\n$btn-font-size-lg: $input-btn-font-size-lg !default;\n\n$btn-border-width: $input-btn-border-width !default;\n\n$btn-font-weight: $font-weight-normal !default;\n$btn-box-shadow: inset 0 1px 0 rgba($white, .15), 0 1px 1px rgba($black, .075) !default;\n$btn-focus-width: $input-btn-focus-width !default;\n$btn-focus-box-shadow: $input-btn-focus-box-shadow !default;\n$btn-disabled-opacity: .65 !default;\n$btn-active-box-shadow: inset 0 3px 5px rgba($black, .125) !default;\n\n$btn-link-color: $link-color !default;\n$btn-link-hover-color: $link-hover-color !default;\n$btn-link-disabled-color: $gray-600 !default;\n\n// Allows for customizing button radius independently from global border radius\n$btn-border-radius: $border-radius !default;\n$btn-border-radius-sm: $border-radius-sm !default;\n$btn-border-radius-lg: $border-radius-lg !default;\n\n$btn-transition: color .15s ease-in-out, background-color .15s ease-in-out, border-color .15s ease-in-out, box-shadow .15s ease-in-out !default;\n\n$btn-hover-bg-shade-amount: 15% !default;\n$btn-hover-bg-tint-amount: 15% !default;\n$btn-hover-border-shade-amount: 20% !default;\n$btn-hover-border-tint-amount: 10% !default;\n$btn-active-bg-shade-amount: 20% !default;\n$btn-active-bg-tint-amount: 20% !default;\n$btn-active-border-shade-amount: 25% !default;\n$btn-active-border-tint-amount: 10% !default;\n// scss-docs-end btn-variables\n\n\n// Forms\n\n// scss-docs-start form-text-variables\n$form-text-margin-top: .25rem !default;\n$form-text-font-size: $small-font-size !default;\n$form-text-font-style: null !default;\n$form-text-font-weight: null !default;\n$form-text-color: $text-muted !default;\n// scss-docs-end form-text-variables\n\n// scss-docs-start form-label-variables\n$form-label-margin-bottom: .5rem !default;\n$form-label-font-size: null !default;\n$form-label-font-style: null !default;\n$form-label-font-weight: null !default;\n$form-label-color: null !default;\n// scss-docs-end form-label-variables\n\n// scss-docs-start form-input-variables\n$input-padding-y: $input-btn-padding-y !default;\n$input-padding-x: $input-btn-padding-x !default;\n$input-font-family: $input-btn-font-family !default;\n$input-font-size: $input-btn-font-size !default;\n$input-font-weight: $font-weight-base !default;\n$input-line-height: $input-btn-line-height !default;\n\n$input-padding-y-sm: $input-btn-padding-y-sm !default;\n$input-padding-x-sm: $input-btn-padding-x-sm !default;\n$input-font-size-sm: $input-btn-font-size-sm !default;\n\n$input-padding-y-lg: $input-btn-padding-y-lg !default;\n$input-padding-x-lg: $input-btn-padding-x-lg !default;\n$input-font-size-lg: $input-btn-font-size-lg !default;\n\n$input-bg: $white !default;\n$input-disabled-bg: $gray-200 !default;\n$input-disabled-border-color: null !default;\n\n$input-color: $body-color !default;\n$input-border-color: $gray-400 !default;\n$input-border-width: $input-btn-border-width !default;\n$input-box-shadow: 
$box-shadow-inset !default;\n\n$input-border-radius: $border-radius !default;\n$input-border-radius-sm: $border-radius-sm !default;\n$input-border-radius-lg: $border-radius-lg !default;\n\n$input-focus-bg: $input-bg !default;\n$input-focus-border-color: tint-color($component-active-bg, 50%) !default;\n$input-focus-color: $input-color !default;\n$input-focus-width: $input-btn-focus-width !default;\n$input-focus-box-shadow: $input-btn-focus-box-shadow !default;\n\n$input-placeholder-color: $gray-600 !default;\n$input-plaintext-color: $body-color !default;\n\n$input-height-border: $input-border-width * 2 !default;\n\n$input-height-inner: add($input-line-height * 1em, $input-padding-y * 2) !default;\n$input-height-inner-half: add($input-line-height * .5em, $input-padding-y) !default;\n$input-height-inner-quarter: add($input-line-height * .25em, $input-padding-y * .5) !default;\n\n$input-height: add($input-line-height * 1em, add($input-padding-y * 2, $input-height-border, false)) !default;\n$input-height-sm: add($input-line-height * 1em, add($input-padding-y-sm * 2, $input-height-border, false)) !default;\n$input-height-lg: add($input-line-height * 1em, add($input-padding-y-lg * 2, $input-height-border, false)) !default;\n\n$input-transition: border-color .15s ease-in-out, box-shadow .15s ease-in-out !default;\n// scss-docs-end form-input-variables\n\n// scss-docs-start form-check-variables\n$form-check-input-width: 1em !default;\n$form-check-min-height: $font-size-base * $line-height-base !default;\n$form-check-padding-start: $form-check-input-width + .5em !default;\n$form-check-margin-bottom: .125rem !default;\n$form-check-label-color: null !default;\n$form-check-label-cursor: null !default;\n$form-check-transition: null !default;\n\n$form-check-input-active-filter: brightness(90%) !default;\n\n$form-check-input-bg: $input-bg !default;\n$form-check-input-border: 1px solid rgba($black, .25) !default;\n$form-check-input-border-radius: .25em !default;\n$form-check-radio-border-radius: 50% !default;\n$form-check-input-focus-border: $input-focus-border-color !default;\n$form-check-input-focus-box-shadow: $input-btn-focus-box-shadow !default;\n\n$form-check-input-checked-color: $component-active-color !default;\n$form-check-input-checked-bg-color: $component-active-bg !default;\n$form-check-input-checked-border-color: $form-check-input-checked-bg-color !default;\n$form-check-input-checked-bg-image: url(\"data:image/svg+xml,\") !default;\n$form-check-radio-checked-bg-image: url(\"data:image/svg+xml,\") !default;\n\n$form-check-input-indeterminate-color: $component-active-color !default;\n$form-check-input-indeterminate-bg-color: $component-active-bg !default;\n$form-check-input-indeterminate-border-color: $form-check-input-indeterminate-bg-color !default;\n$form-check-input-indeterminate-bg-image: url(\"data:image/svg+xml,\") !default;\n\n$form-check-input-disabled-opacity: .5 !default;\n$form-check-label-disabled-opacity: $form-check-input-disabled-opacity !default;\n$form-check-btn-check-disabled-opacity: $btn-disabled-opacity !default;\n\n$form-check-inline-margin-end: 1rem !default;\n// scss-docs-end form-check-variables\n\n// scss-docs-start form-switch-variables\n$form-switch-color: rgba(0, 0, 0, .25) !default;\n$form-switch-width: 2em !default;\n$form-switch-padding-start: $form-switch-width + .5em !default;\n$form-switch-bg-image: url(\"data:image/svg+xml,\") !default;\n$form-switch-border-radius: $form-switch-width !default;\n$form-switch-transition: background-position .15s ease-in-out 
!default;\n\n$form-switch-focus-color: $input-focus-border-color !default;\n$form-switch-focus-bg-image: url(\"data:image/svg+xml,\") !default;\n\n$form-switch-checked-color: $component-active-color !default;\n$form-switch-checked-bg-image: url(\"data:image/svg+xml,\") !default;\n$form-switch-checked-bg-position: right center !default;\n// scss-docs-end form-switch-variables\n\n// scss-docs-start input-group-variables\n$input-group-addon-padding-y: $input-padding-y !default;\n$input-group-addon-padding-x: $input-padding-x !default;\n$input-group-addon-font-weight: $input-font-weight !default;\n$input-group-addon-color: $input-color !default;\n$input-group-addon-bg: $gray-200 !default;\n$input-group-addon-border-color: $input-border-color !default;\n// scss-docs-end input-group-variables\n\n// scss-docs-start form-select-variables\n$form-select-padding-y: $input-padding-y !default;\n$form-select-padding-x: $input-padding-x !default;\n$form-select-font-family: $input-font-family !default;\n$form-select-font-size: $input-font-size !default;\n$form-select-indicator-padding: $form-select-padding-x * 3 !default; // Extra padding for background-image\n$form-select-font-weight: $input-font-weight !default;\n$form-select-line-height: $input-line-height !default;\n$form-select-color: $input-color !default;\n$form-select-bg: $input-bg !default;\n$form-select-disabled-color: null !default;\n$form-select-disabled-bg: $gray-200 !default;\n$form-select-disabled-border-color: $input-disabled-border-color !default;\n$form-select-bg-position: right $form-select-padding-x center !default;\n$form-select-bg-size: 16px 12px !default; // In pixels because image dimensions\n$form-select-indicator-color: $gray-800 !default;\n$form-select-indicator: url(\"data:image/svg+xml,\") !default;\n\n$form-select-feedback-icon-padding-end: $form-select-padding-x * 2.5 + $form-select-indicator-padding !default;\n$form-select-feedback-icon-position: center right $form-select-indicator-padding !default;\n$form-select-feedback-icon-size: $input-height-inner-half $input-height-inner-half !default;\n\n$form-select-border-width: $input-border-width !default;\n$form-select-border-color: $input-border-color !default;\n$form-select-border-radius: $border-radius !default;\n$form-select-box-shadow: $box-shadow-inset !default;\n\n$form-select-focus-border-color: $input-focus-border-color !default;\n$form-select-focus-width: $input-focus-width !default;\n$form-select-focus-box-shadow: 0 0 0 $form-select-focus-width $input-btn-focus-color !default;\n\n$form-select-padding-y-sm: $input-padding-y-sm !default;\n$form-select-padding-x-sm: $input-padding-x-sm !default;\n$form-select-font-size-sm: $input-font-size-sm !default;\n\n$form-select-padding-y-lg: $input-padding-y-lg !default;\n$form-select-padding-x-lg: $input-padding-x-lg !default;\n$form-select-font-size-lg: $input-font-size-lg !default;\n\n$form-select-transition: $input-transition !default;\n// scss-docs-end form-select-variables\n\n// scss-docs-start form-range-variables\n$form-range-track-width: 100% !default;\n$form-range-track-height: .5rem !default;\n$form-range-track-cursor: pointer !default;\n$form-range-track-bg: $gray-300 !default;\n$form-range-track-border-radius: 1rem !default;\n$form-range-track-box-shadow: $box-shadow-inset !default;\n\n$form-range-thumb-width: 1rem !default;\n$form-range-thumb-height: $form-range-thumb-width !default;\n$form-range-thumb-bg: $component-active-bg !default;\n$form-range-thumb-border: 0 !default;\n$form-range-thumb-border-radius: 1rem 
!default;\n$form-range-thumb-box-shadow: 0 .1rem .25rem rgba($black, .1) !default;\n$form-range-thumb-focus-box-shadow: 0 0 0 1px $body-bg, $input-focus-box-shadow !default;\n$form-range-thumb-focus-box-shadow-width: $input-focus-width !default; // For focus box shadow issue in Edge\n$form-range-thumb-active-bg: tint-color($component-active-bg, 70%) !default;\n$form-range-thumb-disabled-bg: $gray-500 !default;\n$form-range-thumb-transition: background-color .15s ease-in-out, border-color .15s ease-in-out, box-shadow .15s ease-in-out !default;\n// scss-docs-end form-range-variables\n\n// scss-docs-start form-file-variables\n$form-file-button-color: $input-color !default;\n$form-file-button-bg: $input-group-addon-bg !default;\n$form-file-button-hover-bg: shade-color($form-file-button-bg, 5%) !default;\n// scss-docs-end form-file-variables\n\n// scss-docs-start form-floating-variables\n$form-floating-height: add(3.5rem, $input-height-border) !default;\n$form-floating-line-height: 1.25 !default;\n$form-floating-padding-x: $input-padding-x !default;\n$form-floating-padding-y: 1rem !default;\n$form-floating-input-padding-t: 1.625rem !default;\n$form-floating-input-padding-b: .625rem !default;\n$form-floating-label-opacity: .65 !default;\n$form-floating-label-transform: scale(.85) translateY(-.5rem) translateX(.15rem) !default;\n$form-floating-transition: opacity .1s ease-in-out, transform .1s ease-in-out !default;\n// scss-docs-end form-floating-variables\n\n// Form validation\n\n// scss-docs-start form-feedback-variables\n$form-feedback-margin-top: $form-text-margin-top !default;\n$form-feedback-font-size: $form-text-font-size !default;\n$form-feedback-font-style: $form-text-font-style !default;\n$form-feedback-valid-color: $success !default;\n$form-feedback-invalid-color: $danger !default;\n\n$form-feedback-icon-valid-color: $form-feedback-valid-color !default;\n$form-feedback-icon-valid: url(\"data:image/svg+xml,\") !default;\n$form-feedback-icon-invalid-color: $form-feedback-invalid-color !default;\n$form-feedback-icon-invalid: url(\"data:image/svg+xml,\") !default;\n// scss-docs-end form-feedback-variables\n\n// scss-docs-start form-validation-states\n$form-validation-states: (\n \"valid\": (\n \"color\": $form-feedback-valid-color,\n \"icon\": $form-feedback-icon-valid\n ),\n \"invalid\": (\n \"color\": $form-feedback-invalid-color,\n \"icon\": $form-feedback-icon-invalid\n )\n) !default;\n// scss-docs-end form-validation-states\n\n// Z-index master list\n//\n// Warning: Avoid customizing these values. 
They're used for a bird's eye view\n// of components dependent on the z-axis and are designed to all work together.\n\n// scss-docs-start zindex-stack\n$zindex-dropdown: 1000 !default;\n$zindex-sticky: 1020 !default;\n$zindex-fixed: 1030 !default;\n$zindex-modal-backdrop: 1040 !default;\n$zindex-offcanvas: 1050 !default;\n$zindex-modal: 1060 !default;\n$zindex-popover: 1070 !default;\n$zindex-tooltip: 1080 !default;\n// scss-docs-end zindex-stack\n\n\n// Navs\n\n// scss-docs-start nav-variables\n$nav-link-padding-y: .5rem !default;\n$nav-link-padding-x: 1rem !default;\n$nav-link-font-size: null !default;\n$nav-link-font-weight: null !default;\n$nav-link-color: $link-color !default;\n$nav-link-hover-color: $link-hover-color !default;\n$nav-link-transition: color .15s ease-in-out, background-color .15s ease-in-out, border-color .15s ease-in-out !default;\n$nav-link-disabled-color: $gray-600 !default;\n\n$nav-tabs-border-color: $gray-300 !default;\n$nav-tabs-border-width: $border-width !default;\n$nav-tabs-border-radius: $border-radius !default;\n$nav-tabs-link-hover-border-color: $gray-200 $gray-200 $nav-tabs-border-color !default;\n$nav-tabs-link-active-color: $gray-700 !default;\n$nav-tabs-link-active-bg: $body-bg !default;\n$nav-tabs-link-active-border-color: $gray-300 $gray-300 $nav-tabs-link-active-bg !default;\n\n$nav-pills-border-radius: $border-radius !default;\n$nav-pills-link-active-color: $component-active-color !default;\n$nav-pills-link-active-bg: $component-active-bg !default;\n// scss-docs-end nav-variables\n\n\n// Navbar\n\n// scss-docs-start navbar-variables\n$navbar-padding-y: $spacer * .5 !default;\n$navbar-padding-x: null !default;\n\n$navbar-nav-link-padding-x: .5rem !default;\n\n$navbar-brand-font-size: $font-size-lg !default;\n// Compute the navbar-brand padding-y so the navbar-brand will have the same height as navbar-text and nav-link\n$nav-link-height: $font-size-base * $line-height-base + $nav-link-padding-y * 2 !default;\n$navbar-brand-height: $navbar-brand-font-size * $line-height-base !default;\n$navbar-brand-padding-y: ($nav-link-height - $navbar-brand-height) * .5 !default;\n$navbar-brand-margin-end: 1rem !default;\n\n$navbar-toggler-padding-y: .25rem !default;\n$navbar-toggler-padding-x: .75rem !default;\n$navbar-toggler-font-size: $font-size-lg !default;\n$navbar-toggler-border-radius: $btn-border-radius !default;\n$navbar-toggler-focus-width: $btn-focus-width !default;\n$navbar-toggler-transition: box-shadow .15s ease-in-out !default;\n// scss-docs-end navbar-variables\n\n// scss-docs-start navbar-theme-variables\n$navbar-dark-color: rgba($white, .55) !default;\n$navbar-dark-hover-color: rgba($white, .75) !default;\n$navbar-dark-active-color: $white !default;\n$navbar-dark-disabled-color: rgba($white, .25) !default;\n$navbar-dark-toggler-icon-bg: url(\"data:image/svg+xml,\") !default;\n$navbar-dark-toggler-border-color: rgba($white, .1) !default;\n\n$navbar-light-color: rgba($black, .55) !default;\n$navbar-light-hover-color: rgba($black, .7) !default;\n$navbar-light-active-color: rgba($black, .9) !default;\n$navbar-light-disabled-color: rgba($black, .3) !default;\n$navbar-light-toggler-icon-bg: url(\"data:image/svg+xml,\") !default;\n$navbar-light-toggler-border-color: rgba($black, .1) !default;\n\n$navbar-light-brand-color: $navbar-light-active-color !default;\n$navbar-light-brand-hover-color: $navbar-light-active-color !default;\n$navbar-dark-brand-color: $navbar-dark-active-color !default;\n$navbar-dark-brand-hover-color: $navbar-dark-active-color 
!default;\n// scss-docs-end navbar-theme-variables\n\n\n// Dropdowns\n//\n// Dropdown menu container and contents.\n\n// scss-docs-start dropdown-variables\n$dropdown-min-width: 10rem !default;\n$dropdown-padding-x: 0 !default;\n$dropdown-padding-y: .5rem !default;\n$dropdown-spacer: .125rem !default;\n$dropdown-font-size: $font-size-base !default;\n$dropdown-color: $body-color !default;\n$dropdown-bg: $white !default;\n$dropdown-border-color: rgba($black, .15) !default;\n$dropdown-border-radius: $border-radius !default;\n$dropdown-border-width: $border-width !default;\n$dropdown-inner-border-radius: subtract($dropdown-border-radius, $dropdown-border-width) !default;\n$dropdown-divider-bg: $dropdown-border-color !default;\n$dropdown-divider-margin-y: $spacer * .5 !default;\n$dropdown-box-shadow: $box-shadow !default;\n\n$dropdown-link-color: $gray-900 !default;\n$dropdown-link-hover-color: shade-color($gray-900, 10%) !default;\n$dropdown-link-hover-bg: $gray-200 !default;\n\n$dropdown-link-active-color: $component-active-color !default;\n$dropdown-link-active-bg: $component-active-bg !default;\n\n$dropdown-link-disabled-color: $gray-500 !default;\n\n$dropdown-item-padding-y: $spacer * .25 !default;\n$dropdown-item-padding-x: $spacer !default;\n\n$dropdown-header-color: $gray-600 !default;\n$dropdown-header-padding: $dropdown-padding-y $dropdown-item-padding-x !default;\n// scss-docs-end dropdown-variables\n\n// scss-docs-start dropdown-dark-variables\n$dropdown-dark-color: $gray-300 !default;\n$dropdown-dark-bg: $gray-800 !default;\n$dropdown-dark-border-color: $dropdown-border-color !default;\n$dropdown-dark-divider-bg: $dropdown-divider-bg !default;\n$dropdown-dark-box-shadow: null !default;\n$dropdown-dark-link-color: $dropdown-dark-color !default;\n$dropdown-dark-link-hover-color: $white !default;\n$dropdown-dark-link-hover-bg: rgba($white, .15) !default;\n$dropdown-dark-link-active-color: $dropdown-link-active-color !default;\n$dropdown-dark-link-active-bg: $dropdown-link-active-bg !default;\n$dropdown-dark-link-disabled-color: $gray-500 !default;\n$dropdown-dark-header-color: $gray-500 !default;\n// scss-docs-end dropdown-dark-variables\n\n\n// Pagination\n\n// scss-docs-start pagination-variables\n$pagination-padding-y: .375rem !default;\n$pagination-padding-x: .75rem !default;\n$pagination-padding-y-sm: .25rem !default;\n$pagination-padding-x-sm: .5rem !default;\n$pagination-padding-y-lg: .75rem !default;\n$pagination-padding-x-lg: 1.5rem !default;\n\n$pagination-color: $link-color !default;\n$pagination-bg: $white !default;\n$pagination-border-width: $border-width !default;\n$pagination-border-radius: $border-radius !default;\n$pagination-margin-start: -$pagination-border-width !default;\n$pagination-border-color: $gray-300 !default;\n\n$pagination-focus-color: $link-hover-color !default;\n$pagination-focus-bg: $gray-200 !default;\n$pagination-focus-box-shadow: $input-btn-focus-box-shadow !default;\n$pagination-focus-outline: 0 !default;\n\n$pagination-hover-color: $link-hover-color !default;\n$pagination-hover-bg: $gray-200 !default;\n$pagination-hover-border-color: $gray-300 !default;\n\n$pagination-active-color: $component-active-color !default;\n$pagination-active-bg: $component-active-bg !default;\n$pagination-active-border-color: $pagination-active-bg !default;\n\n$pagination-disabled-color: $gray-600 !default;\n$pagination-disabled-bg: $white !default;\n$pagination-disabled-border-color: $gray-300 !default;\n\n$pagination-transition: color .15s ease-in-out, background-color 
.15s ease-in-out, border-color .15s ease-in-out, box-shadow .15s ease-in-out !default;\n\n$pagination-border-radius-sm: $border-radius-sm !default;\n$pagination-border-radius-lg: $border-radius-lg !default;\n// scss-docs-end pagination-variables\n\n\n// Cards\n\n// scss-docs-start card-variables\n$card-spacer-y: $spacer !default;\n$card-spacer-x: $spacer !default;\n$card-title-spacer-y: $spacer * .5 !default;\n$card-border-width: $border-width !default;\n$card-border-radius: $border-radius !default;\n$card-border-color: rgba($black, .125) !default;\n$card-inner-border-radius: subtract($card-border-radius, $card-border-width) !default;\n$card-cap-padding-y: $card-spacer-y * .5 !default;\n$card-cap-padding-x: $card-spacer-x !default;\n$card-cap-bg: rgba($black, .03) !default;\n$card-cap-color: null !default;\n$card-height: null !default;\n$card-color: null !default;\n$card-bg: $white !default;\n$card-img-overlay-padding: $spacer !default;\n$card-group-margin: $grid-gutter-width * .5 !default;\n// scss-docs-end card-variables\n\n// Accordion\n\n// scss-docs-start accordion-variables\n$accordion-padding-y: 1rem !default;\n$accordion-padding-x: 1.25rem !default;\n$accordion-color: $body-color !default;\n$accordion-bg: $body-bg !default;\n$accordion-border-width: $border-width !default;\n$accordion-border-color: rgba($black, .125) !default;\n$accordion-border-radius: $border-radius !default;\n$accordion-inner-border-radius: subtract($accordion-border-radius, $accordion-border-width) !default;\n\n$accordion-body-padding-y: $accordion-padding-y !default;\n$accordion-body-padding-x: $accordion-padding-x !default;\n\n$accordion-button-padding-y: $accordion-padding-y !default;\n$accordion-button-padding-x: $accordion-padding-x !default;\n$accordion-button-color: $accordion-color !default;\n$accordion-button-bg: $accordion-bg !default;\n$accordion-transition: $btn-transition, border-radius .15s ease !default;\n$accordion-button-active-bg: tint-color($component-active-bg, 90%) !default;\n$accordion-button-active-color: shade-color($primary, 10%) !default;\n\n$accordion-button-focus-border-color: $input-focus-border-color !default;\n$accordion-button-focus-box-shadow: $btn-focus-box-shadow !default;\n\n$accordion-icon-width: 1.25rem !default;\n$accordion-icon-color: $accordion-color !default;\n$accordion-icon-active-color: $accordion-button-active-color !default;\n$accordion-icon-transition: transform .2s ease-in-out !default;\n$accordion-icon-transform: rotate(-180deg) !default;\n\n$accordion-button-icon: url(\"data:image/svg+xml,\") !default;\n$accordion-button-active-icon: url(\"data:image/svg+xml,\") !default;\n// scss-docs-end accordion-variables\n\n// Tooltips\n\n// scss-docs-start tooltip-variables\n$tooltip-font-size: $font-size-sm !default;\n$tooltip-max-width: 200px !default;\n$tooltip-color: $white !default;\n$tooltip-bg: $black !default;\n$tooltip-border-radius: $border-radius !default;\n$tooltip-opacity: .9 !default;\n$tooltip-padding-y: $spacer * .25 !default;\n$tooltip-padding-x: $spacer * .5 !default;\n$tooltip-margin: 0 !default;\n\n$tooltip-arrow-width: .8rem !default;\n$tooltip-arrow-height: .4rem !default;\n$tooltip-arrow-color: $tooltip-bg !default;\n// scss-docs-end tooltip-variables\n\n// Form tooltips must come after regular tooltips\n// scss-docs-start tooltip-feedback-variables\n$form-feedback-tooltip-padding-y: $tooltip-padding-y !default;\n$form-feedback-tooltip-padding-x: $tooltip-padding-x !default;\n$form-feedback-tooltip-font-size: $tooltip-font-size 
!default;\n$form-feedback-tooltip-line-height: null !default;\n$form-feedback-tooltip-opacity: $tooltip-opacity !default;\n$form-feedback-tooltip-border-radius: $tooltip-border-radius !default;\n// scss-docs-end tooltip-feedback-variables\n\n\n// Popovers\n\n// scss-docs-start popover-variables\n$popover-font-size: $font-size-sm !default;\n$popover-bg: $white !default;\n$popover-max-width: 276px !default;\n$popover-border-width: $border-width !default;\n$popover-border-color: rgba($black, .2) !default;\n$popover-border-radius: $border-radius-lg !default;\n$popover-inner-border-radius: subtract($popover-border-radius, $popover-border-width) !default;\n$popover-box-shadow: $box-shadow !default;\n\n$popover-header-bg: shade-color($popover-bg, 6%) !default;\n$popover-header-color: $headings-color !default;\n$popover-header-padding-y: .5rem !default;\n$popover-header-padding-x: $spacer !default;\n\n$popover-body-color: $body-color !default;\n$popover-body-padding-y: $spacer !default;\n$popover-body-padding-x: $spacer !default;\n\n$popover-arrow-width: 1rem !default;\n$popover-arrow-height: .5rem !default;\n$popover-arrow-color: $popover-bg !default;\n\n$popover-arrow-outer-color: fade-in($popover-border-color, .05) !default;\n// scss-docs-end popover-variables\n\n\n// Toasts\n\n// scss-docs-start toast-variables\n$toast-max-width: 350px !default;\n$toast-padding-x: .75rem !default;\n$toast-padding-y: .5rem !default;\n$toast-font-size: .875rem !default;\n$toast-color: null !default;\n$toast-background-color: rgba($white, .85) !default;\n$toast-border-width: 1px !default;\n$toast-border-color: rgba(0, 0, 0, .1) !default;\n$toast-border-radius: $border-radius !default;\n$toast-box-shadow: $box-shadow !default;\n$toast-spacing: $container-padding-x !default;\n\n$toast-header-color: $gray-600 !default;\n$toast-header-background-color: rgba($white, .85) !default;\n$toast-header-border-color: rgba(0, 0, 0, .05) !default;\n// scss-docs-end toast-variables\n\n\n// Badges\n\n// scss-docs-start badge-variables\n$badge-font-size: .75em !default;\n$badge-font-weight: $font-weight-bold !default;\n$badge-color: $white !default;\n$badge-padding-y: .35em !default;\n$badge-padding-x: .65em !default;\n$badge-border-radius: $border-radius !default;\n// scss-docs-end badge-variables\n\n\n// Modals\n\n// scss-docs-start modal-variables\n$modal-inner-padding: $spacer !default;\n\n$modal-footer-margin-between: .5rem !default;\n\n$modal-dialog-margin: .5rem !default;\n$modal-dialog-margin-y-sm-up: 1.75rem !default;\n\n$modal-title-line-height: $line-height-base !default;\n\n$modal-content-color: null !default;\n$modal-content-bg: $white !default;\n$modal-content-border-color: rgba($black, .2) !default;\n$modal-content-border-width: $border-width !default;\n$modal-content-border-radius: $border-radius-lg !default;\n$modal-content-inner-border-radius: subtract($modal-content-border-radius, $modal-content-border-width) !default;\n$modal-content-box-shadow-xs: $box-shadow-sm !default;\n$modal-content-box-shadow-sm-up: $box-shadow !default;\n\n$modal-backdrop-bg: $black !default;\n$modal-backdrop-opacity: .5 !default;\n$modal-header-border-color: $border-color !default;\n$modal-footer-border-color: $modal-header-border-color !default;\n$modal-header-border-width: $modal-content-border-width !default;\n$modal-footer-border-width: $modal-header-border-width !default;\n$modal-header-padding-y: $modal-inner-padding !default;\n$modal-header-padding-x: $modal-inner-padding !default;\n$modal-header-padding: $modal-header-padding-y 
$modal-header-padding-x !default; // Keep this for backwards compatibility\n\n$modal-sm: 300px !default;\n$modal-md: 500px !default;\n$modal-lg: 800px !default;\n$modal-xl: 1140px !default;\n\n$modal-fade-transform: translate(0, -50px) !default;\n$modal-show-transform: none !default;\n$modal-transition: transform .3s ease-out !default;\n$modal-scale-transform: scale(1.02) !default;\n// scss-docs-end modal-variables\n\n\n// Alerts\n//\n// Define alert colors, border radius, and padding.\n\n// scss-docs-start alert-variables\n$alert-padding-y: $spacer !default;\n$alert-padding-x: $spacer !default;\n$alert-margin-bottom: 1rem !default;\n$alert-border-radius: $border-radius !default;\n$alert-link-font-weight: $font-weight-bold !default;\n$alert-border-width: $border-width !default;\n$alert-bg-scale: -80% !default;\n$alert-border-scale: -70% !default;\n$alert-color-scale: 40% !default;\n$alert-dismissible-padding-r: $alert-padding-x * 3 !default; // 3x covers width of x plus default padding on either side\n// scss-docs-end alert-variables\n\n\n// Progress bars\n\n// scss-docs-start progress-variables\n$progress-height: 1rem !default;\n$progress-font-size: $font-size-base * .75 !default;\n$progress-bg: $gray-200 !default;\n$progress-border-radius: $border-radius !default;\n$progress-box-shadow: $box-shadow-inset !default;\n$progress-bar-color: $white !default;\n$progress-bar-bg: $primary !default;\n$progress-bar-animation-timing: 1s linear infinite !default;\n$progress-bar-transition: width .6s ease !default;\n// scss-docs-end progress-variables\n\n\n// List group\n\n// scss-docs-start list-group-variables\n$list-group-color: $gray-900 !default;\n$list-group-bg: $white !default;\n$list-group-border-color: rgba($black, .125) !default;\n$list-group-border-width: $border-width !default;\n$list-group-border-radius: $border-radius !default;\n\n$list-group-item-padding-y: $spacer * .5 !default;\n$list-group-item-padding-x: $spacer !default;\n$list-group-item-bg-scale: -80% !default;\n$list-group-item-color-scale: 40% !default;\n\n$list-group-hover-bg: $gray-100 !default;\n$list-group-active-color: $component-active-color !default;\n$list-group-active-bg: $component-active-bg !default;\n$list-group-active-border-color: $list-group-active-bg !default;\n\n$list-group-disabled-color: $gray-600 !default;\n$list-group-disabled-bg: $list-group-bg !default;\n\n$list-group-action-color: $gray-700 !default;\n$list-group-action-hover-color: $list-group-action-color !default;\n\n$list-group-action-active-color: $body-color !default;\n$list-group-action-active-bg: $gray-200 !default;\n// scss-docs-end list-group-variables\n\n\n// Image thumbnails\n\n// scss-docs-start thumbnail-variables\n$thumbnail-padding: .25rem !default;\n$thumbnail-bg: $body-bg !default;\n$thumbnail-border-width: $border-width !default;\n$thumbnail-border-color: $gray-300 !default;\n$thumbnail-border-radius: $border-radius !default;\n$thumbnail-box-shadow: $box-shadow-sm !default;\n// scss-docs-end thumbnail-variables\n\n\n// Figures\n\n// scss-docs-start figure-variables\n$figure-caption-font-size: $small-font-size !default;\n$figure-caption-color: $gray-600 !default;\n// scss-docs-end figure-variables\n\n\n// Breadcrumbs\n\n// scss-docs-start breadcrumb-variables\n$breadcrumb-font-size: null !default;\n$breadcrumb-padding-y: 0 !default;\n$breadcrumb-padding-x: 0 !default;\n$breadcrumb-item-padding-x: .5rem !default;\n$breadcrumb-margin-bottom: 1rem !default;\n$breadcrumb-bg: null !default;\n$breadcrumb-divider-color: $gray-600 
!default;\n$breadcrumb-active-color: $gray-600 !default;\n$breadcrumb-divider: quote(\"/\") !default;\n$breadcrumb-divider-flipped: $breadcrumb-divider !default;\n$breadcrumb-border-radius: null !default;\n// scss-docs-end breadcrumb-variables\n\n// Carousel\n\n// scss-docs-start carousel-variables\n$carousel-control-color: $white !default;\n$carousel-control-width: 15% !default;\n$carousel-control-opacity: .5 !default;\n$carousel-control-hover-opacity: .9 !default;\n$carousel-control-transition: opacity .15s ease !default;\n\n$carousel-indicator-width: 30px !default;\n$carousel-indicator-height: 3px !default;\n$carousel-indicator-hit-area-height: 10px !default;\n$carousel-indicator-spacer: 3px !default;\n$carousel-indicator-opacity: .5 !default;\n$carousel-indicator-active-bg: $white !default;\n$carousel-indicator-active-opacity: 1 !default;\n$carousel-indicator-transition: opacity .6s ease !default;\n\n$carousel-caption-width: 70% !default;\n$carousel-caption-color: $white !default;\n$carousel-caption-padding-y: 1.25rem !default;\n$carousel-caption-spacer: 1.25rem !default;\n\n$carousel-control-icon-width: 2rem !default;\n\n$carousel-control-prev-icon-bg: url(\"data:image/svg+xml,\") !default;\n$carousel-control-next-icon-bg: url(\"data:image/svg+xml,\") !default;\n\n$carousel-transition-duration: .6s !default;\n$carousel-transition: transform $carousel-transition-duration ease-in-out !default; // Define transform transition first if using multiple transitions (e.g., `transform 2s ease, opacity .5s ease-out`)\n\n$carousel-dark-indicator-active-bg: $black !default;\n$carousel-dark-caption-color: $black !default;\n$carousel-dark-control-icon-filter: invert(1) grayscale(100) !default;\n// scss-docs-end carousel-variables\n\n\n// Spinners\n\n// scss-docs-start spinner-variables\n$spinner-width: 2rem !default;\n$spinner-height: $spinner-width !default;\n$spinner-vertical-align: -.125em !default;\n$spinner-border-width: .25em !default;\n$spinner-animation-speed: .75s !default;\n\n$spinner-width-sm: 1rem !default;\n$spinner-height-sm: $spinner-width-sm !default;\n$spinner-border-width-sm: .2em !default;\n// scss-docs-end spinner-variables\n\n\n// Close\n\n// scss-docs-start close-variables\n$btn-close-width: 1em !default;\n$btn-close-height: $btn-close-width !default;\n$btn-close-padding-x: .25em !default;\n$btn-close-padding-y: $btn-close-padding-x !default;\n$btn-close-color: $black !default;\n$btn-close-bg: url(\"data:image/svg+xml,\") !default;\n$btn-close-focus-shadow: $input-btn-focus-box-shadow !default;\n$btn-close-opacity: .5 !default;\n$btn-close-hover-opacity: .75 !default;\n$btn-close-focus-opacity: 1 !default;\n$btn-close-disabled-opacity: .25 !default;\n$btn-close-white-filter: invert(1) grayscale(100%) brightness(200%) !default;\n// scss-docs-end close-variables\n\n\n// Offcanvas\n\n// scss-docs-start offcanvas-variables\n$offcanvas-padding-y: $modal-inner-padding !default;\n$offcanvas-padding-x: $modal-inner-padding !default;\n$offcanvas-horizontal-width: 400px !default;\n$offcanvas-vertical-height: 30vh !default;\n$offcanvas-transition-duration: .3s !default;\n$offcanvas-border-color: $modal-content-border-color !default;\n$offcanvas-border-width: $modal-content-border-width !default;\n$offcanvas-title-line-height: $modal-title-line-height !default;\n$offcanvas-bg-color: $modal-content-bg !default;\n$offcanvas-color: $modal-content-color !default;\n$offcanvas-box-shadow: $modal-content-box-shadow-xs !default;\n// scss-docs-end offcanvas-variables\n\n// Code\n\n$code-font-size: 
$small-font-size !default;\n$code-color: $pink !default;\n\n$kbd-padding-y: .2rem !default;\n$kbd-padding-x: .4rem !default;\n$kbd-font-size: $code-font-size !default;\n$kbd-color: $white !default;\n$kbd-bg: $gray-900 !default;\n\n$pre-color: null !default;\n", + "// Toggles\n//\n// Used in conjunction with global variables to enable certain theme features.\n\n// Vendor\n@import \"vendor/rfs\";\n\n// Deprecate\n@import \"mixins/deprecate\";\n\n// Helpers\n@import \"mixins/breakpoints\";\n@import \"mixins/color-scheme\";\n@import \"mixins/image\";\n@import \"mixins/resize\";\n@import \"mixins/visually-hidden\";\n@import \"mixins/reset-text\";\n@import \"mixins/text-truncate\";\n\n// Utilities\n@import \"mixins/utilities\";\n\n// Components\n@import \"mixins/alert\";\n@import \"mixins/buttons\";\n@import \"mixins/caret\";\n@import \"mixins/pagination\";\n@import \"mixins/lists\";\n@import \"mixins/list-group\";\n@import \"mixins/forms\";\n@import \"mixins/table-variants\";\n\n// Skins\n@import \"mixins/border-radius\";\n@import \"mixins/box-shadow\";\n@import \"mixins/gradients\";\n@import \"mixins/transition\";\n\n// Layout\n@import \"mixins/clearfix\";\n@import \"mixins/container\";\n@import \"mixins/grid\";\n", + "// stylelint-disable property-blacklist, scss/dollar-variable-default\n\n// SCSS RFS mixin\n//\n// Automated responsive values for font sizes, paddings, margins and much more\n//\n// Licensed under MIT (https://github.com/twbs/rfs/blob/main/LICENSE)\n\n// Configuration\n\n// Base value\n$rfs-base-value: 1.25rem !default;\n$rfs-unit: rem !default;\n\n@if $rfs-unit != rem and $rfs-unit != px {\n @error \"`#{$rfs-unit}` is not a valid unit for $rfs-unit. Use `px` or `rem`.\";\n}\n\n// Breakpoint at where values start decreasing if screen width is smaller\n$rfs-breakpoint: 1200px !default;\n$rfs-breakpoint-unit: px !default;\n\n@if $rfs-breakpoint-unit != px and $rfs-breakpoint-unit != em and $rfs-breakpoint-unit != rem {\n @error \"`#{$rfs-breakpoint-unit}` is not a valid unit for $rfs-breakpoint-unit. Use `px`, `em` or `rem`.\";\n}\n\n// Resize values based on screen height and width\n$rfs-two-dimensional: false !default;\n\n// Factor of decrease\n$rfs-factor: 10 !default;\n\n@if type-of($rfs-factor) != number or $rfs-factor <= 1 {\n @error \"`#{$rfs-factor}` is not a valid $rfs-factor, it must be greater than 1.\";\n}\n\n// Mode. Possibilities: \"min-media-query\", \"max-media-query\"\n$rfs-mode: min-media-query !default;\n\n// Generate enable or disable classes. 
Possibilities: false, \"enable\" or \"disable\"\n$rfs-class: false !default;\n\n// 1 rem = $rfs-rem-value px\n$rfs-rem-value: 16 !default;\n\n// Safari iframe resize bug: https://github.com/twbs/rfs/issues/14\n$rfs-safari-iframe-resize-bug-fix: false !default;\n\n// Disable RFS by setting $enable-rfs to false\n$enable-rfs: true !default;\n\n// Cache $rfs-base-value unit\n$rfs-base-value-unit: unit($rfs-base-value);\n\n@function divide($dividend, $divisor, $precision: 10) {\n $sign: if($dividend > 0 and $divisor > 0 or $dividend < 0 and $divisor < 0, 1, -1);\n $dividend: abs($dividend);\n $divisor: abs($divisor);\n @if $dividend == 0 {\n @return 0;\n }\n @if $divisor == 0 {\n @error \"Cannot divide by 0\";\n }\n $remainder: $dividend;\n $result: 0;\n $factor: 10;\n @while ($remainder > 0 and $precision >= 0) {\n $quotient: 0;\n @while ($remainder >= $divisor) {\n $remainder: $remainder - $divisor;\n $quotient: $quotient + 1;\n }\n $result: $result * 10 + $quotient;\n $factor: $factor * .1;\n $remainder: $remainder * 10;\n $precision: $precision - 1;\n @if ($precision < 0 and $remainder >= $divisor * 5) {\n $result: $result + 1;\n }\n }\n $result: $result * $factor * $sign;\n $dividend-unit: unit($dividend);\n $divisor-unit: unit($divisor);\n $unit-map: (\n \"px\": 1px,\n \"rem\": 1rem,\n \"em\": 1em,\n \"%\": 1%\n );\n @if ($dividend-unit != $divisor-unit and map-has-key($unit-map, $dividend-unit)) {\n $result: $result * map-get($unit-map, $dividend-unit);\n }\n @return $result;\n}\n\n// Remove px-unit from $rfs-base-value for calculations\n@if $rfs-base-value-unit == px {\n $rfs-base-value: divide($rfs-base-value, $rfs-base-value * 0 + 1);\n}\n@else if $rfs-base-value-unit == rem {\n $rfs-base-value: divide($rfs-base-value, divide($rfs-base-value * 0 + 1, $rfs-rem-value));\n}\n\n// Cache $rfs-breakpoint unit to prevent multiple calls\n$rfs-breakpoint-unit-cache: unit($rfs-breakpoint);\n\n// Remove unit from $rfs-breakpoint for calculations\n@if $rfs-breakpoint-unit-cache == px {\n $rfs-breakpoint: divide($rfs-breakpoint, $rfs-breakpoint * 0 + 1);\n}\n@else if $rfs-breakpoint-unit-cache == rem or $rfs-breakpoint-unit-cache == \"em\" {\n $rfs-breakpoint: divide($rfs-breakpoint, divide($rfs-breakpoint * 0 + 1, $rfs-rem-value));\n}\n\n// Calculate the media query value\n$rfs-mq-value: if($rfs-breakpoint-unit == px, #{$rfs-breakpoint}px, #{divide($rfs-breakpoint, $rfs-rem-value)}#{$rfs-breakpoint-unit});\n$rfs-mq-property-width: if($rfs-mode == max-media-query, max-width, min-width);\n$rfs-mq-property-height: if($rfs-mode == max-media-query, max-height, min-height);\n\n// Internal mixin used to determine which media query needs to be used\n@mixin _rfs-media-query {\n @if $rfs-two-dimensional {\n @if $rfs-mode == max-media-query {\n @media (#{$rfs-mq-property-width}: #{$rfs-mq-value}), (#{$rfs-mq-property-height}: #{$rfs-mq-value}) {\n @content;\n }\n }\n @else {\n @media (#{$rfs-mq-property-width}: #{$rfs-mq-value}) and (#{$rfs-mq-property-height}: #{$rfs-mq-value}) {\n @content;\n }\n }\n }\n @else {\n @media (#{$rfs-mq-property-width}: #{$rfs-mq-value}) {\n @content;\n }\n }\n}\n\n// Internal mixin that adds disable classes to the selector if needed.\n@mixin _rfs-rule {\n @if $rfs-class == disable and $rfs-mode == max-media-query {\n // Adding an extra class increases specificity, which prevents the media query to override the property\n &,\n .disable-rfs &,\n &.disable-rfs {\n @content;\n }\n }\n @else if $rfs-class == enable and $rfs-mode == min-media-query {\n .enable-rfs &,\n &.enable-rfs 
{\n @content;\n }\n }\n @else {\n @content;\n }\n}\n\n// Internal mixin that adds enable classes to the selector if needed.\n@mixin _rfs-media-query-rule {\n\n @if $rfs-class == enable {\n @if $rfs-mode == min-media-query {\n @content;\n }\n\n @include _rfs-media-query {\n .enable-rfs &,\n &.enable-rfs {\n @content;\n }\n }\n }\n @else {\n @if $rfs-class == disable and $rfs-mode == min-media-query {\n .disable-rfs &,\n &.disable-rfs {\n @content;\n }\n }\n @include _rfs-media-query {\n @content;\n }\n }\n}\n\n// Helper function to get the formatted non-responsive value\n@function rfs-value($values) {\n // Convert to list\n $values: if(type-of($values) != list, ($values,), $values);\n\n $val: '';\n\n // Loop over each value and calculate value\n @each $value in $values {\n @if $value == 0 {\n $val: $val + ' 0';\n }\n @else {\n // Cache $value unit\n $unit: if(type-of($value) == \"number\", unit($value), false);\n\n @if $unit == px {\n // Convert to rem if needed\n $val: $val + ' ' + if($rfs-unit == rem, #{divide($value, $value * 0 + $rfs-rem-value)}rem, $value);\n }\n @else if $unit == rem {\n // Convert to px if needed\n $val: $val + ' ' + if($rfs-unit == px, #{divide($value, $value * 0 + 1) * $rfs-rem-value}px, $value);\n }\n @else {\n // If $value isn't a number (like inherit) or $value has a unit (not px or rem, like 1.5em) or $ is 0, just print the value\n $val: $val + ' ' + $value;\n }\n }\n }\n\n // Remove first space\n @return unquote(str-slice($val, 2));\n}\n\n// Helper function to get the responsive value calculated by RFS\n@function rfs-fluid-value($values) {\n // Convert to list\n $values: if(type-of($values) != list, ($values,), $values);\n\n $val: '';\n\n // Loop over each value and calculate value\n @each $value in $values {\n @if $value == 0 {\n $val: $val + ' 0';\n }\n\n @else {\n // Cache $value unit\n $unit: if(type-of($value) == \"number\", unit($value), false);\n\n // If $value isn't a number (like inherit) or $value has a unit (not px or rem, like 1.5em) or $ is 0, just print the value\n @if not $unit or $unit != px and $unit != rem {\n $val: $val + ' ' + $value;\n }\n\n @else {\n // Remove unit from $value for calculations\n $value: divide($value, $value * 0 + if($unit == px, 1, divide(1, $rfs-rem-value)));\n\n // Only add the media query if the value is greater than the minimum value\n @if abs($value) <= $rfs-base-value or not $enable-rfs {\n $val: $val + ' ' + if($rfs-unit == rem, #{divide($value, $rfs-rem-value)}rem, #{$value}px);\n }\n @else {\n // Calculate the minimum value\n $value-min: $rfs-base-value + divide(abs($value) - $rfs-base-value, $rfs-factor);\n\n // Calculate difference between $value and the minimum value\n $value-diff: abs($value) - $value-min;\n\n // Base value formatting\n $min-width: if($rfs-unit == rem, #{divide($value-min, $rfs-rem-value)}rem, #{$value-min}px);\n\n // Use negative value if needed\n $min-width: if($value < 0, -$min-width, $min-width);\n\n // Use `vmin` if two-dimensional is enabled\n $variable-unit: if($rfs-two-dimensional, vmin, vw);\n\n // Calculate the variable width between 0 and $rfs-breakpoint\n $variable-width: #{divide($value-diff * 100, $rfs-breakpoint)}#{$variable-unit};\n\n // Return the calculated value\n $val: $val + ' calc(' + $min-width + if($value < 0, ' - ', ' + ') + $variable-width + ')';\n }\n }\n }\n }\n\n // Remove first space\n @return unquote(str-slice($val, 2));\n}\n\n// RFS mixin\n@mixin rfs($values, $property: font-size) {\n @if $values != null {\n $val: rfs-value($values);\n $fluidVal: 
rfs-fluid-value($values);\n\n // Do not print the media query if responsive & non-responsive values are the same\n @if $val == $fluidVal {\n #{$property}: $val;\n }\n @else {\n @include _rfs-rule {\n #{$property}: if($rfs-mode == max-media-query, $val, $fluidVal);\n\n // Include safari iframe resize fix if needed\n min-width: if($rfs-safari-iframe-resize-bug-fix, (0 * 1vw), null);\n }\n\n @include _rfs-media-query-rule {\n #{$property}: if($rfs-mode == max-media-query, $fluidVal, $val);\n }\n }\n }\n}\n\n// Shorthand helper mixins\n@mixin font-size($value) {\n @include rfs($value);\n}\n\n@mixin padding($value) {\n @include rfs($value, padding);\n}\n\n@mixin padding-top($value) {\n @include rfs($value, padding-top);\n}\n\n@mixin padding-right($value) {\n @include rfs($value, padding-right);\n}\n\n@mixin padding-bottom($value) {\n @include rfs($value, padding-bottom);\n}\n\n@mixin padding-left($value) {\n @include rfs($value, padding-left);\n}\n\n@mixin margin($value) {\n @include rfs($value, margin);\n}\n\n@mixin margin-top($value) {\n @include rfs($value, margin-top);\n}\n\n@mixin margin-right($value) {\n @include rfs($value, margin-right);\n}\n\n@mixin margin-bottom($value) {\n @include rfs($value, margin-bottom);\n}\n\n@mixin margin-left($value) {\n @include rfs($value, margin-left);\n}\n", + "// Deprecate mixin\n//\n// This mixin can be used to deprecate mixins or functions.\n// `$enable-deprecation-messages` is a global variable, `$ignore-warning` is a variable that can be passed to\n// some deprecated mixins to suppress the warning (for example if the mixin is still be used in the current version of Bootstrap)\n@mixin deprecate($name, $deprecate-version, $remove-version, $ignore-warning: false) {\n @if ($enable-deprecation-messages != false and $ignore-warning != true) {\n @warn \"#{$name} has been deprecated as of #{$deprecate-version}. It will be removed entirely in #{$remove-version}.\";\n }\n}\n", + "// Breakpoint viewport sizes and media queries.\n//\n// Breakpoints are defined as a map of (name: minimum width), order from small to large:\n//\n// (xs: 0, sm: 576px, md: 768px, lg: 992px, xl: 1200px)\n//\n// The map defined in the `$grid-breakpoints` global variable is used as the `$breakpoints` argument by default.\n\n// Name of the next breakpoint, or null for the last breakpoint.\n//\n// >> breakpoint-next(sm)\n// md\n// >> breakpoint-next(sm, (xs: 0, sm: 576px, md: 768px, lg: 992px, xl: 1200px))\n// md\n// >> breakpoint-next(sm, $breakpoint-names: (xs sm md lg xl))\n// md\n@function breakpoint-next($name, $breakpoints: $grid-breakpoints, $breakpoint-names: map-keys($breakpoints)) {\n $n: index($breakpoint-names, $name);\n @if not $n {\n @error \"breakpoint `#{$name}` not found in `#{$breakpoints}`\";\n }\n @return if($n < length($breakpoint-names), nth($breakpoint-names, $n + 1), null);\n}\n\n// Minimum breakpoint width. 
Null for the smallest (first) breakpoint.\n//\n// >> breakpoint-min(sm, (xs: 0, sm: 576px, md: 768px, lg: 992px, xl: 1200px))\n// 576px\n@function breakpoint-min($name, $breakpoints: $grid-breakpoints) {\n $min: map-get($breakpoints, $name);\n @return if($min != 0, $min, null);\n}\n\n// Maximum breakpoint width.\n// The maximum value is reduced by 0.02px to work around the limitations of\n// `min-` and `max-` prefixes and viewports with fractional widths.\n// See https://www.w3.org/TR/mediaqueries-4/#mq-min-max\n// Uses 0.02px rather than 0.01px to work around a current rounding bug in Safari.\n// See https://bugs.webkit.org/show_bug.cgi?id=178261\n//\n// >> breakpoint-max(md, (xs: 0, sm: 576px, md: 768px, lg: 992px, xl: 1200px))\n// 767.98px\n@function breakpoint-max($name, $breakpoints: $grid-breakpoints) {\n $max: map-get($breakpoints, $name);\n @return if($max and $max > 0, $max - .02, null);\n}\n\n// Returns a blank string if smallest breakpoint, otherwise returns the name with a dash in front.\n// Useful for making responsive utilities.\n//\n// >> breakpoint-infix(xs, (xs: 0, sm: 576px, md: 768px, lg: 992px, xl: 1200px))\n// \"\" (Returns a blank string)\n// >> breakpoint-infix(sm, (xs: 0, sm: 576px, md: 768px, lg: 992px, xl: 1200px))\n// \"-sm\"\n@function breakpoint-infix($name, $breakpoints: $grid-breakpoints) {\n @return if(breakpoint-min($name, $breakpoints) == null, \"\", \"-#{$name}\");\n}\n\n// Media of at least the minimum breakpoint width. No query for the smallest breakpoint.\n// Makes the @content apply to the given breakpoint and wider.\n@mixin media-breakpoint-up($name, $breakpoints: $grid-breakpoints) {\n $min: breakpoint-min($name, $breakpoints);\n @if $min {\n @media (min-width: $min) {\n @content;\n }\n } @else {\n @content;\n }\n}\n\n// Media of at most the maximum breakpoint width. 
No query for the largest breakpoint.\n// Makes the @content apply to the given breakpoint and narrower.\n@mixin media-breakpoint-down($name, $breakpoints: $grid-breakpoints) {\n $max: breakpoint-max($name, $breakpoints);\n @if $max {\n @media (max-width: $max) {\n @content;\n }\n } @else {\n @content;\n }\n}\n\n// Media that spans multiple breakpoint widths.\n// Makes the @content apply between the min and max breakpoints\n@mixin media-breakpoint-between($lower, $upper, $breakpoints: $grid-breakpoints) {\n $min: breakpoint-min($lower, $breakpoints);\n $max: breakpoint-max($upper, $breakpoints);\n\n @if $min != null and $max != null {\n @media (min-width: $min) and (max-width: $max) {\n @content;\n }\n } @else if $max == null {\n @include media-breakpoint-up($lower, $breakpoints) {\n @content;\n }\n } @else if $min == null {\n @include media-breakpoint-down($upper, $breakpoints) {\n @content;\n }\n }\n}\n\n// Media between the breakpoint's minimum and maximum widths.\n// No minimum for the smallest breakpoint, and no maximum for the largest one.\n// Makes the @content apply only to the given breakpoint, not viewports any wider or narrower.\n@mixin media-breakpoint-only($name, $breakpoints: $grid-breakpoints) {\n $min: breakpoint-min($name, $breakpoints);\n $next: breakpoint-next($name, $breakpoints);\n $max: breakpoint-max($next);\n\n @if $min != null and $max != null {\n @media (min-width: $min) and (max-width: $max) {\n @content;\n }\n } @else if $max == null {\n @include media-breakpoint-up($name, $breakpoints) {\n @content;\n }\n } @else if $min == null {\n @include media-breakpoint-down($next, $breakpoints) {\n @content;\n }\n }\n}\n", + "// scss-docs-start mixin-color-scheme\n@mixin color-scheme($name) {\n @media (prefers-color-scheme: #{$name}) {\n @content;\n }\n}\n// scss-docs-end mixin-color-scheme\n", + "// Image Mixins\n// - Responsive image\n// - Retina image\n\n\n// Responsive image\n//\n// Keep images from scaling beyond the width of their parents.\n\n@mixin img-fluid {\n // Part 1: Set a maximum relative to the parent\n max-width: 100%;\n // Part 2: Override the height to auto, otherwise images will be stretched\n // when setting a width and height attribute on the img element.\n height: auto;\n}\n", + "// Resize anything\n\n@mixin resizable($direction) {\n overflow: auto; // Per CSS3 UI, `resize` only applies when `overflow` isn't `visible`\n resize: $direction; // Options: horizontal, vertical, both\n}\n", + "// stylelint-disable declaration-no-important\n\n// Hide content visually while keeping it accessible to assistive technologies\n//\n// See: https://www.a11yproject.com/posts/2013-01-11-how-to-hide-content/\n// See: https://hugogiraudel.com/2016/10/13/css-hide-and-seek/\n\n@mixin visually-hidden() {\n position: absolute !important;\n width: 1px !important;\n height: 1px !important;\n padding: 0 !important;\n margin: -1px !important; // Fix for https://github.com/twbs/bootstrap/issues/25686\n overflow: hidden !important;\n clip: rect(0, 0, 0, 0) !important;\n white-space: nowrap !important;\n border: 0 !important;\n}\n\n// Use to only display content when it's focused, or one of its child elements is focused\n// (i.e. 
when focus is within the element/container that the class was applied to)\n//\n// Useful for \"Skip to main content\" links; see https://www.w3.org/TR/2013/NOTE-WCAG20-TECHS-20130905/G1\n\n@mixin visually-hidden-focusable() {\n &:not(:focus):not(:focus-within) {\n @include visually-hidden();\n }\n}\n", + "@mixin reset-text {\n font-family: $font-family-base;\n // We deliberately do NOT reset font-size or overflow-wrap / word-wrap.\n font-style: normal;\n font-weight: $font-weight-normal;\n line-height: $line-height-base;\n text-align: left; // Fallback for where `start` is not supported\n text-align: start;\n text-decoration: none;\n text-shadow: none;\n text-transform: none;\n letter-spacing: normal;\n word-break: normal;\n word-spacing: normal;\n white-space: normal;\n line-break: auto;\n}\n", + "// Text truncate\n// Requires inline-block or block for proper styling\n\n@mixin text-truncate() {\n overflow: hidden;\n text-overflow: ellipsis;\n white-space: nowrap;\n}\n", + "// Utility generator\n// Used to generate utilities & print utilities\n@mixin generate-utility($utility, $infix, $is-rfs-media-query: false) {\n $values: map-get($utility, values);\n\n // If the values are a list or string, convert it into a map\n @if type-of($values) == \"string\" or type-of(nth($values, 1)) != \"list\" {\n $values: zip($values, $values);\n }\n\n @each $key, $value in $values {\n $properties: map-get($utility, property);\n\n // Multiple properties are possible, for example with vertical or horizontal margins or paddings\n @if type-of($properties) == \"string\" {\n $properties: append((), $properties);\n }\n\n // Use custom class if present\n $property-class: if(map-has-key($utility, class), map-get($utility, class), nth($properties, 1));\n $property-class: if($property-class == null, \"\", $property-class);\n\n // State params to generate pseudo-classes\n $state: if(map-has-key($utility, state), map-get($utility, state), ());\n\n $infix: if($property-class == \"\" and str-slice($infix, 1, 1) == \"-\", str-slice($infix, 2), $infix);\n\n // Don't prefix if value key is null (eg. 
with shadow class)\n $property-class-modifier: if($key, if($property-class == \"\" and $infix == \"\", \"\", \"-\") + $key, \"\");\n\n @if map-get($utility, rfs) {\n // Inside the media query\n @if $is-rfs-media-query {\n $val: rfs-value($value);\n\n // Do not render anything if fluid and non fluid values are the same\n $value: if($val == rfs-fluid-value($value), null, $val);\n }\n @else {\n $value: rfs-fluid-value($value);\n }\n }\n\n $is-rtl: map-get($utility, rtl);\n\n @if $value != null {\n @if $is-rtl == false {\n /* rtl:begin:remove */\n }\n .#{$property-class + $infix + $property-class-modifier} {\n @each $property in $properties {\n #{$property}: $value if($enable-important-utilities, !important, null);\n }\n }\n\n @each $pseudo in $state {\n .#{$property-class + $infix + $property-class-modifier}-#{$pseudo}:#{$pseudo} {\n @each $property in $properties {\n #{$property}: $value if($enable-important-utilities, !important, null);\n }\n }\n }\n @if $is-rtl == false {\n /* rtl:end:remove */\n }\n }\n }\n}\n", + "// scss-docs-start alert-variant-mixin\n@mixin alert-variant($background, $border, $color) {\n color: $color;\n @include gradient-bg($background);\n border-color: $border;\n\n .alert-link {\n color: shade-color($color, 20%);\n }\n}\n// scss-docs-end alert-variant-mixin\n", + "// Button variants\n//\n// Easily pump out default styles, as well as :hover, :focus, :active,\n// and disabled options for all buttons\n\n// scss-docs-start btn-variant-mixin\n@mixin button-variant(\n $background,\n $border,\n $color: color-contrast($background),\n $hover-background: if($color == $color-contrast-light, shade-color($background, $btn-hover-bg-shade-amount), tint-color($background, $btn-hover-bg-tint-amount)),\n $hover-border: if($color == $color-contrast-light, shade-color($border, $btn-hover-border-shade-amount), tint-color($border, $btn-hover-border-tint-amount)),\n $hover-color: color-contrast($hover-background),\n $active-background: if($color == $color-contrast-light, shade-color($background, $btn-active-bg-shade-amount), tint-color($background, $btn-active-bg-tint-amount)),\n $active-border: if($color == $color-contrast-light, shade-color($border, $btn-active-border-shade-amount), tint-color($border, $btn-active-border-tint-amount)),\n $active-color: color-contrast($active-background),\n $disabled-background: $background,\n $disabled-border: $border,\n $disabled-color: color-contrast($disabled-background)\n) {\n color: $color;\n @include gradient-bg($background);\n border-color: $border;\n @include box-shadow($btn-box-shadow);\n\n &:hover {\n color: $hover-color;\n @include gradient-bg($hover-background);\n border-color: $hover-border;\n }\n\n .btn-check:focus + &,\n &:focus {\n color: $hover-color;\n @include gradient-bg($hover-background);\n border-color: $hover-border;\n @if $enable-shadows {\n @include box-shadow($btn-box-shadow, 0 0 0 $btn-focus-width rgba(mix($color, $border, 15%), .5));\n } @else {\n // Avoid using mixin so we can pass custom focus shadow properly\n box-shadow: 0 0 0 $btn-focus-width rgba(mix($color, $border, 15%), .5);\n }\n }\n\n .btn-check:checked + &,\n .btn-check:active + &,\n &:active,\n &.active,\n .show > &.dropdown-toggle {\n color: $active-color;\n background-color: $active-background;\n // Remove CSS gradients if they're enabled\n background-image: if($enable-gradients, none, null);\n border-color: $active-border;\n\n &:focus {\n @if $enable-shadows {\n @include box-shadow($btn-active-box-shadow, 0 0 0 $btn-focus-width rgba(mix($color, $border, 15%), 
.5));\n } @else {\n // Avoid using mixin so we can pass custom focus shadow properly\n box-shadow: 0 0 0 $btn-focus-width rgba(mix($color, $border, 15%), .5);\n }\n }\n }\n\n &:disabled,\n &.disabled {\n color: $disabled-color;\n background-color: $disabled-background;\n // Remove CSS gradients if they're enabled\n background-image: if($enable-gradients, none, null);\n border-color: $disabled-border;\n }\n}\n// scss-docs-end btn-variant-mixin\n\n// scss-docs-start btn-outline-variant-mixin\n@mixin button-outline-variant(\n $color,\n $color-hover: color-contrast($color),\n $active-background: $color,\n $active-border: $color,\n $active-color: color-contrast($active-background)\n) {\n color: $color;\n border-color: $color;\n\n &:hover {\n color: $color-hover;\n background-color: $active-background;\n border-color: $active-border;\n }\n\n .btn-check:focus + &,\n &:focus {\n box-shadow: 0 0 0 $btn-focus-width rgba($color, .5);\n }\n\n .btn-check:checked + &,\n .btn-check:active + &,\n &:active,\n &.active,\n &.dropdown-toggle.show {\n color: $active-color;\n background-color: $active-background;\n border-color: $active-border;\n\n &:focus {\n @if $enable-shadows {\n @include box-shadow($btn-active-box-shadow, 0 0 0 $btn-focus-width rgba($color, .5));\n } @else {\n // Avoid using mixin so we can pass custom focus shadow properly\n box-shadow: 0 0 0 $btn-focus-width rgba($color, .5);\n }\n }\n }\n\n &:disabled,\n &.disabled {\n color: $color;\n background-color: transparent;\n }\n}\n// scss-docs-end btn-outline-variant-mixin\n\n// scss-docs-start btn-size-mixin\n@mixin button-size($padding-y, $padding-x, $font-size, $border-radius) {\n padding: $padding-y $padding-x;\n @include font-size($font-size);\n // Manually declare to provide an override to the browser default\n @include border-radius($border-radius, 0);\n}\n// scss-docs-end btn-size-mixin\n", + "// scss-docs-start caret-mixins\n@mixin caret-down {\n border-top: $caret-width solid;\n border-right: $caret-width solid transparent;\n border-bottom: 0;\n border-left: $caret-width solid transparent;\n}\n\n@mixin caret-up {\n border-top: 0;\n border-right: $caret-width solid transparent;\n border-bottom: $caret-width solid;\n border-left: $caret-width solid transparent;\n}\n\n@mixin caret-end {\n border-top: $caret-width solid transparent;\n border-right: 0;\n border-bottom: $caret-width solid transparent;\n border-left: $caret-width solid;\n}\n\n@mixin caret-start {\n border-top: $caret-width solid transparent;\n border-right: $caret-width solid;\n border-bottom: $caret-width solid transparent;\n}\n\n@mixin caret($direction: down) {\n @if $enable-caret {\n &::after {\n display: inline-block;\n margin-left: $caret-spacing;\n vertical-align: $caret-vertical-align;\n content: \"\";\n @if $direction == down {\n @include caret-down();\n } @else if $direction == up {\n @include caret-up();\n } @else if $direction == end {\n @include caret-end();\n }\n }\n\n @if $direction == start {\n &::after {\n display: none;\n }\n\n &::before {\n display: inline-block;\n margin-right: $caret-spacing;\n vertical-align: $caret-vertical-align;\n content: \"\";\n @include caret-start();\n }\n }\n\n &:empty::after {\n margin-left: 0;\n }\n }\n}\n// scss-docs-end caret-mixins\n", + "// Pagination\n\n// scss-docs-start pagination-mixin\n@mixin pagination-size($padding-y, $padding-x, $font-size, $border-radius) {\n .page-link {\n padding: $padding-y $padding-x;\n @include font-size($font-size);\n }\n\n .page-item {\n @if $pagination-margin-start == 
(-$pagination-border-width) {\n &:first-child {\n .page-link {\n @include border-start-radius($border-radius);\n }\n }\n\n &:last-child {\n .page-link {\n @include border-end-radius($border-radius);\n }\n }\n } @else {\n //Add border-radius to all pageLinks in case they have left margin\n .page-link {\n @include border-radius($border-radius);\n }\n }\n }\n}\n// scss-docs-end pagination-mixin\n", + "// Lists\n\n// Unstyled keeps list items block level, just removes default browser padding and list-style\n@mixin list-unstyled {\n padding-left: 0;\n list-style: none;\n}\n", + "// List Groups\n\n// scss-docs-start list-group-mixin\n@mixin list-group-item-variant($state, $background, $color) {\n .list-group-item-#{$state} {\n color: $color;\n background-color: $background;\n\n &.list-group-item-action {\n &:hover,\n &:focus {\n color: $color;\n background-color: shade-color($background, 10%);\n }\n\n &.active {\n color: $white;\n background-color: $color;\n border-color: $color;\n }\n }\n }\n}\n// scss-docs-end list-group-mixin\n", + "// This mixin uses an `if()` technique to be compatible with Dart Sass\n// See https://github.com/sass/sass/issues/1873#issuecomment-152293725 for more details\n\n// scss-docs-start form-validation-mixins\n@mixin form-validation-state-selector($state) {\n @if ($state == \"valid\" or $state == \"invalid\") {\n .was-validated #{if(&, \"&\", \"\")}:#{$state},\n #{if(&, \"&\", \"\")}.is-#{$state} {\n @content;\n }\n } @else {\n #{if(&, \"&\", \"\")}.is-#{$state} {\n @content;\n }\n }\n}\n\n@mixin form-validation-state(\n $state,\n $color,\n $icon,\n $tooltip-color: color-contrast($color),\n $tooltip-bg-color: rgba($color, $form-feedback-tooltip-opacity),\n $focus-box-shadow: 0 0 $input-btn-focus-blur $input-focus-width rgba($color, $input-btn-focus-color-opacity)\n) {\n .#{$state}-feedback {\n display: none;\n width: 100%;\n margin-top: $form-feedback-margin-top;\n @include font-size($form-feedback-font-size);\n font-style: $form-feedback-font-style;\n color: $color;\n }\n\n .#{$state}-tooltip {\n position: absolute;\n top: 100%;\n z-index: 5;\n display: none;\n max-width: 100%; // Contain to parent when possible\n padding: $form-feedback-tooltip-padding-y $form-feedback-tooltip-padding-x;\n margin-top: .1rem;\n @include font-size($form-feedback-tooltip-font-size);\n line-height: $form-feedback-tooltip-line-height;\n color: $tooltip-color;\n background-color: $tooltip-bg-color;\n @include border-radius($form-feedback-tooltip-border-radius);\n }\n\n @include form-validation-state-selector($state) {\n ~ .#{$state}-feedback,\n ~ .#{$state}-tooltip {\n display: block;\n }\n }\n\n .form-control {\n @include form-validation-state-selector($state) {\n border-color: $color;\n\n @if $enable-validation-icons {\n padding-right: $input-height-inner;\n background-image: escape-svg($icon);\n background-repeat: no-repeat;\n background-position: right $input-height-inner-quarter center;\n background-size: $input-height-inner-half $input-height-inner-half;\n }\n\n &:focus {\n border-color: $color;\n box-shadow: $focus-box-shadow;\n }\n }\n }\n\n // stylelint-disable-next-line selector-no-qualifying-type\n textarea.form-control {\n @include form-validation-state-selector($state) {\n @if $enable-validation-icons {\n padding-right: $input-height-inner;\n background-position: top $input-height-inner-quarter right $input-height-inner-quarter;\n }\n }\n }\n\n .form-select {\n @include form-validation-state-selector($state) {\n border-color: $color;\n\n @if $enable-validation-icons {\n 
&:not([multiple]):not([size]),\n &:not([multiple])[size=\"1\"] {\n padding-right: $form-select-feedback-icon-padding-end;\n background-image: escape-svg($form-select-indicator), escape-svg($icon);\n background-position: $form-select-bg-position, $form-select-feedback-icon-position;\n background-size: $form-select-bg-size, $form-select-feedback-icon-size;\n }\n }\n\n &:focus {\n border-color: $color;\n box-shadow: $focus-box-shadow;\n }\n }\n }\n\n .form-check-input {\n @include form-validation-state-selector($state) {\n border-color: $color;\n\n &:checked {\n background-color: $color;\n }\n\n &:focus {\n box-shadow: $focus-box-shadow;\n }\n\n ~ .form-check-label {\n color: $color;\n }\n }\n }\n .form-check-inline .form-check-input {\n ~ .#{$state}-feedback {\n margin-left: .5em;\n }\n }\n\n .input-group .form-control,\n .input-group .form-select {\n @include form-validation-state-selector($state) {\n @if $state == \"valid\" {\n z-index: 1;\n } @else if $state == \"invalid\" {\n z-index: 2;\n }\n &:focus {\n z-index: 3;\n }\n }\n }\n}\n// scss-docs-end form-validation-mixins\n", + "// scss-docs-start table-variant\n@mixin table-variant($state, $background) {\n .table-#{$state} {\n $color: color-contrast(opaque($body-bg, $background));\n $hover-bg: mix($color, $background, percentage($table-hover-bg-factor));\n $striped-bg: mix($color, $background, percentage($table-striped-bg-factor));\n $active-bg: mix($color, $background, percentage($table-active-bg-factor));\n\n --#{$variable-prefix}table-bg: #{$background};\n --#{$variable-prefix}table-striped-bg: #{$striped-bg};\n --#{$variable-prefix}table-striped-color: #{color-contrast($striped-bg)};\n --#{$variable-prefix}table-active-bg: #{$active-bg};\n --#{$variable-prefix}table-active-color: #{color-contrast($active-bg)};\n --#{$variable-prefix}table-hover-bg: #{$hover-bg};\n --#{$variable-prefix}table-hover-color: #{color-contrast($hover-bg)};\n\n color: $color;\n border-color: mix($color, $background, percentage($table-border-factor));\n }\n}\n// scss-docs-end table-variant\n", + "// stylelint-disable property-disallowed-list\n// Single side border-radius\n\n// Helper function to replace negative values with 0\n@function valid-radius($radius) {\n $return: ();\n @each $value in $radius {\n @if type-of($value) == number {\n $return: append($return, max($value, 0));\n } @else {\n $return: append($return, $value);\n }\n }\n @return $return;\n}\n\n// scss-docs-start border-radius-mixins\n@mixin border-radius($radius: $border-radius, $fallback-border-radius: false) {\n @if $enable-rounded {\n border-radius: valid-radius($radius);\n }\n @else if $fallback-border-radius != false {\n border-radius: $fallback-border-radius;\n }\n}\n\n@mixin border-top-radius($radius: $border-radius) {\n @if $enable-rounded {\n border-top-left-radius: valid-radius($radius);\n border-top-right-radius: valid-radius($radius);\n }\n}\n\n@mixin border-end-radius($radius: $border-radius) {\n @if $enable-rounded {\n border-top-right-radius: valid-radius($radius);\n border-bottom-right-radius: valid-radius($radius);\n }\n}\n\n@mixin border-bottom-radius($radius: $border-radius) {\n @if $enable-rounded {\n border-bottom-right-radius: valid-radius($radius);\n border-bottom-left-radius: valid-radius($radius);\n }\n}\n\n@mixin border-start-radius($radius: $border-radius) {\n @if $enable-rounded {\n border-top-left-radius: valid-radius($radius);\n border-bottom-left-radius: valid-radius($radius);\n }\n}\n\n@mixin border-top-start-radius($radius: $border-radius) {\n @if 
$enable-rounded {\n border-top-left-radius: valid-radius($radius);\n }\n}\n\n@mixin border-top-end-radius($radius: $border-radius) {\n @if $enable-rounded {\n border-top-right-radius: valid-radius($radius);\n }\n}\n\n@mixin border-bottom-end-radius($radius: $border-radius) {\n @if $enable-rounded {\n border-bottom-right-radius: valid-radius($radius);\n }\n}\n\n@mixin border-bottom-start-radius($radius: $border-radius) {\n @if $enable-rounded {\n border-bottom-left-radius: valid-radius($radius);\n }\n}\n// scss-docs-end border-radius-mixins\n", + "@mixin box-shadow($shadow...) {\n @if $enable-shadows {\n $result: ();\n\n @each $value in $shadow {\n @if $value != null {\n $result: append($result, $value, \"comma\");\n }\n @if $value == none and length($shadow) > 1 {\n @warn \"The keyword 'none' must be used as a single argument.\";\n }\n }\n\n @if (length($result) > 0) {\n box-shadow: $result;\n }\n }\n}\n", + "// Gradients\n\n// scss-docs-start gradient-bg-mixin\n@mixin gradient-bg($color: null) {\n background-color: $color;\n\n @if $enable-gradients {\n background-image: var(--#{$variable-prefix}gradient);\n }\n}\n// scss-docs-end gradient-bg-mixin\n\n// scss-docs-start gradient-mixins\n// Horizontal gradient, from left to right\n//\n// Creates two color stops, start and end, by specifying a color and position for each color stop.\n@mixin gradient-x($start-color: $gray-700, $end-color: $gray-800, $start-percent: 0%, $end-percent: 100%) {\n background-image: linear-gradient(to right, $start-color $start-percent, $end-color $end-percent);\n}\n\n// Vertical gradient, from top to bottom\n//\n// Creates two color stops, start and end, by specifying a color and position for each color stop.\n@mixin gradient-y($start-color: $gray-700, $end-color: $gray-800, $start-percent: null, $end-percent: null) {\n background-image: linear-gradient(to bottom, $start-color $start-percent, $end-color $end-percent);\n}\n\n@mixin gradient-directional($start-color: $gray-700, $end-color: $gray-800, $deg: 45deg) {\n background-image: linear-gradient($deg, $start-color, $end-color);\n}\n\n@mixin gradient-x-three-colors($start-color: $blue, $mid-color: $purple, $color-stop: 50%, $end-color: $red) {\n background-image: linear-gradient(to right, $start-color, $mid-color $color-stop, $end-color);\n}\n\n@mixin gradient-y-three-colors($start-color: $blue, $mid-color: $purple, $color-stop: 50%, $end-color: $red) {\n background-image: linear-gradient($start-color, $mid-color $color-stop, $end-color);\n}\n\n@mixin gradient-radial($inner-color: $gray-700, $outer-color: $gray-800) {\n background-image: radial-gradient(circle, $inner-color, $outer-color);\n}\n\n@mixin gradient-striped($color: rgba($white, .15), $angle: 45deg) {\n background-image: linear-gradient($angle, $color 25%, transparent 25%, transparent 50%, $color 50%, $color 75%, transparent 75%, transparent);\n}\n// scss-docs-end gradient-mixins\n", + "// stylelint-disable property-disallowed-list\n@mixin transition($transition...) 
{\n @if length($transition) == 0 {\n $transition: $transition-base;\n }\n\n @if length($transition) > 1 {\n @each $value in $transition {\n @if $value == null or $value == none {\n @warn \"The keyword 'none' or 'null' must be used as a single argument.\";\n }\n }\n }\n\n @if $enable-transitions {\n @if nth($transition, 1) != null {\n transition: $transition;\n }\n\n @if $enable-reduced-motion and nth($transition, 1) != null and nth($transition, 1) != none {\n @media (prefers-reduced-motion: reduce) {\n transition: none;\n }\n }\n }\n}\n", + "// scss-docs-start clearfix\n@mixin clearfix() {\n &::after {\n display: block;\n clear: both;\n content: \"\";\n }\n}\n// scss-docs-end clearfix\n", + "// Container mixins\n\n@mixin make-container($gutter: $container-padding-x) {\n width: 100%;\n padding-right: var(--#{$variable-prefix}gutter-x, #{$gutter});\n padding-left: var(--#{$variable-prefix}gutter-x, #{$gutter});\n margin-right: auto;\n margin-left: auto;\n}\n", + "// Grid system\n//\n// Generate semantic grid columns with these mixins.\n\n@mixin make-row($gutter: $grid-gutter-width) {\n --#{$variable-prefix}gutter-x: #{$gutter};\n --#{$variable-prefix}gutter-y: 0;\n display: flex;\n flex-wrap: wrap;\n margin-top: calc(var(--#{$variable-prefix}gutter-y) * -1); // stylelint-disable-line function-disallowed-list\n margin-right: calc(var(--#{$variable-prefix}gutter-x) * -.5); // stylelint-disable-line function-disallowed-list\n margin-left: calc(var(--#{$variable-prefix}gutter-x) * -.5); // stylelint-disable-line function-disallowed-list\n}\n\n@mixin make-col-ready($gutter: $grid-gutter-width) {\n // Add box sizing if only the grid is loaded\n box-sizing: if(variable-exists(include-column-box-sizing) and $include-column-box-sizing, border-box, null);\n // Prevent columns from becoming too narrow when at smaller grid tiers by\n // always setting `width: 100%;`. This works because we set the width\n // later on to override this initial width.\n flex-shrink: 0;\n width: 100%;\n max-width: 100%; // Prevent `.col-auto`, `.col` (& responsive variants) from breaking out the grid\n padding-right: calc(var(--#{$variable-prefix}gutter-x) * .5); // stylelint-disable-line function-disallowed-list\n padding-left: calc(var(--#{$variable-prefix}gutter-x) * .5); // stylelint-disable-line function-disallowed-list\n margin-top: var(--#{$variable-prefix}gutter-y);\n}\n\n@mixin make-col($size: false, $columns: $grid-columns) {\n @if $size {\n flex: 0 0 auto;\n width: percentage(divide($size, $columns));\n\n } @else {\n flex: 1 1 0;\n max-width: 100%;\n }\n}\n\n@mixin make-col-auto() {\n flex: 0 0 auto;\n width: auto;\n}\n\n@mixin make-col-offset($size, $columns: $grid-columns) {\n $num: divide($size, $columns);\n margin-left: if($num == 0, 0, percentage($num));\n}\n\n// Row columns\n//\n// Specify on a parent element(e.g., .row) to force immediate children into NN\n// numberof columns. 
Supports wrapping to new lines, but does not do a Masonry\n// style grid.\n@mixin row-cols($count) {\n > * {\n flex: 0 0 auto;\n width: divide(100%, $count);\n }\n}\n\n// Framework grid generation\n//\n// Used only by Bootstrap to generate the correct number of grid classes given\n// any value of `$grid-columns`.\n\n@mixin make-grid-columns($columns: $grid-columns, $gutter: $grid-gutter-width, $breakpoints: $grid-breakpoints) {\n @each $breakpoint in map-keys($breakpoints) {\n // .row-cols defaults must all appear before .col overrides so they can be overridden.\n $infix: breakpoint-infix($breakpoint, $breakpoints);\n @include media-breakpoint-up($breakpoint, $breakpoints) {\n // Provide basic `.col-{bp}` classes for equal-width flexbox columns\n .col#{$infix} {\n flex: 1 0 0%; // Flexbugs #4: https://github.com/philipwalton/flexbugs#flexbug-4\n }\n\n .row-cols#{$infix}-auto > * {\n @include make-col-auto();\n }\n\n @if $grid-row-columns > 0 {\n @for $i from 1 through $grid-row-columns {\n .row-cols#{$infix}-#{$i} {\n @include row-cols($i);\n }\n }\n }\n }\n }\n\n @each $breakpoint in map-keys($breakpoints) {\n $infix: breakpoint-infix($breakpoint, $breakpoints);\n\n @include media-breakpoint-up($breakpoint, $breakpoints) {\n .col#{$infix}-auto {\n @include make-col-auto();\n }\n\n @if $columns > 0 {\n @for $i from 1 through $columns {\n .col#{$infix}-#{$i} {\n @include make-col($i, $columns);\n }\n }\n\n // `$columns - 1` because offsetting by the width of an entire row isn't possible\n @for $i from 0 through ($columns - 1) {\n @if not ($infix == \"\" and $i == 0) { // Avoid emitting useless .offset-0\n .offset#{$infix}-#{$i} {\n @include make-col-offset($i, $columns);\n }\n }\n }\n }\n\n // Gutters\n //\n // Make use of `.g-*`, `.gx-*` or `.gy-*` utilities to change spacing between the columns.\n @each $key, $value in $gutters {\n .g#{$infix}-#{$key},\n .gx#{$infix}-#{$key} {\n --#{$variable-prefix}gutter-x: #{$value};\n }\n\n .g#{$infix}-#{$key},\n .gy#{$infix}-#{$key} {\n --#{$variable-prefix}gutter-y: #{$value};\n }\n }\n }\n }\n}\n", + "// stylelint-disable indentation\n\n// Utilities\n\n$utilities: () !default;\n// stylelint-disable-next-line scss/dollar-variable-default\n$utilities: map-merge(\n (\n // scss-docs-start utils-vertical-align\n \"align\": (\n property: vertical-align,\n class: align,\n values: baseline top middle bottom text-bottom text-top\n ),\n // scss-docs-end utils-vertical-align\n // scss-docs-start utils-float\n \"float\": (\n responsive: true,\n property: float,\n values: (\n start: left,\n end: right,\n none: none,\n )\n ),\n // scss-docs-end utils-float\n // scss-docs-start utils-overflow\n \"overflow\": (\n property: overflow,\n values: auto hidden visible scroll,\n ),\n // scss-docs-end utils-overflow\n // scss-docs-start utils-display\n \"display\": (\n responsive: true,\n print: true,\n property: display,\n class: d,\n values: inline inline-block block grid table table-row table-cell flex inline-flex none\n ),\n // scss-docs-end utils-display\n // scss-docs-start utils-shadow\n \"shadow\": (\n property: box-shadow,\n class: shadow,\n values: (\n null: $box-shadow,\n sm: $box-shadow-sm,\n lg: $box-shadow-lg,\n none: none,\n )\n ),\n // scss-docs-end utils-shadow\n // scss-docs-start utils-position\n \"position\": (\n property: position,\n values: static relative absolute fixed sticky\n ),\n \"top\": (\n property: top,\n values: $position-values\n ),\n \"bottom\": (\n property: bottom,\n values: $position-values\n ),\n \"start\": (\n property: left,\n 
class: start,\n values: $position-values\n ),\n \"end\": (\n property: right,\n class: end,\n values: $position-values\n ),\n \"translate-middle\": (\n property: transform,\n class: translate-middle,\n values: (\n null: translate(-50%, -50%),\n x: translateX(-50%),\n y: translateY(-50%),\n )\n ),\n // scss-docs-end utils-position\n // scss-docs-start utils-borders\n \"border\": (\n property: border,\n values: (\n null: $border-width solid $border-color,\n 0: 0,\n )\n ),\n \"border-top\": (\n property: border-top,\n values: (\n null: $border-width solid $border-color,\n 0: 0,\n )\n ),\n \"border-end\": (\n property: border-right,\n class: border-end,\n values: (\n null: $border-width solid $border-color,\n 0: 0,\n )\n ),\n \"border-bottom\": (\n property: border-bottom,\n values: (\n null: $border-width solid $border-color,\n 0: 0,\n )\n ),\n \"border-start\": (\n property: border-left,\n class: border-start,\n values: (\n null: $border-width solid $border-color,\n 0: 0,\n )\n ),\n \"border-color\": (\n property: border-color,\n class: border,\n values: map-merge($theme-colors, (\"white\": $white))\n ),\n \"border-width\": (\n property: border-width,\n class: border,\n values: $border-widths\n ),\n // scss-docs-end utils-borders\n // Sizing utilities\n // scss-docs-start utils-sizing\n \"width\": (\n property: width,\n class: w,\n values: (\n 25: 25%,\n 50: 50%,\n 75: 75%,\n 100: 100%,\n auto: auto\n )\n ),\n \"max-width\": (\n property: max-width,\n class: mw,\n values: (100: 100%)\n ),\n \"viewport-width\": (\n property: width,\n class: vw,\n values: (100: 100vw)\n ),\n \"min-viewport-width\": (\n property: min-width,\n class: min-vw,\n values: (100: 100vw)\n ),\n \"height\": (\n property: height,\n class: h,\n values: (\n 25: 25%,\n 50: 50%,\n 75: 75%,\n 100: 100%,\n auto: auto\n )\n ),\n \"max-height\": (\n property: max-height,\n class: mh,\n values: (100: 100%)\n ),\n \"viewport-height\": (\n property: height,\n class: vh,\n values: (100: 100vh)\n ),\n \"min-viewport-height\": (\n property: min-height,\n class: min-vh,\n values: (100: 100vh)\n ),\n // scss-docs-end utils-sizing\n // Flex utilities\n // scss-docs-start utils-flex\n \"flex\": (\n responsive: true,\n property: flex,\n values: (fill: 1 1 auto)\n ),\n \"flex-direction\": (\n responsive: true,\n property: flex-direction,\n class: flex,\n values: row column row-reverse column-reverse\n ),\n \"flex-grow\": (\n responsive: true,\n property: flex-grow,\n class: flex,\n values: (\n grow-0: 0,\n grow-1: 1,\n )\n ),\n \"flex-shrink\": (\n responsive: true,\n property: flex-shrink,\n class: flex,\n values: (\n shrink-0: 0,\n shrink-1: 1,\n )\n ),\n \"flex-wrap\": (\n responsive: true,\n property: flex-wrap,\n class: flex,\n values: wrap nowrap wrap-reverse\n ),\n \"gap\": (\n responsive: true,\n property: gap,\n class: gap,\n values: $spacers\n ),\n \"justify-content\": (\n responsive: true,\n property: justify-content,\n values: (\n start: flex-start,\n end: flex-end,\n center: center,\n between: space-between,\n around: space-around,\n evenly: space-evenly,\n )\n ),\n \"align-items\": (\n responsive: true,\n property: align-items,\n values: (\n start: flex-start,\n end: flex-end,\n center: center,\n baseline: baseline,\n stretch: stretch,\n )\n ),\n \"align-content\": (\n responsive: true,\n property: align-content,\n values: (\n start: flex-start,\n end: flex-end,\n center: center,\n between: space-between,\n around: space-around,\n stretch: stretch,\n )\n ),\n \"align-self\": (\n responsive: true,\n property: align-self,\n 
values: (\n auto: auto,\n start: flex-start,\n end: flex-end,\n center: center,\n baseline: baseline,\n stretch: stretch,\n )\n ),\n \"order\": (\n responsive: true,\n property: order,\n values: (\n first: -1,\n 0: 0,\n 1: 1,\n 2: 2,\n 3: 3,\n 4: 4,\n 5: 5,\n last: 6,\n ),\n ),\n // scss-docs-end utils-flex\n // Margin utilities\n // scss-docs-start utils-spacing\n \"margin\": (\n responsive: true,\n property: margin,\n class: m,\n values: map-merge($spacers, (auto: auto))\n ),\n \"margin-x\": (\n responsive: true,\n property: margin-right margin-left,\n class: mx,\n values: map-merge($spacers, (auto: auto))\n ),\n \"margin-y\": (\n responsive: true,\n property: margin-top margin-bottom,\n class: my,\n values: map-merge($spacers, (auto: auto))\n ),\n \"margin-top\": (\n responsive: true,\n property: margin-top,\n class: mt,\n values: map-merge($spacers, (auto: auto))\n ),\n \"margin-end\": (\n responsive: true,\n property: margin-right,\n class: me,\n values: map-merge($spacers, (auto: auto))\n ),\n \"margin-bottom\": (\n responsive: true,\n property: margin-bottom,\n class: mb,\n values: map-merge($spacers, (auto: auto))\n ),\n \"margin-start\": (\n responsive: true,\n property: margin-left,\n class: ms,\n values: map-merge($spacers, (auto: auto))\n ),\n // Negative margin utilities\n \"negative-margin\": (\n responsive: true,\n property: margin,\n class: m,\n values: $negative-spacers\n ),\n \"negative-margin-x\": (\n responsive: true,\n property: margin-right margin-left,\n class: mx,\n values: $negative-spacers\n ),\n \"negative-margin-y\": (\n responsive: true,\n property: margin-top margin-bottom,\n class: my,\n values: $negative-spacers\n ),\n \"negative-margin-top\": (\n responsive: true,\n property: margin-top,\n class: mt,\n values: $negative-spacers\n ),\n \"negative-margin-end\": (\n responsive: true,\n property: margin-right,\n class: me,\n values: $negative-spacers\n ),\n \"negative-margin-bottom\": (\n responsive: true,\n property: margin-bottom,\n class: mb,\n values: $negative-spacers\n ),\n \"negative-margin-start\": (\n responsive: true,\n property: margin-left,\n class: ms,\n values: $negative-spacers\n ),\n // Padding utilities\n \"padding\": (\n responsive: true,\n property: padding,\n class: p,\n values: $spacers\n ),\n \"padding-x\": (\n responsive: true,\n property: padding-right padding-left,\n class: px,\n values: $spacers\n ),\n \"padding-y\": (\n responsive: true,\n property: padding-top padding-bottom,\n class: py,\n values: $spacers\n ),\n \"padding-top\": (\n responsive: true,\n property: padding-top,\n class: pt,\n values: $spacers\n ),\n \"padding-end\": (\n responsive: true,\n property: padding-right,\n class: pe,\n values: $spacers\n ),\n \"padding-bottom\": (\n responsive: true,\n property: padding-bottom,\n class: pb,\n values: $spacers\n ),\n \"padding-start\": (\n responsive: true,\n property: padding-left,\n class: ps,\n values: $spacers\n ),\n // scss-docs-end utils-spacing\n // Text\n // scss-docs-start utils-text\n \"font-family\": (\n property: font-family,\n class: font,\n values: (monospace: var(--#{$variable-prefix}font-monospace))\n ),\n \"font-size\": (\n rfs: true,\n property: font-size,\n class: fs,\n values: $font-sizes\n ),\n \"font-style\": (\n property: font-style,\n class: fst,\n values: italic normal\n ),\n \"font-weight\": (\n property: font-weight,\n class: fw,\n values: (\n light: $font-weight-light,\n lighter: $font-weight-lighter,\n normal: $font-weight-normal,\n bold: $font-weight-bold,\n bolder: $font-weight-bolder\n )\n ),\n 
\"line-height\": (\n property: line-height,\n class: lh,\n values: (\n 1: 1,\n sm: $line-height-sm,\n base: $line-height-base,\n lg: $line-height-lg,\n )\n ),\n \"text-align\": (\n responsive: true,\n property: text-align,\n class: text,\n values: (\n start: left,\n end: right,\n center: center,\n )\n ),\n \"text-decoration\": (\n property: text-decoration,\n values: none underline line-through\n ),\n \"text-transform\": (\n property: text-transform,\n class: text,\n values: lowercase uppercase capitalize\n ),\n \"white-space\": (\n property: white-space,\n class: text,\n values: (\n wrap: normal,\n nowrap: nowrap,\n )\n ),\n \"word-wrap\": (\n property: word-wrap word-break,\n class: text,\n values: (break: break-word),\n rtl: false\n ),\n // scss-docs-end utils-text\n // scss-docs-start utils-color\n \"color\": (\n property: color,\n class: text,\n values: map-merge(\n $theme-colors,\n (\n \"white\": $white,\n \"body\": $body-color,\n \"muted\": $text-muted,\n \"black-50\": rgba($black, .5),\n \"white-50\": rgba($white, .5),\n \"reset\": inherit,\n )\n )\n ),\n // scss-docs-end utils-color\n // scss-docs-start utils-bg-color\n \"background-color\": (\n property: background-color,\n class: bg,\n values: map-merge(\n $theme-colors,\n (\n \"body\": $body-bg,\n \"white\": $white,\n \"transparent\": transparent\n )\n )\n ),\n // scss-docs-end utils-bg-color\n \"gradient\": (\n property: background-image,\n class: bg,\n values: (gradient: var(--#{$variable-prefix}gradient))\n ),\n // scss-docs-start utils-interaction\n \"user-select\": (\n property: user-select,\n values: all auto none\n ),\n \"pointer-events\": (\n property: pointer-events,\n class: pe,\n values: none auto,\n ),\n // scss-docs-end utils-interaction\n // scss-docs-start utils-border-radius\n \"rounded\": (\n property: border-radius,\n class: rounded,\n values: (\n null: $border-radius,\n 0: 0,\n 1: $border-radius-sm,\n 2: $border-radius,\n 3: $border-radius-lg,\n circle: 50%,\n pill: $border-radius-pill\n )\n ),\n \"rounded-top\": (\n property: border-top-left-radius border-top-right-radius,\n class: rounded-top,\n values: (null: $border-radius)\n ),\n \"rounded-end\": (\n property: border-top-right-radius border-bottom-right-radius,\n class: rounded-end,\n values: (null: $border-radius)\n ),\n \"rounded-bottom\": (\n property: border-bottom-right-radius border-bottom-left-radius,\n class: rounded-bottom,\n values: (null: $border-radius)\n ),\n \"rounded-start\": (\n property: border-bottom-left-radius border-top-left-radius,\n class: rounded-start,\n values: (null: $border-radius)\n ),\n // scss-docs-end utils-border-radius\n // scss-docs-start utils-visibility\n \"visibility\": (\n property: visibility,\n class: null,\n values: (\n visible: visible,\n invisible: hidden,\n )\n )\n // scss-docs-end utils-visibility\n ),\n $utilities\n);\n", + ":root {\n // Custom variable values only support SassScript inside `#{}`.\n @each $color, $value in $colors {\n --#{$variable-prefix}#{$color}: #{$value};\n }\n\n @each $color, $value in $theme-colors {\n --#{$variable-prefix}#{$color}: #{$value};\n }\n\n // Use `inspect` for lists so that quoted items keep the quotes.\n // See https://github.com/sass/sass/issues/2383#issuecomment-336349172\n --#{$variable-prefix}font-sans-serif: #{inspect($font-family-sans-serif)};\n --#{$variable-prefix}font-monospace: #{inspect($font-family-monospace)};\n --#{$variable-prefix}gradient: #{$gradient};\n}\n", + "// stylelint-disable declaration-no-important, selector-no-qualifying-type, 
property-no-vendor-prefix\n\n\n// Reboot\n//\n// Normalization of HTML elements, manually forked from Normalize.css to remove\n// styles targeting irrelevant browsers while applying new styles.\n//\n// Normalize is licensed MIT. https://github.com/necolas/normalize.css\n\n\n// Document\n//\n// Change from `box-sizing: content-box` so that `width` is not affected by `padding` or `border`.\n\n*,\n*::before,\n*::after {\n box-sizing: border-box;\n}\n\n\n// Root\n//\n// Ability to the value of the root font sizes, affecting the value of `rem`.\n// null by default, thus nothing is generated.\n\n:root {\n font-size: $font-size-root;\n\n @if $enable-smooth-scroll {\n @media (prefers-reduced-motion: no-preference) {\n scroll-behavior: smooth;\n }\n }\n}\n\n\n// Body\n//\n// 1. Remove the margin in all browsers.\n// 2. As a best practice, apply a default `background-color`.\n// 3. Prevent adjustments of font size after orientation changes in iOS.\n// 4. Change the default tap highlight to be completely transparent in iOS.\n\nbody {\n margin: 0; // 1\n font-family: $font-family-base;\n @include font-size($font-size-base);\n font-weight: $font-weight-base;\n line-height: $line-height-base;\n color: $body-color;\n text-align: $body-text-align;\n background-color: $body-bg; // 2\n -webkit-text-size-adjust: 100%; // 3\n -webkit-tap-highlight-color: rgba($black, 0); // 4\n}\n\n\n// Content grouping\n//\n// 1. Reset Firefox's gray color\n// 2. Set correct height and prevent the `size` attribute to make the `hr` look like an input field\n\nhr {\n margin: $hr-margin-y 0;\n color: $hr-color; // 1\n background-color: currentColor;\n border: 0;\n opacity: $hr-opacity;\n}\n\nhr:not([size]) {\n height: $hr-height; // 2\n}\n\n\n// Typography\n//\n// 1. Remove top margins from headings\n// By default, `
<h1>
`-`
<h6>
` all receive top and bottom margins. We nuke the top\n// margin for easier control within type scales as it avoids margin collapsing.\n\n%heading {\n margin-top: 0; // 1\n margin-bottom: $headings-margin-bottom;\n font-family: $headings-font-family;\n font-style: $headings-font-style;\n font-weight: $headings-font-weight;\n line-height: $headings-line-height;\n color: $headings-color;\n}\n\nh1 {\n @extend %heading;\n @include font-size($h1-font-size);\n}\n\nh2 {\n @extend %heading;\n @include font-size($h2-font-size);\n}\n\nh3 {\n @extend %heading;\n @include font-size($h3-font-size);\n}\n\nh4 {\n @extend %heading;\n @include font-size($h4-font-size);\n}\n\nh5 {\n @extend %heading;\n @include font-size($h5-font-size);\n}\n\nh6 {\n @extend %heading;\n @include font-size($h6-font-size);\n}\n\n\n// Reset margins on paragraphs\n//\n// Similarly, the top margin on `
<p>
`s get reset. However, we also reset the\n// bottom margin to use `rem` units instead of `em`.\n\np {\n margin-top: 0;\n margin-bottom: $paragraph-margin-bottom;\n}\n\n\n// Abbreviations\n//\n// 1. Duplicate behavior to the data-bs-* attribute for our tooltip plugin\n// 2. Add the correct text decoration in Chrome, Edge, Opera, and Safari.\n// 3. Add explicit cursor to indicate changed behavior.\n// 4. Prevent the text-decoration to be skipped.\n\nabbr[title],\nabbr[data-bs-original-title] { // 1\n text-decoration: underline dotted; // 2\n cursor: help; // 3\n text-decoration-skip-ink: none; // 4\n}\n\n\n// Address\n\naddress {\n margin-bottom: 1rem;\n font-style: normal;\n line-height: inherit;\n}\n\n\n// Lists\n\nol,\nul {\n padding-left: 2rem;\n}\n\nol,\nul,\ndl {\n margin-top: 0;\n margin-bottom: 1rem;\n}\n\nol ol,\nul ul,\nol ul,\nul ol {\n margin-bottom: 0;\n}\n\ndt {\n font-weight: $dt-font-weight;\n}\n\n// 1. Undo browser default\n\ndd {\n margin-bottom: .5rem;\n margin-left: 0; // 1\n}\n\n\n// Blockquote\n\nblockquote {\n margin: 0 0 1rem;\n}\n\n\n// Strong\n//\n// Add the correct font weight in Chrome, Edge, and Safari\n\nb,\nstrong {\n font-weight: $font-weight-bolder;\n}\n\n\n// Small\n//\n// Add the correct font size in all browsers\n\nsmall {\n @include font-size($small-font-size);\n}\n\n\n// Mark\n\nmark {\n padding: $mark-padding;\n background-color: $mark-bg;\n}\n\n\n// Sub and Sup\n//\n// Prevent `sub` and `sup` elements from affecting the line height in\n// all browsers.\n\nsub,\nsup {\n position: relative;\n @include font-size($sub-sup-font-size);\n line-height: 0;\n vertical-align: baseline;\n}\n\nsub { bottom: -.25em; }\nsup { top: -.5em; }\n\n\n// Links\n\na {\n color: $link-color;\n text-decoration: $link-decoration;\n\n &:hover {\n color: $link-hover-color;\n text-decoration: $link-hover-decoration;\n }\n}\n\n// And undo these styles for placeholder links/named anchors (without href).\n// It would be more straightforward to just use a[href] in previous block, but that\n// causes specificity issues in many other styles that are too complex to fix.\n// See https://github.com/twbs/bootstrap/issues/19402\n\na:not([href]):not([class]) {\n &,\n &:hover {\n color: inherit;\n text-decoration: none;\n }\n}\n\n\n// Code\n\npre,\ncode,\nkbd,\nsamp {\n font-family: $font-family-code;\n @include font-size(1em); // Correct the odd `em` font sizing in all browsers.\n direction: ltr #{\"/* rtl:ignore */\"};\n unicode-bidi: bidi-override;\n}\n\n// 1. Remove browser default top margin\n// 2. Reset browser default of `1em` to use `rem`s\n// 3. 
Don't allow content to break outside\n\npre {\n display: block;\n margin-top: 0; // 1\n margin-bottom: 1rem; // 2\n overflow: auto; // 3\n @include font-size($code-font-size);\n color: $pre-color;\n\n // Account for some code outputs that place code tags in pre tags\n code {\n @include font-size(inherit);\n color: inherit;\n word-break: normal;\n }\n}\n\ncode {\n @include font-size($code-font-size);\n color: $code-color;\n word-wrap: break-word;\n\n // Streamline the style when inside anchors to avoid broken underline and more\n a > & {\n color: inherit;\n }\n}\n\nkbd {\n padding: $kbd-padding-y $kbd-padding-x;\n @include font-size($kbd-font-size);\n color: $kbd-color;\n background-color: $kbd-bg;\n @include border-radius($border-radius-sm);\n\n kbd {\n padding: 0;\n @include font-size(1em);\n font-weight: $nested-kbd-font-weight;\n }\n}\n\n\n// Figures\n//\n// Apply a consistent margin strategy (matches our type styles).\n\nfigure {\n margin: 0 0 1rem;\n}\n\n\n// Images and content\n\nimg,\nsvg {\n vertical-align: middle;\n}\n\n\n// Tables\n//\n// Prevent double borders\n\ntable {\n caption-side: bottom;\n border-collapse: collapse;\n}\n\ncaption {\n padding-top: $table-cell-padding-y;\n padding-bottom: $table-cell-padding-y;\n color: $table-caption-color;\n text-align: left;\n}\n\n// 1. Removes font-weight bold by inheriting\n// 2. Matches default `` alignment by inheriting `text-align`.\n// 3. Fix alignment for Safari\n\nth {\n font-weight: $table-th-font-weight; // 1\n text-align: inherit; // 2\n text-align: -webkit-match-parent; // 3\n}\n\nthead,\ntbody,\ntfoot,\ntr,\ntd,\nth {\n border-color: inherit;\n border-style: solid;\n border-width: 0;\n}\n\n\n// Forms\n//\n// 1. Allow labels to use `margin` for spacing.\n\nlabel {\n display: inline-block; // 1\n}\n\n// Remove the default `border-radius` that macOS Chrome adds.\n// See https://github.com/twbs/bootstrap/issues/24093\n\nbutton {\n // stylelint-disable-next-line property-disallowed-list\n border-radius: 0;\n}\n\n// Explicitly remove focus outline in Chromium when it shouldn't be\n// visible (e.g. as result of mouse click or touch tap). It already\n// should be doing this automatically, but seems to currently be\n// confused and applies its very visible two-tone outline anyway.\n\nbutton:focus:not(:focus-visible) {\n outline: 0;\n}\n\n// 1. Remove the margin in Firefox and Safari\n\ninput,\nbutton,\nselect,\noptgroup,\ntextarea {\n margin: 0; // 1\n font-family: inherit;\n @include font-size(inherit);\n line-height: inherit;\n}\n\n// Remove the inheritance of text transform in Firefox\nbutton,\nselect {\n text-transform: none;\n}\n// Set the cursor for non-` +
<button>
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Astronomers and Engineers Use a Grid of Computers at a National Scale to Study the Universe 300 Times Faster +

+

Data Processing for Very Large Array Makes Deepest Radio Image of the Hubble Ultra Deep Field

+ +

The Universe is almost inconceivably vast. So is the amount of data astronomers collect when they study it. This is a challenging process for the scientists and engineers at the U.S. National Science Foundation’s National Radio Astronomy Observatory (NRAO). But what if they could do it over 300 times faster?

+ +

The NRAO manages some of the largest and most used radio telescopes in the world, including the NSF’s Karl G. Jansky Very Large Array (VLA). When these telescopes are observing the Universe, they collect vast amounts of data, for hours, months, even years at a time, depending on what they are studying. “We made a single deep image of a small portion of the sky with nearly 2 Terabytes of data – equivalent to 1350 photos taken with a phone every day for 2 years. There are other projects that use the VLA to collect many 100s of Terabytes of data!”, explains Sanjay Bhatnagar, a scientist at the NRAO leading the Algorithms R&D Group. “Traditional ways of processing this data can take months or even years to finish – much longer than most existing supercomputing centers are optimized for.”

+ +

Looking for a more efficient way to process a particularly large VLA data set, to produce one of the deepest radio images of the Hubble Ultra Deep Field (HUDF), made famous by the Hubble Telescope, NRAO staff decided to try a different approach. “Earlier attempts using CPU cores in a supercomputer center took over 10 days to convert a Terabyte of data to an image. In contrast, our approach takes only about one hour”, shares NRAO software engineer Felipe Madsen.

+ +

Processing at the rate of more than 1 Terabyte of data per hour, we made one of the deepest radio images ever made with a noise of 1 micro Jy/beam.

+ +

How is this possible? Rather than sending petabytes of data to one supercomputing facility, the data was divided into pieces and distributed to smaller banks of GPU-equipped computers at university computing centers across the country, both large and small.

+ +
+Map of North America and access points from PATh +
The distribution of compute capacity used for these results. Data and jobs from NRAO DSOC were placed at the access point (AP) provided by the PATh project in Madison, Wisconsin. Credit: S. Dagnello NRAO/AUI/NSF
+
+ +

The NRAO team, led by Bhatnagar from the Domenici Science Operations Center (DSOC) in Socorro, New Mexico, worked with the team at the Center for High Throughput Computing (CHTC) at the University of Wisconsin–Madison, led by Brian Bockelman, to become the first to demonstrate an end-to-end radio astronomy imaging workflow harnessing computing capacity distributed across the US. “This spanned the nation from California to Clemson. We had the most universities contributing to a single, GPU-based workload, from large institutions like the University of California San Diego to small ones like Emporia State University”, explains Bockelman. “We believe that researchers should have quick and easy access to the nation’s investments in computing capacity and the best way to do this is through sharing”.

+ +
+Image of Hubble Ultra-deep Field at S-Band +
Image of Hubble Ultra-deep Field at S-Band
+
+ +

These distributed capacity contributions were united using open-source technologies like HTCondor for computing and Pelican for data delivery, developed by the NSF-funded Partnership to Advance Throughput Computing (PATh; NSF grant #2030508) and the Pelican Project (NSF grant #2331480), respectively. These technologies power the Open Science Pool (OSPool), which stitches together computers at universities across the country, including those in the San Diego Supercomputer Center (SDSC)-led National Research Platform (NRP).

+ +

“The data was accessed via the National Research Platform (NRP) data caches deployed in the network backbone of Internet2 and federated into the Open Science Data Federation,” said SDSC Director Frank Würthwein. “NRAO thus validated a modus operandi we expect to become more and more common as we democratize access and ownership of cyberinfrastructure for open science, especially in light of the growth of AI research and education at all scales.”

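To make the pattern above concrete, the following is a minimal sketch of what an OSPool-style HTCondor submit description for this kind of workload could look like, with inputs pulled through the OSDF/Pelican data federation instead of being shipped from the submit machine. The executable name, resource requests, and the osdf:/// namespace path are hypothetical illustrations, not NRAO's actual configuration.

    # Hypothetical submit description: one GPU imaging job per data chunk,
    # with input data delivered through the OSDF (Pelican) at run time.
    executable              = image_chunk.sh
    arguments               = chunk_$(Process).ms
    transfer_input_files    = osdf:///example-namespace/vla/chunk_$(Process).ms
    should_transfer_files   = YES
    when_to_transfer_output = ON_EXIT
    request_gpus            = 1
    request_cpus            = 1
    request_memory          = 8GB
    request_disk            = 16GB
    output = logs/chunk_$(Process).out
    error  = logs/chunk_$(Process).err
    log    = imaging.log
    queue 100

Each queued job then lands wherever matching GPU capacity is free in the OSPool, which is what lets a single workload span institutions from California to Clemson.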
+ +

This test wasn’t just done to benefit astronomers who want to make deep images to study the universe at radio frequencies with current telescopes. It lays the groundwork for much larger projects in the future. “The next generation Very Large Array (ngVLA) will be producing 100x more data than what we used for this test. This work gives us the confidence that we can tackle large volumes of data that we’ll have from the ngVLA one day”, says Bhatnagar with cautious optimism. “We hope the success of this test inspires other radio astronomers to dream big. If the open computing capacity offered by the OSPool works for our NRAO lab, it will work for others. Researchers from small universities with little or no computing power can do this, too”.

+ +

This won’t be the last time NRAO experiments with dispersed data processing. Says Preshanth Jagannathan, a scientist in the NRAO team, “The experiment showed us where we, and the CHTC, can jointly make improvements in the way the OSPool delivers open capacity to the radio astronomers. Both teams are eagerly looking forward to the next step and continued collaboration.”

+ +

About NRAO

+ +

The National Radio Astronomy Observatory (NRAO) is a facility of the National Science Foundation, operated under cooperative agreement by Associated Universities, Inc.

+ +

Original Article

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/astronomy-archives.html b/preview-fall2024-info/astronomy-archives.html new file mode 100644 index 000000000..4f9db2cae --- /dev/null +++ b/preview-fall2024-info/astronomy-archives.html @@ -0,0 +1,405 @@ + + + + + + +Astronomy archives are creating new science every day + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Astronomy archives are creating new science every day +

+

Accumulated data sets from past and current astronomy research are not dead. Researchers are still doing new science with old data and still making new discoveries.

+ +

Steve Groom serves as task manager for the NASA/IPAC Infrared Science Archive (IRSA), part of the Infrared Processing and Analysis Center (IPAC) located on the campus of the California Institute of Technology (Caltech) in Pasadena, California. He and his colleagues archive the data sets from NASA’s infrared astronomy missions. By preserving the data, they enable more research. One of the most valuable of these missions is the Spitzer Space Telescope, which was recently instrumental in confirming the existence of TRAPPIST-1, a star with several Earth-like planets.

+ +
+
+
The TRAPPIST-1 System
+

+ The TRAPPIST-1 system contains a total of seven planets, all around the size of Earth. + Three of them — TRAPPIST-1e, f and g — dwell in their star’s so-called “habitable zone.” + The habitable zone, or Goldilocks zone, is a band around every star (shown here in green) + where astronomers have calculated that temperatures are just right — not too hot, not too + cold — for liquid water to pool on the surface of an Earth-like world. Courtesy NASA/JPL-Caltech. +

+
+
+ TRAPPIST-1 system +
+
+ +

“For example, we are learning how galaxies form by looking at patterns,” says Groom. A partner, the NASA/IPAC Extragalactic Database (NED), compiles measurements of galaxies into a coherent database. Researchers discovered a whole new, previously unknown class of galaxy (a superluminous spiral). “Theories said it shouldn’t exist, but the data was there, and someone mined the data. These huge data points need to be put to use, so new science can be done. That can only be achieved through computing.”

+ +
+
+ The Spitzer Space Telescope CTA +
+
+

+ Figure 2: The Spitzer Space Telescope cryogenic telescope assembly (CTA) + being prepared for vibration testing. Courtesy NASA/JPL-Caltech. +

+
+
+ +

“We are data recyclers,” says Groom. “We exist to support research that the original mission did not envision or could not do. We are now looking at computing because the data volumes are getting really large.”

+ +

Groom presented his work at the OSG All Hands Meeting in March 2017, including research highlights of science that has been done using the archive’s data. Because of this data tsunami, Groom is exploring the use of the Open Science Pool.

+ +

“New science is being done not by retrieving a single observation but by looking at large numbers of observations and looking for patterns,” says Groom. To see those patterns, researchers must process all the data or large amounts of it at once. But the data are becoming so large that they are becoming difficult to move.

+ +

“We have a lot of data but not a lot of computing resources to make available to outside researchers,” says Groom. “Researchers have computing but not the data. We are looking to bridge that gap.”

+ +

The Wide-field Infrared Survey Explorer (WISE), which maps the sky in infrared wavelengths, was reactivated in 2013 and renamed NEOWISE to search for solar system objects like asteroids. The data sets consist of both images of the entire sky and catalogs of measurements made on those images. “If a researcher wants to download the catalog, the logistics are difficult,” says Groom. “In terms of a spreadsheet—even though it isn’t in that format—it would be 120 billion rows. We recently produced a subset of 9.5 billion rows and the compressed ASCII text files are three terabytes.” The researchers must download it, uncompress it, and put it into a usable format.

+ +

Now comes the role of computing. “Researchers need access to capabilities,” says Groom. “We have limited staff, but our purpose is to help researchers get access to that data. So, we are looking for shared computing resources where researchers can get access to the data sets and do the computing. The OSG’s computing resources and good network connectivity make that a good option for the remote researcher who may have neither.”

+ +

Increasingly, they see that researchers need to do large-scale data processing, like fast inventories or visualizations, and produce large-scale mosaics of survey data that are useful for researchers.
+The archive can use the OSG to produce these short-term reference products. Groom also wants to understand how their research partners could use the OSG. “We want to provide access without the need to download everything,” says Groom. “Data centers and other archives like us that NASA funds all have large data sets. When a researcher wants to combine data from different sites, they make that happen through high performance computing and high performance networking. We need to become like a third-party broker for the research.”

+ +

Astronomy is a relative newcomer to the need for high-performance and high-throughput computing. Astronomers now have far more data, much as physics did years ago.

+ +

“We help people get science out of existing data,” says Groom. “NASA funds missions like the Spitzer Space Telescope, Hubble, and other survey missions to do certain kinds of science. We track publication metrics (of science done using astronomy datasets), and we look for references to our data sets. We’ve found in some of our major NASA missions that, a few years after these missions go into operation, the number of papers produced by archival research grows to exceed that of the originally funded missions.”

+ +

“For example, a Principal Investigator can write a proposal to observe a particular part of the sky with the Spitzer Space Telescope. Once the data are public, anyone can browse the library of Spitzer data and reuse the data for their own (different) science goals, even if they were not involved with the original proposal,” says Groom.

+ +

Some researchers look at patterns in Spitzer Space Telescope images and apply machine learning techniques to look for classes of objects based on their shape. “Sometimes a star has something like a bubble shell around it (dust has been cleared out by these bubbles),” says Groom. “Researchers have adopted machine learning to look for these bubbles. They have to download huge data sets to do this. The data sets have the potential to be mined but need computing resources.”

+ +

Many other astronomy projects are also producing massive data sets. Groom’s organization is also involved in the Large Synoptic Survey Telescope (LSST) in Chile, which can take images every 30 seconds. “Astronomy is now seeing things as a movie and creating new branches of science,” says Groom. This increases the volume of data; the LSST by itself will produce huge amounts.

+ +

The Zwicky Transient Facility (ZTF) at Palomar Observatory, with a 600-megapixel camera, will go online this summer. It can watch things unfold in near-real time. The ZTF will observe 1 million events per night. Again, this means much more data.

+ +

Next steps

+ +

The archive is seeking to expand the resources it can offer to researchers and to find ways to better combine community computing resources, such as the OSG, with those at the archive. “We either need to adjust funding or provide more funding,” says Groom. “We have to focus on the task at hand: We have all this data that needs computing. Astronomy is late to the big-data game and astronomers are new to large data sets. Our job is to help our community get up to speed. We are also talking to our peer archives to hold workshops about ways to write programs to grab data.”

+ + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/bat-genomics.html b/preview-fall2024-info/bat-genomics.html new file mode 100644 index 000000000..54fac2712 --- /dev/null +++ b/preview-fall2024-info/bat-genomics.html @@ -0,0 +1,407 @@ + + + + + + +80,000 jobs, 40 billion base pairs, and 20 bats –– all in 4 weeks + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ 80,000 jobs, 40 billion base pairs, and 20 bats –– all in 4 weeks +

+

An evolutionary biologist at the AMNH used HTC services provided by the OSG to unlock a genomic basis for convergent evolution in bats.

+ +
+ Ariadna Morales +
Ariadna Morales (Credit: AMNH)
+
+ +

Ariadna Morales, a Gerstner postdoctoral fellow at the American Museum of Natural History (AMNH) from 2018 to 2020, used the fabric of services provided by the OSG +consortium to single-handedly tackle her most computationally-intensive project yet. In only 4 weeks, she ran 80,000 jobs to analyze 20 bat genomes –– a task that +would have taken over 4 months to complete, even on the AMNH’s significant local resources. Managing her computational workload through an AMNH local access point +that harnessed the distributed high-throughput computing (HTC) capacity of the Open Science Pool (OSPool), Morales was able to complete a project that typically +would require a team of researchers and far more time.

+ +

Morales is an evolutionary biologist, though she enthusiastically refers to herself as a “bat biologist.” Her research at the AMNH focused on Myotis, the largest genus of bats, whose species span all continents except Antarctica. Despite this broad geographic distribution, these different Myotis species use the same foraging strategies to catch insects, their favorite meal.

+ +

This phenomenon of similar traits independently evolving in different species is known as convergent evolution, and was the focus of Morales’s project at the AMNH. +By analyzing the genomes of different Myotis species, she hoped to confirm whether the same genes were being used for the same purposes, despite the fact that +these species have been isolated from each other for millions of years.

+ +

“It’s very interesting to study the genetic mechanisms that led to developing the traits that help bats catch insects, like longer feet or hairy wings,” Morales +explains. “We don’t know if the same genes were used and simply turned on and off depending on environmental pressures, or if different regions of the genome +evolved to have the same function.”

+ +

The answers to her questions were buried within the bat genome, a string of over 2 billion base pairs. This number isn’t all that remarkable from a genomic perspective, but it becomes colossal in the world of data processing. Describing the scope of this challenge, Morales reasons, “If we put the genome together, the letters could probably go to the moon and back. Analyzing that in a single analysis is not even possible.”

+ +

Morales was able to work with Sajesh Singh, who manages research computing at AMNH and has worked closely with OSG staff ever since the two organizations began collaborating in 2019. Reflecting on the impacts of this collaboration, Singh remarks: “Since partnering with the OSG, AMNH has been able to provide computing resources to its researchers that have allowed them to reduce the time needed to complete their computational work down from years to weeks in some instances.”

+ +

And Morales’s project was no exception. With Singh’s help, Morales split her gigantic genomic datasets into manageable pieces and moved her work to the local OSG +access point at the AMNH, where her jobs could be easily managed as a large HTC workload on the OSPool’s tremendous capacity.

+ +

The vast majority of the jobs were genome assemblies, in which Morales used the genome of a closely related bat species to construct the Myotis genome. Each job covered a genomic region of about 10,000 base pairs and ran for approximately one hour on a single CPU; collectively, the workload drew on thousands of concurrent cores across the OSPool. One of the features offered by the local AMNH access point –– the HTCondor job scheduler –– queued and managed Morales’s jobs, allowing her to begin annotating and analyzing the bat genomes and drastically reducing her time to results.

+ +
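For readers curious what such a workload looks like in practice, below is a minimal sketch of an HTCondor submit description of the general kind used on OSG access points. Every file name and resource value here is hypothetical, not Morales’s actual configuration; the point is only that one short file can queue tens of thousands of independent jobs.

    # Sketch: one assembly job per genomic region; all names and values
    # below are illustrative, not the actual AMNH setup.
    executable              = assemble_region.sh
    arguments               = $(region)
    transfer_input_files    = reference_genome.fa, regions/$(region).fa
    should_transfer_files   = YES
    when_to_transfer_output = ON_EXIT
    request_cpus            = 1
    request_memory          = 2GB
    request_disk            = 4GB
    log                     = logs/$(region).log
    output                  = logs/$(region).out
    error                   = logs/$(region).err
    # One job per line of regions.txt; HTCondor fills in $(region) for each.
    queue region from regions.txt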

Ultimately, this made it possible for Morales to conduct different types of analyses that strengthened support for her findings, like analyzing the genomes of other non-Myotis bats that have been studied by other scientists. This allowed her to extend the scope of her own research and also compare her results to other researchers’ data.

+ +

In her time at AMNH, Morales used nearly 23,000 core hours across the OSPool, and her collection of analyzed genomes served as strong evidence that the different +foraging strategies evolved independently in Myotis. Morales had uncovered genomic evidence of convergent evolution.

+ +

Her project was unique for several reasons. Most genomic research is done in controlled laboratory settings using humans, mice, or flies as a model organism. Researchers begin with one population, which they divide among different environments to study how specific traits change over time.

+ +

With Myotis, everything that’s traditionally manipulated by researchers in the laboratory has already occurred. Originating in Asia, these bats spread to Europe and +eventually to the Americas before they began to independently evolve different foraging strategies millions of years ago. These circumstances make Myotis the +perfect model to study convergent evolution, as Morales phrases it, through “a more realistic perspective of what’s happening in nature.”

+ +

While the model organism Morales used was unique, the tools that she integrated and the analyses she conducted are generalizable. Having previously worked with salamanders, ants, and penguins, she reasons, “the good thing is that almost all the tools we use can be applied to other organisms, including humans.”

+ +

Morales has moved on to a new position since her time at the AMNH, but HTC is still an important aspect of her work. She’s now a postdoctoral fellow at the LOEWE Center for Translational Biodiversity Genomics in Frankfurt, Germany, where she’s analyzing the genomes of bats that could be potential reservoirs of SARS-CoV-2 as part of the Bat1K Project. Applying strategies similar to those she used at the AMNH, she’s looking for genetic signatures that could be linked to higher coronavirus resistance. Describing the impacts of HTC on this work, Morales reflects: “Being able to have the results in just a couple of weeks or in a couple of days is amazing. With the COVID-19 project, this really matters for informing the research of other groups.”

+ +

At the AMNH, Morales belonged to a handful of researchers who were using the services provided by the OSG to transform their work. But beyond the AMNH’s historic +walls, Morales is part of a growing global community of researchers who are leveraging HTC to better understand the genomes of an array of species –– bats are just +the beginning.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/big-data-with-osdf.html b/preview-fall2024-info/big-data-with-osdf.html new file mode 100644 index 000000000..bde028132 --- /dev/null +++ b/preview-fall2024-info/big-data-with-osdf.html @@ -0,0 +1,374 @@ + + + + + + +Addressing the challenges of transferring large datasets with the OSDF + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Addressing the challenges of transferring large datasets with the OSDF +

+

Aashish Tripathee has used multiple file transfer systems and experienced challenges with each before using the Open Science Data Federation (OSDF). With the OSDF, Tripathee has already seen an improvement in data transfers.

+ +
+Map of OSDF use +
+ +

Map featuring the locations of current OSDF caches in the federation.

+ +

When transferring big datasets, researchers may face numerous obstacles. University of Michigan Physics post-doctoral research fellow and OSG David Swanson Awardee Aashish Tripathee faced these challenges too while conducting research on ripples in space-time, or continuous gravitational waves. Due to the size of his datasets, Tripathee needed to transfer over 30,000 gigabytes.

+ +

Over a year ago, Tripathee began using the Open Science Data Federation (OSDF) service that allows for the efficient delivery of data to compute as part of his workflows on the OSPool. The OSDF supports the sharing of datasets staged in autonomous “origins” that can move data to compute resources around the world through a global network of caches.

+ +

Before using the OSDF, Tripathee had experimented with a variety of data delivery systems. Though he saw successes with these systems, he still encountered problems due to the type of data he was using and the volume he was transferring: about 2–3% of his jobs failed because transfers halted completely or ran far too slowly.

+ +

Upon switching to the OSDF, Tripathee saw substantial improvements: a significant number of jobs now run to completion with far fewer data transfer problems, and only about 0.5% of his jobs still show transfer issues.

+ +
+ Physics post-doctoral research fellow at the University of Michigan Aashish Tripathee. +
Physics post-doctoral research fellow at
the University of Michigan Aashish Tripathee.
+
+ +

The data transfer and storage problems Tripathee encountered are not isolated — difficulties in combining datasets and computing infrastructure are endemic across science. The OSDF is powered by the Pelican Platform, which is supported by a new $7.0 million grant from the National Science Foundation (NSF) (OAC-2331480). The project is led by Brian Bockelman of the Morgridge Institute for Research; Miron Livny, director of the Center for High Throughput Computing (CHTC) at the University of Wisconsin–Madison; and Frank Würthwein, Executive Director of the San Diego Supercomputer Center at the University of California, San Diego. Through this project, the Pelican software will be enhanced to make the OSDF more reliable, easier to use, and more accessible to other fields of science.

+ +

The switch to the new platform was simple given its integration within the OSPool, which is closely related to the Pelican project at the CHTC. Tripathee reports the few changes he did make were well-documented in the OSPool documentation, which “tells you exactly what you change depending on, for example, what submit node you’re running from and where you want to store the data.” What it came down to for Tripathee, ultimately, was changing a few lines of code.

+ +
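As a hypothetical illustration of how small that change can be: the OSPool documentation describes referring to files staged in OSDF origins with osdf:// URLs directly in a job’s HTCondor file-transfer lists, along the lines of the snippet below. The /ospool path and file names are made-up placeholders, not Tripathee’s actual configuration.

    # Illustrative submit-file lines only; paths are placeholders.
    transfer_input_files   = osdf:///ospool/ap20/data/<username>/frames/segment_$(Process).h5
    transfer_output_remaps = "out.dat = osdf:///ospool/ap20/data/<username>/results/out_$(Process).dat"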

Tripathee is part of the Laser Interferometer Gravitational-Wave Observatory (LIGO) collaboration, which is starting to adopt the OSDF as part of its computing infrastructure, he mentions. While LIGO’s use of the OSDF is not yet widespread, Tripathee says he’s been using it for a couple of months and plans to continue with it since it’s been working well for him. As more LIGO members begin using the OSDF, Tripathee predicts that it is “definitely going to make a big difference in the jobs’ efficiency and reduce the number of wasted job hours.”

+ +

In May 2023, LIGO started its fourth observation run, meaning researchers, including Tripathee, will be able to analyze that data soon, which he looks forward to doing. Tripathee recalls that during the third observation run the researchers experienced hiccups with file transfers, but he now hopes that retrieving data from the OSDF will go more quickly and smoothly. He anticipates fewer failed jobs and fewer wasted computing cycles. “As more and more universities start attaching servers, it’s going to make it even faster and faster. I’m very excited about its prospects going into the future,” Tripathee says.

+ +

In February 2024, Tripathee’s research from the third observation run was published in Physical Review D. Searching for gravitational waves, Tripathee focused on isolated neutron stars, a potential source for these waves. Working alongside Dr. Keith Riles of the University of Michigan, the duo determined the best upper limits for circular polarization — bringing researchers closer to detecting a continuous wave signal.

+ +

What he learned from the other services he used compared to the OSDF was that “for OSDF, people from all over the world are using it — it’s distributed everywhere, so it serves as a more general-purpose storage,” enabling his jobs to access data from nearby hardware instead of potentially retrieving it from across the world.

+ +

When working with big datasets that require a great amount of computing power, researchers — Tripathee among them — face obstacles they must jump over, duck under, or push through. Pushing straight through may be the quickest and most direct route, but it often takes the most resources. The OSDF aims to be the invaluable resource researchers reach for when they need to push their way through that obstacle.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/branding.html b/preview-fall2024-info/branding.html new file mode 100644 index 000000000..eb7d81989 --- /dev/null +++ b/preview-fall2024-info/branding.html @@ -0,0 +1,362 @@ + + + + + + +CHTC Branding + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ CHTC Branding +

+

Colors

+ +

CHTC Red: #B61F24

+ +

CHTC Black: #000000

+ +

Logos

+ + + +

CHTC Logo W/Text +CHTC Logo

+ +
+

White CHTC Logo W/Text +White CHTC Logo

+
+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/california-megafires.html b/preview-fall2024-info/california-megafires.html new file mode 100644 index 000000000..59322fc63 --- /dev/null +++ b/preview-fall2024-info/california-megafires.html @@ -0,0 +1,387 @@ + + + + + + +Ecologists utilizing HTC to examine the effects of megafires on wildlife + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Ecologists utilizing HTC to examine the effects of megafires on wildlife +

+

Studying the impact of two high-fire years in California on over 600 species, ecologists enlist help from CHTC.

+ +

The western United States is seeing an unprecedented increase in wildfires. Researcher +Jessalyn Ayars and her +team examined how severe wildfires impact the habitats of over 600 species across two megafire +years in California. Ayars, a former Post-Baccalaureate Research Fellow at the Rocky Mountain +Research Station located in Colorado, investigated this impact on wildlife with Dr. Gavin Jones +and Dr. Anu Kramer of the University of Wisconsin—Madison. +Their research was enabled by capacity provided by the Center for High Throughput Computing (CHTC) +and published in the Proceedings of the National Academy of Sciences.

+ +

“Megafires are extremely large, high-severity fires. They are increasing worldwide, especially in the western U.S.,” Ayars explained. “In 2020 and 2021 California experienced a severe fire season.” California was also a favorable study area because of its extensive maps showing habitat suitability for all vertebrate species. Typically, ecologists study the effects of wildfire on one species or a small number of species — Ayars’ research is novel in that it surveys a wide range of species.

+ +

Surveying a wide range of species across a state led to a bottleneck of data to analyze: “Each species was a gigantic math problem across the state of California. We were dealing with 608 vertebrate species, each with a suitability map the size of California at a 30-meter resolution. To get our results we needed to overlay the fire maps [with] the habitat suitability maps to see how much area burned, and in what severity.”

+ +
+ Photo of Jessalyn Ayars +
Jessalyn Ayars
+
+ +

Very quickly, Ayars knew that doing this hundreds of times by hand was impractical. “That’s just so much data — +it wasn’t possible to do it all on a desktop computer,” she said. Ayars learned about the CHTC from her advisor +at the Rocky Mountain Research Station, Dr. Gavin Jones, who received his Ph.D. from UW-Madison and had connections +with CHTC from earlier research.

+ +

The CHTC Research Computing Facilitators (RCF) helped Ayars and her team break down large chunks of data into smaller jobs that could scale out to run simultaneously using capacity provided by the NSF-funded OSPool. “The folks at CHTC were super helpful in setting me up for all the processing, breaking down that giant problem into one species and one fire severity at a time so they could run in parallel” across the more than 50 sites that contribute capacity to the OSPool, she said.

+ +
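A hypothetical sketch of that decomposition: HTCondor’s queue statement can read (species, severity) pairs from a list and start one job per pair, so the 608-species problem becomes thousands of small, independent map overlays. All names below are invented for illustration, not the team’s actual files.

    # Sketch: one job per (species, fire severity) pair; names illustrative.
    executable           = overlay_maps.sh
    arguments            = $(species) $(severity)
    transfer_input_files = fire/$(severity).tif, habitat/$(species).tif
    request_cpus         = 1
    request_memory       = 4GB
    # pairs.txt holds lines such as "species_0417, high"
    queue species, severity from pairs.txt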

“I would recommend anyone interested in using HTC [high throughput computing] or just curious about whether or +not it would be helpful for their work to reach out to CHTC,” she said. “It’s a great resource and they’re great +at teaching you.” Ayars gave a special shout-out to Rachel Lombardi and Christina Koch, two RCFs at CHTC. Research computing facilitators +help new or potential users of CHTC services understand what computing resources are available on and off the UW-Madison +campus and provide support for adapting workloads to harness HTC.

+ +

Ayars hopes that her team’s work will be a “call to action” for other wildlife ecologists studying the impact of wildfires +on species. “These conditions are so different from what wildlife evolved in, and from what researchers have studied wildfire +in. It’s hard to say how wildlife will respond to these wildfires going forward.”

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/campus-onboarding.html b/preview-fall2024-info/campus-onboarding.html new file mode 100644 index 000000000..8b728e4aa --- /dev/null +++ b/preview-fall2024-info/campus-onboarding.html @@ -0,0 +1,431 @@ + + + + + + +“Becoming part of something bigger” motivates campus contributions to the OSPool + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ “Becoming part of something bigger” motivates campus contributions to the OSPool +

+

A spotlight on two newer contributors to the OSPool and the onboarding process.

+ +

A campus’ motivation to contribute computing capacity to the Open Science Pool (OSPool), +an internationally recognized resource supporting scientific research, can be distilled down to the desire to “become part of something bigger,” +says OSG Campus Coordinator Tim Cartwright. The “something bigger” refers to national cyberinfrastructure. +By sharing idle, unused capacity with institutions nationwide, contributors enhance the OSPool and contribute to the science executed by researchers +utilizing this pool.

+ +
+ Tim Cartwright, OSG Campus Coordinator. +
Tim Cartwright, OSG Campus Coordinator
+
+ +

Approximately 80% of OSPool member schools donate capacity to the OSPool after receiving a Campus Cyberinfrastructure (CC*) grant from the +National Science Foundation (NSF), which requires dedicating 20% of +computing capacity to a larger entity like the OSPool. Campuses choose the OSPool to provide this capacity, in part, because it is a readily implemented +approach to meet this requirement without impeding research happening on-campus. Leading the onboarding efforts, Cartwright and OSG staff have developed +a straightforward, fairly easy-to-implement approach for campuses who wish to contribute capacity. Cartwright describes the growth of the OSPool as “an +incredible boom” since 2020. In the past year, about 70 institutions have contributed to the OSPool.

+ +

A closer look at the journey of two new OSPool members, Montana State University and The University of Michigan-Ann Arbor +illustrates the motivations and experiences of campuses when integrating some of their capacity into the OSPool.

+ +

Montana State University

+ +

Coltran Hophan-Nichols, Director of Systems and Research Computing at Montana State, approached the OSG Consortium before +applying for a Campus Cyberinfrastructure (CC*) grant. Familiar with the OSPool, he knew it would be a logical choice to fulfill the 20% requirement.

+ +

Along with growing student interest in HPC and HTC, Montana State needed to provide new computational resources for fields such as quantum science, artificial +intelligence and precision agriculture that were expanding rapidly. Hophan-Nichols knew that the OSPool could augment these much-needed resources for researchers +while allowing Montana State to give back capacity that would otherwise sit idle. “We pursued the OSPool because it provides national-level access while being flexible +[with allocations],” Hophan-Nichols said. “We’re able to contribute significant capacity without impacting what researchers here can do.”

+ +

“The integration itself is a relatively simple process,” Cartwright said, consisting of two meetings with the campus staff and Cartwright, plus OSG Operations team +members. The first meeting is a “kickoff,” where Cartwright and the campus staff talk through the technical aspects of integration. Much of the work occurs between +the two meetings, with campus staff setting up access to their cluster and OSG staff preparing connection and service configuration. The second meeting is the actual +integration to the OSPool, which involves setting up new OSG services to connect the site and manually verifying correct operations.

+ +

During the integration meeting, the OSG team verifies that access to the site works as expected, that manual tests succeed and that the end-to-end automated processes function. To alleviate security concerns, Cartwright explains that connections into the campus system are limited to one common service (SSH) and, even then, only to one computer within the campus. All other network connections are established from within the campus to external systems. “We have tried to make it as minimally intrusive as we possibly can to work with individual campuses and what their security teams are comfortable with,” he said.

+ +

Regardless of how much is done to prepare, some hiccups occur. Montana State “had to make minor tweaks to configuration changes, which ultimately sped up transfer +for OSPool and local transfers,” Hophan-Nichols said. The OSG Operations team and Cartwright also try to identify common issues and troubleshoot them before the integration.

+ +

After making sure that connections were working and jobs were starting to run, Montana State kept its contributed capacity small to ensure everything was working properly. Since then, Hophan-Nichols has worked with Cartwright to scale up availability. When the campus first joined, it was contributing fewer than 1,000 jobs per day; now it contributes up to 181,000 jobs per day, and over 2.53 million jobs in total from January through March.

+ +

“It’s been mutually beneficial,” Hophan-Nichols said. “There is next to no impact on the availability of capacity for local researchers and we still +have a significant chunk of resources we’re able to contribute to the OSPool.”

+ +

The Michigan HORUS Project

+ +

The HORUS Project, a joint effort among the University of Michigan-Ann Arbor (U-M), Merit Network, Michigan State University and Wayne State University (WSU), integrated some of its computing capacity into the OSPool in January 2024. The HORUS regional compute project, building upon the previous OSiRIS project, exists to grow statewide computing and storage capacity, as well as to contribute open capacity. Shawn McKee, a Research Scientist at U-M, and his colleagues at Merit and WSU secured a CC* grant to create HORUS and begin contributing capacity to the OSPool. “We had been planning to join for a while, but we managed to get everything operational earlier this year,” he said.

+ +
+ HORUS logo. +
HORUS logo, inspired by the Egyptian god Horus. Created by Michelle David of Michigan State University, courtesy of HORUS website.
+
+ +

HORUS project team members faced unique technical challenges trying to combine their existing statewide system with the broader OSPool. Between the initial meeting +and the onboarding, McKee and his colleagues established a secure transfer node for the OSG Consortium to use. Similar to Montana State, the HORUS project engineers +have a strong background in research computing which made the integration straightforward. In the end, connecting via SSH jump hosts and routing jobs to all three +campuses only took 40 minutes. “Pretty quickly, ‘Hello World!’ worked right away and users could start using it,” McKee recalled.

+ +

McKee also values the OSPool for its ability to smoothly fulfill the 20% requirement for their CC* grant. Beyond this, the OSPool offers more capacity to researchers +and accesses capacity from the HORUS project that would otherwise sit idle. “It was great to have the OSG Consortium come in and start utilizing large memory and +compute nodes that were only lightly loaded,” McKee said. “There was significant idle time that now the OSPool can use.”

+ +

Across the HORUS project, McKee identified at least four researchers interested in using idle resources in the OSPool and is excited to keep growing campus involvement. +At U-M, PI Keith Riles uses the OSPool for work in gravitational physics. Through the OSPool, Riles has +run over 200,000 jobs across 52 facilities. At WSU, PI Chun Shen uses the OSPool for work in nuclear physics, +utilizing its capacity to run over 13 million jobs across 41 facilities.

+ +

Once campuses are onboarded, OSG staff continue to collaborate with campus personnel. Beginning in February, they introduced OSG Campus Meet-Ups, a weekly +campus-focused video conference where campus staff can talk and learn from each other or OSG staff. Throughput Computing +and OSG School, two events in the summer, also offer in-person opportunities for campus staff to visit OSG staff and other campuses on the University of Wisconsin–Madison campus.

+ +

Prospective Campuses

+ +

The NSF CC* program provides unique access to resources and funding to improve campus research. CC* applicants can receive a letter of collaboration from one +of the PATh PIs for submission. For more information, +visit the PATh website instructions.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/cgi-chtc/get-started.php b/preview-fall2024-info/cgi-chtc/get-started.php new file mode 120000 index 000000000..d6222ae2b --- /dev/null +++ b/preview-fall2024-info/cgi-chtc/get-started.php @@ -0,0 +1 @@ +get-started_captcha.php \ No newline at end of file diff --git a/preview-fall2024-info/cgi-chtc/get-started_captcha.php b/preview-fall2024-info/cgi-chtc/get-started_captcha.php new file mode 100755 index 000000000..51bd92938 --- /dev/null +++ b/preview-fall2024-info/cgi-chtc/get-started_captcha.php @@ -0,0 +1,84 @@ + $recaptcha_secret_key, + "response" => $recaptcha_response, + ); + + $curl = curl_init($recaptcha_url); + curl_setopt($curl, CURLOPT_POST, true); // send POST + curl_setopt($curl, CURLOPT_POSTFIELDS, $recaptcha_post_data); + curl_setopt($curl, CURLOPT_RETURNTRANSFER, true); // return JSON + $json_response = curl_exec($curl); + curl_close($curl); + + $recaptcha_data = json_decode(trim($json_response), True); + $recaptcha_success = $recaptcha_data["success"]; + + $messg = ""; + foreach ($_POST as $key=>$value) { + //printf("[$key] => $value\n"); + $messg = $messg . "[$key] => $value\n"; + if($key == "NetID") { + $email = ($value . "@wisc.edu"); + //printf("set email to $email\n"); + } + if($key == "FullName" && $value) { + $fullname = $value; + //printf("set fullname to $fullname\n"); + } + if($key == "PrimaryEmail" && $value) { + $email = $value; + //printf("set email to $email\n"); + } + } + + $safe_email = escapeshellarg($email); + + if($recaptcha_success === True) { // User passed captcha + //$fd = popen("mail -s 'CHTC Engagement Request' chtc@cs.wisc.edu -- -f$safe_email", "w"); + //$fd = popen("mail -s 'CHTC Engagement Request' chtc@cs.wisc.edu", "w"); + //$fd = popen("mailx -s 'CHTC Engagement Request' -r $safe_email chtc@cs.wisc.edu", "w"); + $fd = popen("mailx -s 'CHTC Engagement Request' -a 'Reply-To: $safe_email' chtc@cs.wisc.edu", "w"); + fputs($fd, $messg); + $e = pclose($fd); + + //printf("$e: email was sent from $safe_email!\n"); + + // Send them to the the thanks page + header( "Content-Type: text/html\n" ); + header( "Location: https://chtc.cs.wisc.edu/uw-research-computing/thanks\n\n" ); + + } else { // User failed captcha + // Send them back to the form page + header( "Content-Type: text/html\n" ); + header( "Location: https://chtc.cs.wisc.edu/uw-research-computing/form\n\n" ); + + } + +} else { // Some bot didn't even set the captcha field + // Send them back to the form page, I guess? + header( "Content-Type: text/html\n" ); + header( "Location: https://chtc.cs.wisc.edu/uw-research-computing/form\n\n" ); + +} + +?> + diff --git a/preview-fall2024-info/cgi-chtc/recaptchalib.php b/preview-fall2024-info/cgi-chtc/recaptchalib.php new file mode 100755 index 000000000..32c4f4d75 --- /dev/null +++ b/preview-fall2024-info/cgi-chtc/recaptchalib.php @@ -0,0 +1,277 @@ + $value ) + $req .= $key . '=' . urlencode( stripslashes($value) ) . '&'; + + // Cut the last '&' + $req=substr($req,0,strlen($req)-1); + return $req; +} + + + +/** + * Submits an HTTP POST to a reCAPTCHA server + * @param string $host + * @param string $path + * @param array $data + * @param int port + * @return array response + */ +function _recaptcha_http_post($host, $path, $data, $port = 80) { + + $req = _recaptcha_qsencode ($data); + + $http_request = "POST $path HTTP/1.0\r\n"; + $http_request .= "Host: $host\r\n"; + $http_request .= "Content-Type: application/x-www-form-urlencoded;\r\n"; + $http_request .= "Content-Length: " . strlen($req) . 
"\r\n"; + $http_request .= "User-Agent: reCAPTCHA/PHP\r\n"; + $http_request .= "\r\n"; + $http_request .= $req; + + $response = ''; + if( false == ( $fs = @fsockopen($host, $port, $errno, $errstr, 10) ) ) { + die ('Could not open socket'); + } + + fwrite($fs, $http_request); + + while ( !feof($fs) ) + $response .= fgets($fs, 1160); // One TCP-IP packet + fclose($fs); + $response = explode("\r\n\r\n", $response, 2); + + return $response; +} + + + +/** + * Gets the challenge HTML (javascript and non-javascript version). + * This is called from the browser, and the resulting reCAPTCHA HTML widget + * is embedded within the HTML form it was called from. + * @param string $pubkey A public key for reCAPTCHA + * @param string $error The error given by reCAPTCHA (optional, default is null) + * @param boolean $use_ssl Should the request be made over ssl? (optional, default is false) + + * @return string - The HTML to be embedded in the user's form. + */ +function recaptcha_get_html ($pubkey, $error = null, $use_ssl = false) +{ + if ($pubkey == null || $pubkey == '') { + die ("To use reCAPTCHA you must get an API key from https://www.google.com/recaptcha/admin/create"); + } + + if ($use_ssl) { + $server = RECAPTCHA_API_SECURE_SERVER; + } else { + $server = RECAPTCHA_API_SERVER; + } + + $errorpart = ""; + if ($error) { + $errorpart = "&error=" . $error; + } + return ' + + '; +} + + + + +/** + * A ReCaptchaResponse is returned from recaptcha_check_answer() + */ +class ReCaptchaResponse { + var $is_valid; + var $error; +} + + +/** + * Calls an HTTP POST function to verify if the user's guess was correct + * @param string $privkey + * @param string $remoteip + * @param string $challenge + * @param string $response + * @param array $extra_params an array of extra variables to post to the server + * @return ReCaptchaResponse + */ +function recaptcha_check_answer ($privkey, $remoteip, $challenge, $response, $extra_params = array()) +{ + if ($privkey == null || $privkey == '') { + die ("To use reCAPTCHA you must get an API key from https://www.google.com/recaptcha/admin/create"); + } + + if ($remoteip == null || $remoteip == '') { + die ("For security reasons, you must pass the remote ip to reCAPTCHA"); + } + + + + //discard spam submissions + if ($challenge == null || strlen($challenge) == 0 || $response == null || strlen($response) == 0) { + $recaptcha_response = new ReCaptchaResponse(); + $recaptcha_response->is_valid = false; + $recaptcha_response->error = 'incorrect-captcha-sol'; + return $recaptcha_response; + } + + $response = _recaptcha_http_post (RECAPTCHA_VERIFY_SERVER, "/recaptcha/api/verify", + array ( + 'privatekey' => $privkey, + 'remoteip' => $remoteip, + 'challenge' => $challenge, + 'response' => $response + ) + $extra_params + ); + + $answers = explode ("\n", $response [1]); + $recaptcha_response = new ReCaptchaResponse(); + + if (trim ($answers [0]) == 'true') { + $recaptcha_response->is_valid = true; + } + else { + $recaptcha_response->is_valid = false; + $recaptcha_response->error = $answers [1]; + } + return $recaptcha_response; + +} + +/** + * gets a URL where the user can sign up for reCAPTCHA. If your application + * has a configuration page where you enter a key, you should provide a link + * using this function. + * @param string $domain The domain where the page is hosted + * @param string $appname The name of your application + */ +function recaptcha_get_signup_url ($domain = null, $appname = null) { + return "https://www.google.com/recaptcha/admin/create?" . 
_recaptcha_qsencode (array ('domains' => $domain, 'app' => $appname)); +} + +function _recaptcha_aes_pad($val) { + $block_size = 16; + $numpad = $block_size - (strlen ($val) % $block_size); + return str_pad($val, strlen ($val) + $numpad, chr($numpad)); +} + +/* Mailhide related code */ + +function _recaptcha_aes_encrypt($val,$ky) { + if (! function_exists ("mcrypt_encrypt")) { + die ("To use reCAPTCHA Mailhide, you need to have the mcrypt php module installed."); + } + $mode=MCRYPT_MODE_CBC; + $enc=MCRYPT_RIJNDAEL_128; + $val=_recaptcha_aes_pad($val); + return mcrypt_encrypt($enc, $ky, $val, $mode, "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"); +} + + +function _recaptcha_mailhide_urlbase64 ($x) { + return strtr(base64_encode ($x), '+/', '-_'); +} + +/* gets the reCAPTCHA Mailhide url for a given email, public key and private key */ +function recaptcha_mailhide_url($pubkey, $privkey, $email) { + if ($pubkey == '' || $pubkey == null || $privkey == "" || $privkey == null) { + die ("To use reCAPTCHA Mailhide, you have to sign up for a public and private key, " . + "you can do so at http://www.google.com/recaptcha/mailhide/apikey"); + } + + + $ky = pack('H*', $privkey); + $cryptmail = _recaptcha_aes_encrypt ($email, $ky); + + return "http://www.google.com/recaptcha/mailhide/d?k=" . $pubkey . "&c=" . _recaptcha_mailhide_urlbase64 ($cryptmail); +} + +/** + * gets the parts of the email to expose to the user. + * eg, given johndoe@example,com return ["john", "example.com"]. + * the email is then displayed as john...@example.com + */ +function _recaptcha_mailhide_email_parts ($email) { + $arr = preg_split("/@/", $email ); + + if (strlen ($arr[0]) <= 4) { + $arr[0] = substr ($arr[0], 0, 1); + } else if (strlen ($arr[0]) <= 6) { + $arr[0] = substr ($arr[0], 0, 3); + } else { + $arr[0] = substr ($arr[0], 0, 4); + } + return $arr; +} + +/** + * Gets html to display an email address given a public an private key. + * to get a key, go to: + * + * http://www.google.com/recaptcha/mailhide/apikey + */ +function recaptcha_mailhide_html($pubkey, $privkey, $email) { + $emailparts = _recaptcha_mailhide_email_parts ($email); + $url = recaptcha_mailhide_url ($pubkey, $privkey, $email); + + return htmlentities($emailparts[0]) . "...@" . htmlentities ($emailparts [1]); + +} + + +?> diff --git a/preview-fall2024-info/chtc-demo.html b/preview-fall2024-info/chtc-demo.html new file mode 100644 index 000000000..1bf4cf3ba --- /dev/null +++ b/preview-fall2024-info/chtc-demo.html @@ -0,0 +1,377 @@ + + + + + + +CHTC Leads High Throughput Computing Demonstrations + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ CHTC Leads High Throughput Computing Demonstrations +

+

UW-Madison Assistant Professor Kaiping Chen is taking her life sciences communication course (LSC660) to the next level by incorporating high throughput computing (HTC) into her class.

+ +

Data science for understanding science communication involves learning to use statistical methods (e.g., chi-square tests, analysis of variance, correlation and regression analysis, nonparametric tests) and computational methods (e.g., automated text analysis, computer vision) – all of which sometimes require complex, time-consuming computing that surpasses the capacity of an everyday computer.

+ +
+ Kaiping Chen +
Kaiping Chen, Assistant
Professor of Life Science
Communication.
+
+ +

To meet this computing challenge, Chen enlisted the help of CHTC Lead Research Computing Facilitator Christina Koch in November 2022 for a demonstration for her class. Chen wanted students to:

+
    +
  • Acquire knowledge about the basic approaches for large scale computing
  • Understand the different scenarios regarding why they may need to use high throughput computing in research
  • Be able to distinguish between independent and sequential tasks
  • Be able to submit script jobs onto the campus computer cluster of CHTC
  • Obtain a basic understanding of the parallel computing implementation in R
+ +

Koch achieved these goals by presenting the uses of HTC for large scale computing and leading a hands-on demonstration with Chen to teach students how to submit and run R programming scripts for topic modeling of social media data using HTC.

+ +
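For a sense of what that submission step involves, here is a minimal sketch of an HTCondor submit file for an R job of this kind. All file names are hypothetical (run_stm.sh is an assumed wrapper script that would invoke Rscript on the analysis code), and CHTC’s own guides cover how R itself is delivered to the execute machine, e.g. via a container or a portable installation.

    # Illustrative submit file for an R topic-modeling script; names hypothetical.
    # run_stm.sh is a wrapper that calls: Rscript stm_topics.R $(chunk).csv
    executable           = run_stm.sh
    arguments            = $(chunk)
    transfer_input_files = stm_topics.R, data/$(chunk).csv
    request_cpus         = 1
    request_memory       = 8GB
    queue chunk from chunks.txt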

This learning, Chen noted, served as a tool to help students convert theoretical, class-based knowledge into more practical abilities, including learning how to approach computational tasks that could be useful in future work. Two examples of such complex computational tasks are structural topic models (STMs) and regression models. STM uses unsupervised machine learning to identify keywords and major themes across a large corpus and interpret them in human-readable formats for data analysis. It is also useful for comparing social media influencer and non-influencer perspectives on science issues.

+ +

The majority of the students in the class, while new to CHTC resources, found the class to be a good introduction to HTC. LSC Ph.D. student Ashley Cate was a prime example: “I am still an extreme novice when it comes to understanding all the options CHTC has to offer. However, one thing that Christina Koch made very clear is that you’re not alone in your endeavor of utilizing HTC to meet your research needs, and I feel very confident that the professionals would be able to work me through how CHTC could help me.” Jocelyn Cao, a master’s student in Life Sciences Communication, reported, “I do think I will be utilizing CHTC in my future work because I am interested in doing work with social media.”

+ +

Other campus groups have also reached out to Koch to learn about CHTC services for their research. Lindley’s research group, made up of undergraduate students, M.S. and Ph.D. students, and postdocs working on nuclear reactor physics, advanced reactor design and integrated energy systems, wanted to understand how to harness the power of HPC/HTC in their research.

+ +

Ben Lindley, a UW–Madison Engineering Physics assistant professor, has utilized CHTC in his previous work to build software. With the assistance of postdoc Una Baker, Lindley sought the help of CHTC. “One of the beauties of the high throughput computing resources is that we can analyze dozens or hundreds of cases in parallel,” Lindley said. These cases represent scenarios where certain design features of nuclear reactors are modified and observed for change. “Without HTC, the scope of research could be very limited. Computers could crash and tasks could take too long to complete.”

+ +
+ Ben Lindley +
Ben Lindley, Assistant Professor
of Engineering Physics
+
+ +

In-person demonstrations with classrooms and research groups are always available at CHTC to UW-Madison researchers looking to expand computing beyond local resources. Koch noted that “we are always happy to meet with course instructors who are interested in including large scale computing in their courses, to share different ways we can support our goals.”

+ +

Contact CHTC here.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/chtc-workshop.html b/preview-fall2024-info/chtc-workshop.html new file mode 100644 index 000000000..2e68c5fb7 --- /dev/null +++ b/preview-fall2024-info/chtc-workshop.html @@ -0,0 +1,350 @@ + + + + + + +CHTC Launches First Introductory Workshop on HTC and HPC + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ CHTC Launches First Introductory Workshop on HTC and HPC +

+

On November 8, CHTC hosted a hands-on workshop for researchers new to high throughput or high performance computing.

+ +

The Center for High Throughput Computing (CHTC) facilitation team spent the morning of November 8 with over 50 students, researchers, and faculty interested in learning high performance computing (HPC) and high throughput computing (HTC). Lead Research Computing Facilitator Christina Koch began by asking everyone in the room who had run into problems with computing on a single computer to raise their hand. Over half the room did so. Participants reported bottlenecks such as month-long run times or loops with millions of iterations. The facilitation team then delved into why these problems were happening and how CHTC could help.

+ +

The workshop focused on identifying and practicing the steps needed to use a large-scale computing system at CHTC. Students were provided with detailed workflows and tools to improve their usage of high throughput computing or high performance computing. Hands-on examples were also incorporated, where attendees did the work themselves. Participants then got to “meet a server” and see what happens behind the scenes when they use CHTC resources.

+ +

Given the attendance and high level of interest in this workshop, it certainly will not be the last. The CHTC facilitation team is planning to host additional large-scale computing workshops in the future for those who missed this opportunity.

+ +

+ + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/collaborations-epic-eic.html b/preview-fall2024-info/collaborations-epic-eic.html new file mode 100644 index 000000000..c298913e0 --- /dev/null +++ b/preview-fall2024-info/collaborations-epic-eic.html @@ -0,0 +1,364 @@ + + + + + + +Collaborations Between Two National Laboratories and the OSG Consortium Propel Nuclear and High-Energy Physics Forward + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Collaborations Between Two National Laboratories and the OSG Consortium Propel Nuclear and High-Energy Physics Forward +

+
+ Electron Beam Ion Source / Brookhaven National Laboratory +
Electron Beam Ion Source / Brookhaven National Laboratory
+
+ +

Seeking to unlock the secrets of the “glue” binding visible matter in the universe, the ePIC Collaboration stands at the forefront of innovation. Led by a collective of hundreds of scientists and engineers, the Electron-Proton/Ion Collider (ePIC) Collaboration was formed to design, build, and operate the first experiment at the Electron-Ion Collider (EIC). This experiment aims to construct the world’s most advanced particle detector, capable of analyzing collisions between electrons and protons or other atomic nuclei.

+ +

“They are building a detector that can slide seamlessly into EIC’s infrastructure. When these collisions occur, they will capture a wealth of physics data that advances our understanding of high-energy nuclear interactions. The outcomes of these collisions allow us to explore new frontiers in physics,” reports Pascal Paschos, the OSG Consortium Facilitator who supports the work of ePIC. Paschos oversees the collaboration’s access to the computational capacity necessary to conduct these experiments.

+ +

The EIC, developed by the collaboration between Brookhaven National Lab (BNL) and Jefferson Lab (JLab), will be the world’s first electron-nucleus collider of its kind. It features two intersecting accelerators—one generating a powerful polarized electron beam and the other a high-energy beam of polarized protons or heavier atomic nuclei. However, ePIC faces the challenge of validating the detector design for integration with the EIC. Paschos explains, “To validate the detector design, throughput capacity is required to model and experimentally simulate the detector. The goal is to evaluate how the detector would respond to signals generated from theoretical collisions included in their parameter files.”

+ +

“This project isn’t new,” adds Dr. Wouter Deconinck, associate professor at the University of Manitoba and Deputy Software and Computing Coordinator (SCC) for Operations at ePIC. “Concepts for an electron-ion collider have been in development for 20 or 30 years, aiming to include a polarized ion beam.” Drawing on his experience at the HERA electron-proton collider and post-graduate work in electron polarimetry, Deconinck, alongside BNL postdoctoral fellow Sakib Rahman, explains, “Since 2008, we’ve been evaluating the computing and software stacks needed for an electron-ion collider. Planning for operation into the 2030s necessitates future-proof and modular development to incorporate emerging technologies.”

+ +

Achieving a unified system is one important goal for the project. ePIC requires an infrastructure capable of simulating and gathering essential data for their design. One option was the OSPool, a shared computing capacity available to researchers affiliated with US academic institutions. Deconinck and Rahman considered other computing infrastructures for these simulation jobs. Deconinck notes, “We evaluated the OSPool and compared it with the EIC pool of resources available at JLab, BNL and the Digital Research Alliance of Canada (the Alliance). Both pools were capable of handling about 2000 jobs at a time.” Ultimately selecting the OSPool, Deconinck notes, “We saw it as a way to convince JLab to allocate jobs to the OSPool indirectly, transferring jobs to the Alliance, and continuing to submit to the OSPool. The primary advantage was achieving a unified interface across all these sites.”

+ +

Rahman also emphasizes OSPool’s role in the integrated system, stating, “Previously, someone had to create accounts on every site to submit jobs. Now, running production campaigns, others need an account on one PATh-operated Access Point, with their work credited to ePIC. This detachment from individual operators and alignment with the project as a whole significantly reduces onboarding time.”

+ +

Scalability is also an essential requirement of the project. “Typically, we consider the time to simulate one event, estimated at 10-15 seconds per event using standard computing software,” Deconinck explains. “Based on this and statistical projections for the number of events to analyze, we calculate our total computing needs. For each reaction channel, we typically examine 5 to 10-20 million events to determine if a detector configuration yields desired results.”

+ +
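A back-of-the-envelope calculation, using only the figures quoted above, shows the scale these estimates imply: at 10 seconds per event, a 20-million-event reaction channel takes about 200 million CPU-seconds, or roughly 56,000 CPU-hours; spread across a pool running about 2,000 jobs at a time, that is a little over a day of wall-clock time per channel.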

Deconinck also reports that “One aspect I found immensely valuable was the initial interaction with the OSPool. Upon setting up an account, I received personalized attention from the OSG Research Computing Facilitation team, with someone guiding me through the process and inquiring about my approach. What truly made a significant impact were the office hours provided. There were weeks where I attended these sessions twice, and without fail, there was always someone available to offer assistance and feedback. This support was instrumental in getting us started.”

+ +

The OSPool enables Deconinck, Rahman, and teams to collaborate seamlessly worldwide in partnership with 173 institutions. “The engagement of ePIC with the OSG fabric of services brings two major laboratories into the fold, helping coordinate between them for the delivery of science,” adds Paschos. “The collaboration itself extends beyond individual PIs aiming to advance scientific outcomes; it encompasses engagement at all levels, from technical teams to geographically diverse points of interest.”

+ +

Collaboration is the cornerstone of success in multi-institutional projects like these. The synergy between the OSPool, two major national laboratories, and international teams exemplifies the power of collective effort in propelling physics science forward. Overcoming challenges and achieving groundbreaking discoveries requires a village of dedicated scientists, engineers, and collaborators from diverse backgrounds. Much like the “glue” binding visible matter, teamwork unites the ePIC Collaboration, magnifying their impact beyond the lab. Their journey powered by the OSPool capacity underscores the transformative power of cooperation and sharing in unraveling the mysteries of our universe.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/david-swanson-awardees-2023.html b/preview-fall2024-info/david-swanson-awardees-2023.html new file mode 100644 index 000000000..a2d85e539 --- /dev/null +++ b/preview-fall2024-info/david-swanson-awardees-2023.html @@ -0,0 +1,440 @@ + + + + + + +OSG David Swanson Awardees Honored at HTC23 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ OSG David Swanson Awardees Honored at HTC23 +

+

Jimena González Lozano and Aashish Tripathee are 2023’s recipients of the award for their +research advancements with strategic use of high-throughput computing (HTC).

+ +

+ +

OSG leadership created the OSG David Swanson Award in memory of Swanson, who throughout his life championed both the success of his students and the expansion of OSG and research computing. David Swanson, who passed away unexpectedly in 2019, was a computer science and engineering research professor at the University of Nebraska-Lincoln. The award reflects Swanson’s and the OSG School’s emphasis on helping people develop their skills in technology and advancing science with large-scale computing, OSG research facilitation lead Christina Koch says.

+ +

Researchers — like Jimena González Lozano and Aashish Tripathee, who sought the OSG School’s high-throughput computing (HTC) resources to solve complex computational challenges and, in turn, were able to evolve their research projects — have been honored with the award since its establishment in 2019. González is a Department of Physics observational cosmology Ph.D. student at the University of Wisconsin-Madison, and Tripathee is a University of Michigan Physics post-doctoral research fellow.

+ +

Awardees are provided the opportunity to share their research at the OSG All-Hands Meeting, which in 2023 was part of the annual Throughput Computing conference (HTC23), held in Madison, Wisconsin. “To have it in the context of recognizing a wonderful person like David is really meaningful. It’s like ‘Oh yes, this is why we’re doing what we’re doing,’ and it’s rewarding,” Koch reflects.

+ +

Being a David Swanson awardee is an honor, González elaborates, and makes her an example of how HTC and the OSG School transformed her research. “I couldn’t even explore new ideas [because it could take weeks to run one simulation], and it was around that time that I was reading all my emails carefully, and I saw the OSG User School [application] announcement,” González remembers. “They did a really good job at describing what you would learn and what high-throughput computing is. From that description, I thought that it was perfect for me. I applied, and then during the summer of 2021, I learned how to implement it, and it was very quick. After the first day, I already knew how to submit a job.”

+ +

González’s research on strong gravitational lenses in the Dark Energy Survey implements HTC and machine learning. Strong gravitational lenses can image stars, from which González can extract the position of the source and the magnification between the images. From the images, González creates thousands of simulations composed of millions of images while constraining the quality of the images. Because of the volume of simulations she needs for training, González could be left waiting for weeks when using machine learning — and the tighter the constraints, the greater the waiting time. This put its own constraints on which properties she could experiment with. Some ideas, González says, were impossible because she couldn’t pursue them quickly. Implementing HTC shortened the waiting time from days to hours.

+ +

The OSG School also impacted other areas of González’s research, including training the machine learning model and performing a complete search — each was reduced from long wait times spanning days to years to much more manageable wait times of as little as three hours.

+ +

Tripathee uses HTC for solving a big data challenge too. For one project on continuous gravitational waves, the data he collected spans a year and the entire sky, as well as the polarization over 24 times, resulting in 80 quadrillion templates. The solution, Tripathee said at HTC23, is looking at 500 billion templates per job. The answer for computing templates at a magnitude of a quadrillion is to use HTC, which helps with efficiency when running the numbers and makes the project possible. Without HTC, Tripathee’s jobs would have taken on average more than 10 hours for some and more than 24 hours for others. Through the OSG, Tripathee has used 22 million core hours in total: about 1.4 million hours per month, or 47,000 hours per day.

+ +
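A quick back-of-the-envelope check conveys the scale of that decomposition: dividing 80 quadrillion templates into jobs of 500 billion templates each works out to about 160,000 jobs (8 × 10^16 / 5 × 10^11 = 1.6 × 10^5).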

Tripathee’s mentor and OSG Deputy Executive Director Tim Cartwright encouraged Tripathee to self-nominate for the award. Upon learning he was chosen, “It felt like a nice validation and a recognition of having used the [OSG] to perform research,” Tripathee says. Attending the HTC23 event in Madison to receive the award was rewarding. “I also got to meet a lot of people… like the OSG faculty, Tim Cartwright in particular, and Christina [Koch]. It was a really nice opportunity and an honor to come to Madison, attend the event, and receive the award, but also meet [David Swanson’s widow, Ronda].”

+ +

At HTC23 Ronda Swanson said she hopes David’s legacy will live on in science. Ronda Swanson is +OSG’s self-described “biggest fangirl” and has continued her relationship with the OSG as an +advocate for HTC since David’s death, Cartwright says.

+ +

Annually, a committee chooses one or more former students from the OSG School according to the +student’s advancements and research achievements with distributed high-throughput computing (dHTC). +The OSG School teaches students how to harness HTC resources for their data collection and research +needs. Koch, who served on the selection committee for the award, explains the committee looks for +students who have taken what they learned at the OSG School and achieved great things with it, like +tackling a research problem or writing workflows from scratch after coming in with little to no +experience. Cartwright says committee members also look for applicants who can give back to the +community. Both González and Tripathee embody what the selection committee looks for, Koch explains.

+ +

“What Jimena learned [from the OSG School] really helped her solve a problem that she wouldn’t have +been able to solve before. Aashish is tackling both a niche field of research with these resources +and also has been testing new features for us or letting us know when things aren’t working and has +had this ongoing relationship with us.”

+ +

González will continue to use HTC to model the mass distribution of each galaxy that produces a +gravitational lens. People previously performed the computing for these models by hand, but as +the data accumulates, it becomes less feasible for humans to do this computing. To remedy this, +González will use machine learning to do the modeling because it requires a great deal of computational power.

+ +

Tripathee plans to continue using the OSG’s resources on new data and to conduct deeper searches +more quickly and efficiently. “With OSG, we didn’t have to fight and struggle for resources. Having +this access to these extra resources allowed us to do searches that are more computationally +costly and sensitive,” Tripathee says. “If I had never heard of OSG, I would have probably still +performed similar searches but not to this depth or sensitivity because the number of features +that I would have had access to would have been more limited.”

+ +

“Once I was at [HTC23], I understood what impact he [David Swanson] had on people, and not only in developing OSG, which was huge,” González notes. “It was shocking, that impact, but it was so very interesting to see people talking about him because it seemed like he was also a really good human being, a really good mentor, and really liked helping people and supporting people.”

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/des-expanding-universe.html b/preview-fall2024-info/des-expanding-universe.html new file mode 100644 index 000000000..505b36e64 --- /dev/null +++ b/preview-fall2024-info/des-expanding-universe.html @@ -0,0 +1,405 @@ + + + + + + +Des Expanding Universe + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ DES’ Expanding Universe +

+

Since its beginning, our universe has been expanding. The early work of scientists +such as Edwin Hubble gave us proof that galaxies across the universe are moving apart +from one another. Hubble’s law states, “Objects observed in deep space are found to +have a red shift, interpreted as a relative velocity away from Earth.” The +Dark Energy Survey (DES) seeks to help unravel +the mysteries of what forces are causing this expansion by focusing on dark energy +and how it constantly remaps the cosmos.

+ +

As stated by the DES collaboration, “DES is an international, collaborative effort to map hundreds of millions of galaxies, detect thousands of supernovae, and find patterns of cosmic structure that will reveal the nature of the mysterious dark energy that is accelerating the expansion of our universe.” Starting on August 31, 2013, researchers began taking data from a telescope at the Cerro Tololo Inter-American Observatory in Chile. This telescope has a 4-meter mirror and an attached 570-megapixel digital camera called DECam. Over a five-year period, the observatory allotted DES researchers 525 nights of observation to complete a wide-area survey of approximately 300 million galaxies.

+ +

The amount of data produced during these observations is immense. According to the DES DR1 Data Release, the catalog contains “over 38,000 single exposure images, close to 62,000 coadd images covering 10,388 tiles over roughly 5,000 square degrees within the DES footprint, resulting in nearly 400M distinct cataloged objects.” Sorting through this data is no trivial task. Ken Herner, an application developer and system analyst at Fermilab working on DES computing, tells us that analyzing this data is very memory intensive, as it includes massive amounts of cosmology simulation, among other types of analysis.

+ +
+
+
Dark Matter map
+

+ Map of dark matter, measured through weak lensing with Science Verification Data (Image Credit: DES Collaboration) +

+
+
+ Dark matter map +
+
+ +

To help in these efforts, DES looks to the Open Science Pool to provide a portion of its computing power. Over the last year, DES has gained roughly 4.58 million hours (522 years) of computing from OSG. “The main areas OSG has provided resources have been to the TNO/dwarf planet work, and the LIGO follow-up efforts,” says Herner. TNO stands for trans-Neptunian object, which is any minor planet in the Solar System that orbits the Sun at an average distance greater than that of Neptune, and LIGO is the “Laser Interferometer Gravitational-Wave Observatory,” which is best known for the Nobel Prize-winning direct observation of gravitational waves.
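As a quick sanity check of that hours-to-years conversion, assuming 24-hour days and 365-day years:

```python
# 4.58 million CPU-hours (the figure quoted above) expressed in years.
cpu_hours = 4_580_000
years = cpu_hours / (24 * 365)  # hours in a year, ignoring leap days
print(round(years, 1))          # -> 522.8, matching the quoted ~522 years
```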

+ +

Herner expects even more DES workflows to come over to OSG in the next year. With year five of data taking having ended just weeks ago, Herner tells us that “there will almost certainly be an extra half-year this fall to make up for poor weather in Year 3. After that, we’ll do final data analysis until ~2021.”

+ +

The research and analysis that DES has completed so far have exceeded expectations. Of these milestones, Herner states, “[DES has] the world’s most accurate dark matter map so far, competitive measurement of cosmological parameters (with even better results to come), the binary neutron star merger follow-up, extreme Trans-Neptunian object and dwarf planet discoveries, stellar streams in the Milky Way, to name a few.”

+ +

As more DES workflows arrive in the future, OSG will play an even larger part in the discoveries to come. We look forward to helping DES further understand the nature of dark energy in our ever-expanding universe.

+ +

For more info, please visit: https://www.darkenergysurvey.org

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/doc/CARMI.ps b/preview-fall2024-info/doc/CARMI.ps new file mode 100644 index 000000000..f4304ecae Binary files /dev/null and b/preview-fall2024-info/doc/CARMI.ps differ diff --git a/preview-fall2024-info/doc/CCGRID2003.doc b/preview-fall2024-info/doc/CCGRID2003.doc new file mode 100644 index 000000000..488a84988 Binary files /dev/null and b/preview-fall2024-info/doc/CCGRID2003.doc differ diff --git a/preview-fall2024-info/doc/CCGRID2003.pdf b/preview-fall2024-info/doc/CCGRID2003.pdf new file mode 100644 index 000000000..051f38750 Binary files /dev/null and b/preview-fall2024-info/doc/CCGRID2003.pdf differ diff --git a/preview-fall2024-info/doc/CLAD.ps.gz b/preview-fall2024-info/doc/CLAD.ps.gz new file mode 100644 index 000000000..bf27404b0 Binary files /dev/null and b/preview-fall2024-info/doc/CLAD.ps.gz differ diff --git a/preview-fall2024-info/doc/CODO-hpdc.doc b/preview-fall2024-info/doc/CODO-hpdc.doc new file mode 100644 index 000000000..d075cbb5d Binary files /dev/null and b/preview-fall2024-info/doc/CODO-hpdc.doc differ diff --git a/preview-fall2024-info/doc/CODO-hpdc.pdf b/preview-fall2024-info/doc/CODO-hpdc.pdf new file mode 100644 index 000000000..a2b55d5c9 Binary files /dev/null and b/preview-fall2024-info/doc/CODO-hpdc.pdf differ diff --git a/preview-fall2024-info/doc/CODO.pdf b/preview-fall2024-info/doc/CODO.pdf new file mode 100644 index 000000000..a2b55d5c9 Binary files /dev/null and b/preview-fall2024-info/doc/CODO.pdf differ diff --git a/preview-fall2024-info/doc/CondorandFirewalls.pdf b/preview-fall2024-info/doc/CondorandFirewalls.pdf new file mode 100644 index 000000000..3a3327fa0 Binary files /dev/null and b/preview-fall2024-info/doc/CondorandFirewalls.pdf differ diff --git a/preview-fall2024-info/doc/HPCwire.1 b/preview-fall2024-info/doc/HPCwire.1 new file mode 100644 index 000000000..400e68423 --- /dev/null +++ b/preview-fall2024-info/doc/HPCwire.1 @@ -0,0 +1,168 @@ +HIGH THROUGHPUT COMPUTING: AN INTERVIEW WITH MIRON LIVNY 06.27.97 +by Alan Beck, editor in chief HPCwire +============================================================================= + + This month, NCSA's (National Center for Supercomputing Applications) +Advanced Computing Group (ACG) will begin testing Condor, a software system +developed at the University of Wisconsin that promises to expand computing +capabilities through efficient capture of cycles on idle machines. The +software, operating within an HTC (High Throughput Computing) rather than a +traditional HPC (High Performance Computing) paradigm, organizes machines +into clusters, called pools, or collections of clusters called flocks, that +can exchange resources. Condor then hunts for idle workstations to run jobs. +When the owner resumes computing, Condor migrates the job to another machine. + + To learn more about recent Condor developments, HPCwire interviewed Miron +Livny, professor of Computer Science, University of Wisconsin at Madison and +principal investigator for the Condor Project. Following are selected +excerpts from that discussion. + +--- + + HPCwire: Please provide a brief background on the Condor Project and your +role in it. + + LIVNY: "The Condor project has been underway for about 10 years now; I'm +currently head of the effort to further develop the software, implement and +deploy it. We are now also closely tied with NCSA. + + "The underlying idea revolves around the contrast between compute power +which is owned and that which can be accessed. 
When we started about 15 years +ago, this gap was relatively small. For example, when I joined the department +we had two 780s, and I had accounts on both. These were one MIP machines. +Today, I have a 200 MIP machine on my desk -- and we have 400 machines like +this in the department, although I don't have accounts on them. So for me to +access all these resources I need additional software. The main obstacle this +software faces is the problem of distributed ownership. We were the first to +identify and significantly address this critical issue." + + HPCwire: How? + + LIVNY: "The first step is to discuss with the owner of a resource who, +when, and how that resource can be used by others. Once the owners are happy, +we can move on and deal with the more technical aspects of the system. + + HPCwire: How does the Condor project differ from conventional approaches to +distributed resources? + + LIVNY: "The Condor project is referred to as High Throughput Computing +(HTC) rather than the traditional High Performance Computing (HPC). HPC deals +with floating-point operations per second, and I like to portray HTC as +floating-point operations per year. We believe there are many scientists and +engineers who are interested in the question: 'What can I accomplish +(computationally) in two to six months?' HTC serves a vital role for this +group of researchers. + + "HPC brings enormous amounts of computing power to bear over relatively +short periods of time. HTC employs large amounts of computing power for very +lengthy periods. This is important simply because throughput is the primary +limiting factor in many scientific and engineering efforts. For example, if +you must manufacture a chip, you have a window of about three months to run +as many simulations as you can before bringing the product to market. +Essentially, this is an HTC problem. If you have a high-energy physicist who +is reconstructing events and enriching them with Monte Carlo data, the +project has a year or two to complete, but the more computational resources +that can be brought to bear in that time, the greater will be the statistical +significance attained. This too is an application basically limited by +throughput rather than response time." + + HPCwire: What criteria determine whether HPC or HTC is more appropriate? + + LIVNY: "HPC must be used for decision-support (person-in-the-loop) or +applications under sharp time-constraint, such as weather modeling. However, +those doing sensitivity analyses, parametric studies or simulations to +establish statistical confidence need HTC. We use HTC for neural-network +training, Monte Carlo statistics, and a very wide variety of simulations, +including computer hardware, scheduling policies, and communication +protocols, annealing, even combustion-engine simulations, where 100 or even +1000 jobs are submitted to explore the entire parameter space." + + HPCwire: Given the rapidly growing power of workstations, do you feel HTC +will be able to increasingly address HPC-type problems? + + LIVNY: "Yes, and we already see it happening. For example, our campus has +cancelled a surplus cycle account at SDSC (San Diego Supercomputer Center), +because we were able to fulfill the needs of everyone at the graduate school +who requested HPC resources. If someone just needs two to five months of CPU- +time, we can easily provide it. Only those who require a huge amount of +tightly-coupled memory must go to the HPC end." + + HPCwire: Please detail the work you're doing with NCSA. 
+ + LIVNY: "We play a dual role with respect to NCSA. On one hand, we're a +regional partner, with over 500 workstations here on campus. These will +provide a source of cycles to NCSA and a testbed for scientists who would +like to see how well their applications work in an HTC environment. On the +other hand, we're also an enabling technology, where our experience in +building and maintaining Condor will contribute to the construction of the +National Technology Grid. Thus, we hope we can soon move from a campus-wide +to a nation-wide HTC system." + + HPCwire: How can our readers experiment with Condor? + + LIVNY: "The software is freely available and can be downloaded from our +Website at http://www.cs.wisc.edu/condor There are also pointers at the NCSA +homepages." + + HPCwire: Do you also foresee Condor moving into a commercial context? + + LIVNY: "IBM's LoadLeveler, which runs on SP, is already a commercial +offspring of Condor. We're currently moving in the direction of NT, and I +believe there will be a significant commercial implication there. We are now +talking with several commercial entities interested in seeing what Condor can +do for their HTC applications. I certainly believe that industry could +benefit from all these idle cycles -- if they only knew how to utilize them. + + "Over the last year we've restructured our software so that it relies less +on Unix-specific functionality. We hope to have the first round for NT by the +end of the summer. Condor normally provides checkpointing of applications and +redirection of I/O, but the first versions for NT will not provide those -- +only resource allocation and management. However, our goal for the end of the +year is to have a full-featured supported version for NT. + + HPCwire: Are you planning to create a version fully compatible with both +operating systems? + + LIVNY: "Yes -- although obviously it won't be able to do migrate jobs +between UNIX and NT. Right now we're running across architectures; at UW we +have a heterogeneous environment composed of 6 or 7 different UNIX/flavor +combinations. Soon NT machines will come in as submission sites or +cycle-servers and will co-exist in the UNIX environment. We see no technical +obstacles to this." + + HPCwire: Are there any further points you would like to emphasize? + + LIVNY: "It is very important to be able to harness the enormous amount of +computing power we already have at our fingertips -- whether this is done +through Condor or another way. We have focused too long on the problem of how +to run a single application, and we have not paid enough attention to how to +run 100 or 1000. I have one user at the University of Washington who +regularly submits 2000 jobs at one keystroke." + +-------------------- +Alan Beck is editor in chief of HPCwire. Comments are always welcome and +should be directed to editor@hpcwire.tgc.com + +************************************************************************** + H P C w i r e S P O N S O R S + + Product specifications and company information in this section are + available to both subscribers and non-subscribers. + + [ ] 936) Sony [ ] 905) MAXSTRAT + [ ] 934) HP/Convex Tech. Ctr. [ ] 930) HNSX Supercomputers + [ ] 909) Fujitsu [ ] 902) IBM Corp. 
+ [ ] 937) Digital Equipment [ ] 932) Portland Group + [ ] 938) Visual Numerics [ ] 940) Eudora + [ ] 941) HAL Computers [ ] 942) Sun Microsystems + [ ] 921) Silicon Graphics/Cray Research [ ] 943) Northrop Grumman + [ ] 944) Raytheon E-Systems + Send info requests (an X-marked copy of this message) to more@tgc.com +*************************************************************************** +Copyright 1997 HPCwire. Redistribution of this article is forbidden by law +without the expressed written consent of the publisher. For a free trial +subscription to HPCwire, send e-mail to trial@hpcwire.tgc.com. + + + + diff --git a/preview-fall2024-info/doc/HTCTalk.ps.gz b/preview-fall2024-info/doc/HTCTalk.ps.gz new file mode 100644 index 000000000..91bd3a886 Binary files /dev/null and b/preview-fall2024-info/doc/HTCTalk.ps.gz differ diff --git a/preview-fall2024-info/doc/PVMUG94_slides.ps.Z b/preview-fall2024-info/doc/PVMUG94_slides.ps.Z new file mode 100644 index 000000000..6b124cff7 Binary files /dev/null and b/preview-fall2024-info/doc/PVMUG94_slides.ps.Z differ diff --git a/preview-fall2024-info/doc/RECOORD-proteins-2005.pdf b/preview-fall2024-info/doc/RECOORD-proteins-2005.pdf new file mode 100644 index 000000000..10f78262e Binary files /dev/null and b/preview-fall2024-info/doc/RECOORD-proteins-2005.pdf differ diff --git a/preview-fall2024-info/doc/SAML-XACML-profile-JGridComputing-2009.pdf b/preview-fall2024-info/doc/SAML-XACML-profile-JGridComputing-2009.pdf new file mode 100644 index 000000000..85ce41735 Binary files /dev/null and b/preview-fall2024-info/doc/SAML-XACML-profile-JGridComputing-2009.pdf differ diff --git a/preview-fall2024-info/doc/SC07-Iosup.pdf b/preview-fall2024-info/doc/SC07-Iosup.pdf new file mode 100644 index 000000000..25eeed88e Binary files /dev/null and b/preview-fall2024-info/doc/SC07-Iosup.pdf differ diff --git a/preview-fall2024-info/doc/SandboxingWorlds053.pdf b/preview-fall2024-info/doc/SandboxingWorlds053.pdf new file mode 100644 index 000000000..04f81a092 Binary files /dev/null and b/preview-fall2024-info/doc/SandboxingWorlds053.pdf differ diff --git a/preview-fall2024-info/doc/WildLetter.pdf b/preview-fall2024-info/doc/WildLetter.pdf new file mode 100644 index 000000000..72e049772 Binary files /dev/null and b/preview-fall2024-info/doc/WildLetter.pdf differ diff --git a/preview-fall2024-info/doc/WiscIdea.html b/preview-fall2024-info/doc/WiscIdea.html new file mode 100644 index 000000000..287f7bd6a --- /dev/null +++ b/preview-fall2024-info/doc/WiscIdea.html @@ -0,0 +1,328 @@ + + +Hunting for wasted computing power + + + +
+ +

+
+HUNTING FOR WASTED COMPUTING POWER +
+New Software for Computing Networks Puts Idle PC's to Work +
+ +
+ +
+by Scott Fields +
+1993 Research Sampler +
+University of Wisconsin-Madison +
+

+ +Scattered throughout the world of science are personal computers just +waiting to work. While a university biologist grades papers, her +computer sits unused. On the desk of an engineering professor, a +neglected workstation hums quietly through the night. Idle computers +mean wasted computing power. And Miron Livny, a University of +Wisconsin-Madison associate professor of computer sciences, hates waste. +

+Rather than see computers lie fallow, a group of computer +scientists now led by Livny has been developing Condor, a software system +that puts inactive computers back to work. Like a vulture circling the +desert, Condor scavenges for processing power that would otherwise be +lost. +

+The idea behind Condor is simple. It matches any computational +jobs that computer users have with spare power in other owners' +computers. Computer owners don't have to modify their programs to use +Condor. They just have to agree to become part of a Condor network, a +group of computers connected in such a way that messages can pass +between them. +

+Condor lurks in the background of the network, nesting in a +computer called the central manager-which can be any computer in the +network-where it watches for inactive computers. When it senses an idle +machine, Condor swoops in to run a project on it. When the owner +resumes using the computer, Condor moves the project somewhere else. +

+In practice, however, Condor is more complicated. Every +computer in the pool must continually run parts of the Condor software +that track activity on both the central manager and the local system. +To do this, the computer must be capable of "multi-tasking," or running +more than one piece of software simultaneously, the computer equivalent +of walking and chewing gum at the same time. +
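The cycle just described (watch the pool, place a job on an idle machine, vacate when the owner returns) can be sketched in a few lines of Python; the Machine interface and its method names here are illustrative shorthand, not Condor's actual code:

```python
import time

def central_manager(pool, queue, poll_seconds=60):
    """Toy version of the loop described above: watch the pool, start a
    queued job on any idle machine, and move the job off when the owner
    comes back."""
    running = {}  # machine -> job currently placed there
    while True:
        for machine in pool:
            if machine.is_idle() and machine not in running and queue:
                job = queue.pop(0)
                machine.start(job)     # swoop in on the idle machine
                running[machine] = job
            elif not machine.is_idle() and machine in running:
                job = running.pop(machine)
                machine.stop(job)      # owner is back; vacate immediately
                queue.insert(0, job)   # re-queue so it migrates elsewhere
        time.sleep(poll_seconds)
```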

+Currently Condor runs only on workstations that use an +operating system called UNIX. This operating system-the software that +lets a computer run other software applications-was selected because it +can smoothly handle many applications at once and because it is wide- +spread in the scientific community. Livny says, however, that Condor +could be modified to work with any multi-tasking operating system. + +

+HATCHING AN IDEA +

+ +

+Just a year after Livny arrived at the university in 1984, a +team of UW-Madison computer scientists-then including professors David +DeWitt and Marvin Solomon-began the Condor project. Livny had started +thinking about how to exploit wasted processing power even earlier, when +he was a graduate student at the Weizmann Institute in his native +Israel. +

+At that time, he says, the rapid development of mini-computers +marked a great shift in where computing power was located. Instead of +being concentrated in a single mainframe computer, an institution's +computing power was now "distributed" into many small computers. Livny +soon realized that this shift would result in wasted computing capacity +unless some way were found to harness that power. +

+Before personal computers became commonplace, Livny explains, +scientists shared mainframe computers-often experiencing long delays as +the computer processed jobs submitted ahead of theirs. But these +centralized systems, although frustrating to use, maximized a computer's +processing power, because they ran 24 hours a day. +

+Personal computers eliminated the wait. But, Livny says, while +the recent explosion in personal computing has tremendously increased +the amount of computing power-which computer scientists measure in +number of computing "cycles"-that researchers own, there has not +been a proportionate increase in the number of cycles available to any +individual researcher. +

+"Ten years ago, what was owned and what was accessible were +almost the same," Livny says. "Now, because of distributed ownership, +there is an increasing gap between what is owned and what is available +for scientific computing. What we are trying to do is narrow this gap." + +

+MAKING IT FLY +

+ +Developing and debugging Condor's programming code took +countless hours of work by a team of UW-Madison programmers headed by +Michael Litzkow. But the hardest task for Condor's developers, Livny +says, was figuring out how to maintain a comfortable relationship +between two communities of computer users: the people who own computers +but don't necessarily need more computing power, and people who want to +borrow spare cycles. Satisfying both groups was important, he explains, +because administrators can't add a machine to the pool without its +owner's permission. "So if the owner is not happy," Livny says, "we +lose the machine." +

+Meeting the varied demands of computer owners, however, is not so +easy. Condor must keep track of activity on every computer on networks +that can include hundreds of computers. It must know how soon it is +allowed to start using a computer after the owner stops using it. It +must work both in environments where each individual uses just a single +computer and in environments where any individual can log on to any +computer. And it must know how to share the resources of the network +fairly. +

+Some researchers-Livny calls them "cycle monsters"-have an +insatiable appetite for computer cycles. Most of these "frustrated" +owners are willing to sacrifice some convenience for a chance to tap +into the ocean of computer power a Condor pool offers. +

+Many other researchers, however, rarely need more cycles than +their own computers offer. These scientists are often reluctant to join +the Condor pool, since they think they have little to gain from it. +"These `happy' or `almost-always-happy' owners are the ones that we have +to convince," Livny says. +

+To lure them to Condor, the design team was careful to make the +new system attractive to both heavy and light users, so the cycle +monsters could feel free to do time-consuming research. "The philosophy +here is that we would like to encourage you to use as many cycles as +possible and to do research projects that can run for weeks or months," +Livny explains. "But we want to protect owners, whether or not they are +heavy users." +

+The researcher who has a job to run sends an electronic message +to a special piece of Condor software called the local scheduler. The +scheduler negotiates for cycles with another piece of software called +the coordinator, which can be installed in any computer in the network. +When the coordinator finds an idle computer, it will move the job +there-if it is that researcher's turn. +

+Condor doesn't just run jobs in the order that it gets them. To +keep big jobs from draining the pool of cycles, Livny-working with then- +graduate student Matt Mutka, now on the faculty of Michigan State +University-developed a priority system called the Up-Down algorithm. This +system makes it possible for scientists who are running time-consuming +research to coexist with people running shorter jobs. The priority that +the system assigns to a scientist's project decreases as the number of +cycles the scientist uses increases. +

+"If you are heavy user, eventually you will have low priority +when competing with other users. Once you stop using the system, your +priority starts to slowly go up," Livny says. "We want to protect +somebody who wants to come in and run 10 to 20 hours of computing time +to have a better response time than someone who is running for months +and months and months." +
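A minimal sketch of that Up-Down idea, assuming a simple linear usage penalty and recovery rate (the real algorithm's constants and bookkeeping are more involved):

```python
def up_down_update(priority, cycles_used, penalty=0.01, recovery=1.0):
    """Higher value = better service, as in the description above."""
    if cycles_used > 0:
        return priority - penalty * cycles_used  # heavy use pushes you down
    return priority + recovery                   # idleness slowly restores you

def next_user(waiting_users, priorities):
    # The scheduler favors whoever currently holds the best priority, so a
    # 10-to-20-hour job outranks someone who has been computing for months.
    return max(waiting_users, key=lambda u: priorities[u])
```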

+ +

+THAT'S ONE HARD-WORKING BIRD +

+

+ +

+Researchers who use Condor gain two major advantages: they can have +many tasks running at once, and they can run big projects that otherwise +might be too expensive to conduct. +

+Without access to scavenged computing cycles, Livny explains, +many of the projects run on Condor would monopolize a single workstation +for months, a luxury not many researchers can afford. One UW-Madison +graduate student, for example, consumed in just one month the number of +cycles a single workstation would generate running around the clock for +350 days. +

+Although some of the workstations that Condor links are +relatively slow-running at just 10 to 20 MIPS (million instructions per +second) rather than the hundreds of MIPS a mainframe computer might +achieve-these workstation cycles are surplus, whereas time on a +mainframe is expensive. + +

+GROOMING CONDOR +

+ +

+But Condor's advantages have a price: each task takes longer
+than it would running on a single machine. All the juggling Condor must
+do wastes time. Projects move from computer to computer. Projects
+often must wait their turn. Information must be moved in and out of
+electronic memory and the magnetic storage drives. And if a computer's
+owner returns-signaled either by tapping a key or moving the computer's
+mouse-the project must be interrupted and moved either to another
+workstation or, if no other machine is available, back to its home
+workstation.

+Often, however, the guest project hasn't finished when Condor +must yield the workstation. If Condor had to restart from scratch every +time a project was interrupted, many tasks-some of which can take +months-would never end. To make Condor more efficient and allow big jobs +to finish, the design team incorporated "checkpoints," software markers +that indicate how far a job has progressed. When it places a checkpoint, +Condor remembers the current status of the program, as well as any +results that had been computed so far. Then, when it restarts the +project somewhere else, it can pick up at the checkpoint. +

+The idea of checkpoints is not new, Livny says. But making +checkpoints work presented a major challenge. Working outside of the +UNIX operating system, Condor had to be able to remember, but not +disrupt, the status of every detail of software running inside the +operating system. +

+Originally Condor didn't place checkpoints until a workstation's +owner returned. But that forced the owner to wait while Condor +completed its checkpoint calculations. To speed things up, the team +decided to place checkpoints periodically during the calculations. Now +Condor returns to the previous checkpoint and moves the task away almost +immediately. That can mean losing up to an hour's work, but, Livny +says, a few lost cycles is the price of keeping workstation owners +happy. +
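In rough Python terms, the periodic-checkpoint trade-off might look like the sketch below; a real Condor checkpoint snapshots the entire process image, so the pickled state dictionary here is only an illustration:

```python
import pickle

def run_with_checkpoints(step, state, total_steps, ckpt_path, every=100):
    """Advance the computation one `step` at a time, saving state every
    `every` steps so an eviction loses at most that much work."""
    for i in range(state.get("step", 0), total_steps):
        state = step(state)          # one unit of the computation
        state["step"] = i + 1
        if (i + 1) % every == 0:     # periodic checkpoint, not only on eviction
            with open(ckpt_path, "wb") as f:
                pickle.dump(state, f)
    return state

def resume(ckpt_path):
    # After a migration, pick up from the last checkpoint rather than from
    # scratch; at most `every` steps of work are lost.
    with open(ckpt_path, "rb") as f:
        return pickle.load(f)
```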

+Although the design team tried to make Condor as friendly as
+possible, some users still find it inconvenient. For example, normally a
+person who takes a short break while using a software application that's
+loaded into the computer's electronic memory will be able to resume using
+the application immediately. But if Condor moves in, it will displace
+the application. Reloading may take 10 or 15 seconds. Such delays,
+Livny says, can be bothersome for people who use their computers
+intermittently, making their machines frequent targets of Condor's
+scavenging excursions.

+That's why the team modified Condor so that it could feed on
+unused machines only on a schedule that each owner approved. For some,
+that means their computers aren't available to the pool until an hour or
+two after they normally leave for the day. "We lose two or three hours
+of computing time for lunch and at the end of the day," Livny says, "but
+the fact that they join the pool means we gain 11 hours."

+ + +

+LEAVING THE NEST +

+ +

+Condor pools have spread to laboratories around the world. But +because the Condor system is available for free on electronic bulletin +boards, a practice computer scientists use to share new ideas, Livny +doesn't know where all of them are. He has helped Condor users at the +University of Michigan, Ohio State University, the Weizmann Institute in +Israel, the CERN physics laboratory in Geneva, and the National +High-Energy Physics Lab in the Netherlands. At some of these sites, +Livny says, Condor pools comprising hundreds of machines run thousands +of jobs daily. +

+Some UW-Madison departments are employing Condor to carry out +massive computing chores. In the genetics department, for example, +Condor-using machines in the computer sciences department-is helping +decipher the genetic code of the bacterium Escherichia coli. This vast +joint-research project requires extensive computer searches, matching +newly decoded DNA patterns to a database of known protein sequences. +

+At UW-Madison and universities around the world, Condor +supporters are explaining its advantages to scientists reluctant to +relinquish complete control of their workstations. Recently, for +example, Livny started working with researchers at the UW-Madison +College of Engineering to find ways to use Condor for tasks currently +run on supercomputers. Big projects that demand rapid results, such as +sophisticated modeling of the activity inside a nuclear reactor, he +says, will probably continue to justify the expense of buying +supercomputer time. But others that involve highly repetitious tasks may +be shifted to a new Condor network. + +

+A FLOCK OF CONDORS +

+ +

+As Condor pools have multiplied, Livny's team has started work +on the next logical extension of distributing computer resources. "To +go beyond the boundaries of individual networks in an institution," he +says, "we speak now about a flock of Condors, where you have a Condor, +and a Condor, and another Condor linked." +

+With the flock, there are even more problems to solve. Just as +each researcher in a Condor pool wants to control his or her own +workstation, each research group in a flock wants to control its own +Condor pool. And Condor's decisions about where to send jobs become more +difficult. +

+The Condor pools in the flock, he says, have to be able to +decide whether to keep a job in a busy pool, or fly to the next idle +pool. "It's like joining a seemingly endless line in front of a teller +and having a soothsayer tell you to beware of the empty teller on the +next block. Are you going to leave this line and run there? Maybe +everyone heard you and everyone is running there and you're better off +standing here." +

+Livny's team is continuing to improve Condor. Some of his +graduate students are working on modifying the system so that a single +job could be divided and run in parallel on many workstations at once. +This would mean that running jobs on Condor would actually be faster +than on a single workstation. Livny, Michael Litzkow, and other team +members are working with IBM on a commercial version of Condor, which +would let businesses share their distributed resources. And they are +continuing to fine-tune Condor to make it the perfect hunter of idle +workstations. +
+ +
+ +

+ +
diff --git a/preview-fall2024-info/doc/X11_tools.ps b/preview-fall2024-info/doc/X11_tools.ps new file mode 100644 index 000000000..30812b2ca Binary files /dev/null and b/preview-fall2024-info/doc/X11_tools.ps differ diff --git a/preview-fall2024-info/doc/X11_tools.txt b/preview-fall2024-info/doc/X11_tools.txt new file mode 100644 index 000000000..e0a24c7cf --- /dev/null +++ b/preview-fall2024-info/doc/X11_tools.txt @@ -0,0 +1,198 @@ + + + + + + + + + + + + BUILDING IMAKE, CPP, and MAKEDEPEND + + + _M_i_c_h_a_e_l _L_i_t_z_k_o_w + + Computer Sciences Department + University of Wisconsin - Madison + mike@cs.wisc.edu + + + + + + + + +_1. _I_N_T_R_O_D_U_C_T_I_O_N + + _I_m_a_k_e, _c_p_p, and _m_a_k_e_d_e_p_e_n_d are not part of _c_o_n_d_o_r, but + you will need correctly working copies of all three to + build _c_o_n_d_o_r. _I_m_a_k_e is a program for building + "Makefiles" which are customized for a particular set of + hardware, software, and local environment. _I_m_a_k_e accom- + plishes much of this task by using _c_p_p to expand macros. + Since _i_m_a_k_e makes certain assumptions about the operation + of _c_p_p which will not be true in all installations, it + may be necessary to build a customized _c_p_p for _i_m_a_k_e'_s + use. _M_a_k_e_d_e_p_e_n_d is a program for generating dependencies + in "Makefiles" for C programs which include header files. + Since some versions of _m_a_k_e_d_e_p_e_n_d do not parse certain + _c_p_p directives correctly, you may need the version + included here even if you already have _m_a_k_e_d_e_p_e_n_d. + + All three of these programs are included in the _X_1_1 dis- + tribution, and are available from MIT. If you already + have X11, you should already have these programs avail- + able. They are included here for your convenience in the + event you do not have X11. Instructions for building + these programs are included in the X11 distribution, but + those instructions assume you want to build the whole + distribution. These instructions are intended to be a + "direct path" to building just these three programs. The + _c_o_n_d_o_r developers assume no responsibility for their com- + pleteness, correctness, etc. + +_2. _B_U_I_L_D_I_N_G _I_M_A_K_E + + (1) cd to "imake" + + + +VVVVeeeerrrrssssiiiioooonnnn 4444....1111bbbb 2222////22228888////99992222 + + + + + + + +VVVVeeeerrrrssssiiiioooonnnn 4444....1111bbbb 2222////22228888////99992222 + + + (2) To build correct Makefiles, _i_m_a_k_e must determine + which platform, (both hardware and variant of + UNIX), it is running on. It depends on certain + unique strings being defined by your local version + of _c_p_p to do this. If your local _c_p_p does not + define the strings which _i_m_a_k_e expects, you can + build a custom version of _i_m_a_k_e which will pass + those strings to _c_p_p by using "-D" on the + command line. Some hardware platforms are unique + enough that the unix variation is implicit. In + other cases two strings are used. Determine + whether your local _c_p_p defines the necessary + strings to identify your platform. 
The expected + strings are as follows: + + ______________________________________________________ + _|_S__t__r__i__n__g__(__s__)____________________________P__l__a__t__f__o__r__m________________________________________________| + |"RiscAIX" IBM R6000 running AIX3.1 | + |"mips" & "ultrix" DecStation running ULTRIX 4.0 | + |"sun" & "mc68020" SUN 3 running SunOS 4.1+ | + |"sun" & "sparc" SUN 4 running SunOS 4.1+ | + |"mc68000" & "hp300" HP Bobcat running BSD 4.3 | + |"vax" & "ultrix" Vax running Ultrix 3.2 | + |"i386" & "sequent" Sequent Symmetry running Dynix | + |"ibm032" IBM RT running BSD 4.3 | + _|_____________________________________________________| + + + (3) If you need to customize your _i_m_a_k_e to pass such + strings to _c_p_p: + + a. To "Makefile.ini" add "-D" to the + CFLAGS macro, where is replaced + by any reasonable name for your platform. + + b. To "imakemdep.h" in the "cpp_argv" definition, + add + + #ifdef + "-D", + #endif + + where matches the definition you + added to "Makefile.ini" and + matches the appropriate string(s) listed in the + above table. Note, if two identifiers are + listed above, you will have to add two lines + here. + + (4) On AIX 3.1.5 platforms, you will also need to + add "_BSD" to the CFLAGS macro in + "Makefile.ini". + + + +XXXX11111111 TTTTOOOOOOOOLLLLSSSS 2222 + + + + + + + +VVVVeeeerrrrssssiiiioooonnnn 4444....1111bbbb 2222////22228888////99992222 + + + (5) Compile _i_m_a_k_e by running "make -f + Makefile.ini". + +_3. _B_U_I_L_D_I_N_G _C_P_P + + (1) Some versions of _c_p_p will not accept "/**/" as a + zero length separator. This will cause constructs + like + + /**/# Makefile comment line + + to be flagged as errors. If this is the case with + your _c_p_p, you will need to build a special version + to use with _i_m_a_k_e. Just try running such a line + through your local version of _c_p_p to determine + whether you have the problem. + + (2) If you do need the special _c_p_p, go into the _c_p_p + directory and build it. Hopefully no customiza- + tion will be needed. + +_4. _M_A_K_E_D_E_P_E_N_D + + (1) You will also need a correctly working version of + "makedepend". This program reads a set of source + files along with the flags which will be given to + the C compiler when those sources are compiled and + generates a set of dependence lines to be added to + the local Makefile. + + There are some versions of this program around + which incorrectly process lines of the form: + + #if + + Since condor uses this construct extensively to + include the correct header files for different + platforms, you will need a correctly working ver- + sion. A shell script version called "mdepend.sh" + is included here. It is much slower that the com- + piled version, but should work correctly. 
+ + + + + + + + + + + + + + +XXXX11111111 TTTTOOOOOOOOLLLLSSSS 3333 + + + diff --git a/preview-fall2024-info/doc/XRAY.pdf b/preview-fall2024-info/doc/XRAY.pdf new file mode 100644 index 000000000..8bcaeb833 Binary files /dev/null and b/preview-fall2024-info/doc/XRAY.pdf differ diff --git a/preview-fall2024-info/doc/XRAY_paper.pdf b/preview-fall2024-info/doc/XRAY_paper.pdf new file mode 100644 index 000000000..8bcaeb833 Binary files /dev/null and b/preview-fall2024-info/doc/XRAY_paper.pdf differ diff --git a/preview-fall2024-info/doc/aiosup06buildntest.pdf b/preview-fall2024-info/doc/aiosup06buildntest.pdf new file mode 100644 index 000000000..84b4a90d4 Binary files /dev/null and b/preview-fall2024-info/doc/aiosup06buildntest.pdf differ diff --git a/preview-fall2024-info/doc/alderman_thesis.pdf b/preview-fall2024-info/doc/alderman_thesis.pdf new file mode 100644 index 000000000..b1111a9f5 Binary files /dev/null and b/preview-fall2024-info/doc/alderman_thesis.pdf differ diff --git a/preview-fall2024-info/doc/badfs-nsdi04.pdf b/preview-fall2024-info/doc/badfs-nsdi04.pdf new file mode 100644 index 000000000..6ee61aed8 Binary files /dev/null and b/preview-fall2024-info/doc/badfs-nsdi04.pdf differ diff --git a/preview-fall2024-info/doc/badfs-nsdi04.ps b/preview-fall2024-info/doc/badfs-nsdi04.ps new file mode 100644 index 000000000..3c8274b4a Binary files /dev/null and b/preview-fall2024-info/doc/badfs-nsdi04.ps differ diff --git a/preview-fall2024-info/doc/beowulf-chapter-rev1.pdf b/preview-fall2024-info/doc/beowulf-chapter-rev1.pdf new file mode 100644 index 000000000..c247e10d6 Binary files /dev/null and b/preview-fall2024-info/doc/beowulf-chapter-rev1.pdf differ diff --git a/preview-fall2024-info/doc/beowulf-chapter-rev1.ps b/preview-fall2024-info/doc/beowulf-chapter-rev1.ps new file mode 100644 index 000000000..b528e7a5f Binary files /dev/null and b/preview-fall2024-info/doc/beowulf-chapter-rev1.ps differ diff --git a/preview-fall2024-info/doc/birdbath.pdf b/preview-fall2024-info/doc/birdbath.pdf new file mode 100644 index 000000000..6d5ec4fd7 Binary files /dev/null and b/preview-fall2024-info/doc/birdbath.pdf differ diff --git a/preview-fall2024-info/doc/build-n-test_ccgrid07talk.pdf b/preview-fall2024-info/doc/build-n-test_ccgrid07talk.pdf new file mode 100644 index 000000000..405ea89f4 Binary files /dev/null and b/preview-fall2024-info/doc/build-n-test_ccgrid07talk.pdf differ diff --git a/preview-fall2024-info/doc/building_pipelines-TR1487.pdf b/preview-fall2024-info/doc/building_pipelines-TR1487.pdf new file mode 100644 index 000000000..397d79e0b Binary files /dev/null and b/preview-fall2024-info/doc/building_pipelines-TR1487.pdf differ diff --git a/preview-fall2024-info/doc/bulkdata_framework-ispdc2003.pdf b/preview-fall2024-info/doc/bulkdata_framework-ispdc2003.pdf new file mode 100644 index 000000000..fe3d26649 Binary files /dev/null and b/preview-fall2024-info/doc/bulkdata_framework-ispdc2003.pdf differ diff --git a/preview-fall2024-info/doc/bypass-hpdc9.pdf b/preview-fall2024-info/doc/bypass-hpdc9.pdf new file mode 100644 index 000000000..87041391e Binary files /dev/null and b/preview-fall2024-info/doc/bypass-hpdc9.pdf differ diff --git a/preview-fall2024-info/doc/bypass-hpdc9.ps b/preview-fall2024-info/doc/bypass-hpdc9.ps new file mode 100644 index 000000000..55c45f96d Binary files /dev/null and b/preview-fall2024-info/doc/bypass-hpdc9.ps differ diff --git a/preview-fall2024-info/doc/bypass-jcc-preprint.pdf b/preview-fall2024-info/doc/bypass-jcc-preprint.pdf new file mode 100644 
index 000000000..cbf2d3645 Binary files /dev/null and b/preview-fall2024-info/doc/bypass-jcc-preprint.pdf differ diff --git a/preview-fall2024-info/doc/bypass-jcc-preprint.ps b/preview-fall2024-info/doc/bypass-jcc-preprint.ps new file mode 100644 index 000000000..4a26d6f7d Binary files /dev/null and b/preview-fall2024-info/doc/bypass-jcc-preprint.ps differ diff --git a/preview-fall2024-info/doc/camera.doc b/preview-fall2024-info/doc/camera.doc new file mode 100644 index 000000000..f8dca37ef Binary files /dev/null and b/preview-fall2024-info/doc/camera.doc differ diff --git a/preview-fall2024-info/doc/camera.pdf b/preview-fall2024-info/doc/camera.pdf new file mode 100644 index 000000000..7f1656230 Binary files /dev/null and b/preview-fall2024-info/doc/camera.pdf differ diff --git a/preview-fall2024-info/doc/camera.ps b/preview-fall2024-info/doc/camera.ps new file mode 100644 index 000000000..b84f4a0b6 Binary files /dev/null and b/preview-fall2024-info/doc/camera.ps differ diff --git a/preview-fall2024-info/doc/carmi_fgcs_paper.ps b/preview-fall2024-info/doc/carmi_fgcs_paper.ps new file mode 100644 index 000000000..39ad7545f Binary files /dev/null and b/preview-fall2024-info/doc/carmi_fgcs_paper.ps differ diff --git a/preview-fall2024-info/doc/carmi_users_guide.ps b/preview-fall2024-info/doc/carmi_users_guide.ps new file mode 100644 index 000000000..65fa4977d Binary files /dev/null and b/preview-fall2024-info/doc/carmi_users_guide.ps differ diff --git a/preview-fall2024-info/doc/cheap-cycles-bib.html b/preview-fall2024-info/doc/cheap-cycles-bib.html new file mode 100644 index 000000000..29afc6e03 --- /dev/null +++ b/preview-fall2024-info/doc/cheap-cycles-bib.html @@ -0,0 +1,23 @@ + +BibTex Source for Paper Citation + + + + +

+BibTex Source for paper citation of "Cheap cycles" +

+ +
+@inproceedings
+{
+   wright2001cheap-cycles,
+   author = "Derek Wright",
+   title = "Cheap cycles from the desktop to the dedicated cluster: combining opportunistic and dedicated scheduling with {C}ondor",
+   booktitle = "Proceedings of the Linux Clusters: The HPC Revolution conference"
+   month = "June",
+   year = "2001",
+   address = "Champaign - Urbana, IL"
+}
+
+ diff --git a/preview-fall2024-info/doc/cheap-cycles.pdf b/preview-fall2024-info/doc/cheap-cycles.pdf new file mode 100644 index 000000000..6346647da Binary files /dev/null and b/preview-fall2024-info/doc/cheap-cycles.pdf differ diff --git a/preview-fall2024-info/doc/cheap-cycles.ppt b/preview-fall2024-info/doc/cheap-cycles.ppt new file mode 100644 index 000000000..b995bde91 Binary files /dev/null and b/preview-fall2024-info/doc/cheap-cycles.ppt differ diff --git a/preview-fall2024-info/doc/cheap-cycles.ps b/preview-fall2024-info/doc/cheap-cycles.ps new file mode 100644 index 000000000..2ecd8fbbf Binary files /dev/null and b/preview-fall2024-info/doc/cheap-cycles.ps differ diff --git a/preview-fall2024-info/doc/chep09_condor_scalability.pdf b/preview-fall2024-info/doc/chep09_condor_scalability.pdf new file mode 100644 index 000000000..95b35f576 Binary files /dev/null and b/preview-fall2024-info/doc/chep09_condor_scalability.pdf differ diff --git a/preview-fall2024-info/doc/chep10_condor_scalability.pdf b/preview-fall2024-info/doc/chep10_condor_scalability.pdf new file mode 100644 index 000000000..a7f58e3cb Binary files /dev/null and b/preview-fall2024-info/doc/chep10_condor_scalability.pdf differ diff --git a/preview-fall2024-info/doc/ckpt97.pdf b/preview-fall2024-info/doc/ckpt97.pdf new file mode 100644 index 000000000..7a2b88094 Binary files /dev/null and b/preview-fall2024-info/doc/ckpt97.pdf differ diff --git a/preview-fall2024-info/doc/ckpt97.ps b/preview-fall2024-info/doc/ckpt97.ps new file mode 100644 index 000000000..3e738fa17 Binary files /dev/null and b/preview-fall2024-info/doc/ckpt97.ps differ diff --git a/preview-fall2024-info/doc/ckpt_mgmt.pdf b/preview-fall2024-info/doc/ckpt_mgmt.pdf new file mode 100644 index 000000000..e558668e7 Binary files /dev/null and b/preview-fall2024-info/doc/ckpt_mgmt.pdf differ diff --git a/preview-fall2024-info/doc/ckpt_mgmt.ps b/preview-fall2024-info/doc/ckpt_mgmt.ps new file mode 100644 index 000000000..dc032f425 Binary files /dev/null and b/preview-fall2024-info/doc/ckpt_mgmt.ps differ diff --git a/preview-fall2024-info/doc/classad-x509.pdf b/preview-fall2024-info/doc/classad-x509.pdf new file mode 100644 index 000000000..31de28f9e Binary files /dev/null and b/preview-fall2024-info/doc/classad-x509.pdf differ diff --git a/preview-fall2024-info/doc/community-sc2001.pdf b/preview-fall2024-info/doc/community-sc2001.pdf new file mode 100644 index 000000000..5ee36f4da Binary files /dev/null and b/preview-fall2024-info/doc/community-sc2001.pdf differ diff --git a/preview-fall2024-info/doc/community-sc2001.ps b/preview-fall2024-info/doc/community-sc2001.ps new file mode 100644 index 000000000..f938983c0 Binary files /dev/null and b/preview-fall2024-info/doc/community-sc2001.ps differ diff --git a/preview-fall2024-info/doc/complexity-aware-grid1.pdf b/preview-fall2024-info/doc/complexity-aware-grid1.pdf new file mode 100644 index 000000000..f30ccd328 Binary files /dev/null and b/preview-fall2024-info/doc/complexity-aware-grid1.pdf differ diff --git a/preview-fall2024-info/doc/condor-bibtex.html b/preview-fall2024-info/doc/condor-bibtex.html new file mode 100644 index 000000000..9b5d88a7b --- /dev/null +++ b/preview-fall2024-info/doc/condor-bibtex.html @@ -0,0 +1,1627 @@ + +Condor BibTex Source for Citations + + + + +
+

+Thesis citation from Data-Driven Batch Scheduling +

+
+@PhdThesis{BentPhd05,
+           title = "{Data-Driven Batch Scheduling}",
+          author = "John Bent",
+          school = "University of Wisconsin, Madison",
+           month = may,
+            year = "2005",
+}
+
+ +
+

+Journal article citation from Concurrency and Computation: Practice and Experience +

+
+@article{condor-practice, 
+  author    = "Douglas Thain and Todd Tannenbaum and Miron Livny",
+  title     = "Distributed computing in practice: the Condor experience.",
+  journal   = "Concurrency - Practice and Experience",
+  volume    = "17",
+  number    = "2-4",
+  year      = "2005",
+  pages     = "323-356",
+}
+
+ +
+

+Book chapter citation from The Grid: Blueprint for a New Computing Infrastructure +

+
+@incollection{
+        grid2-ch19,
+        author = "Douglas Thain and Miron Livny",
+        title = "Building Reliable Clients and Servers",
+        editor = "Ian Foster and Carl Kesselman",
+        booktitle = "The Grid: Blueprint for a New Computing Infrastructure",
+        publisher = "Morgan Kaufmann",
+        year = "2003"
+}
+
+ + +
+

+Book chapter citation from Grid Computing: Making the Global Infrastructure a Reality +

+
+@incollection{
+        condorgrid,
+	author = "Douglas Thain and Todd Tannenbaum and Miron Livny",
+	title = "{C}ondor and the Grid",
+	editor = "Fran Berman and Geoffrey Fox and Tony Hey",
+	booktitle = "Grid Computing: Making the Global Infrastructure a Reality",
+	publisher = "John Wiley \& Sons Inc.",
+	month = "December",
+	year = "2002"
+}
+
+ +
+

+Book chapter citation from Beowulf Cluster Computing with Linux +

+
+@incollection{
+        beowulfbook-condor,
+	author = "Todd Tannenbaum and Derek Wright and Karen Miller and Miron Livny",
+	title = "{C}ondor -- A Distributed Job Scheduler",
+	editor = "Thomas Sterling",
+	booktitle = "Beowulf Cluster Computing with {L}inux",
+	publisher = "MIT Press",
+	month = "October",
+	year = "2001"
+}
+
+ +
+

+Book chapter citation from High Performance Cluster Computing: Architectures and Systems, Volume 1 +

+
+@incollection{
+        htc-deployment-chapter,
+	author = "Jim Basney and Miron Livny",
+	title = "Deploying a High Throughput Computing Cluster",
+	editor = "Rajkumar Buyya",
+	booktitle = "High Performance Cluster Computing: Architectures and Systems, Volume 1",
+	publisher = "Prentice Hall PTR",
+	year = "1999"
+}
+
+ +
+

+Article citation from "SPEEDUP" +

+
+@article{
+        htc-mechanisms,
+	author = "Miron Livny and Jim Basney and Rajesh Raman and Todd Tannenbaum",
+	title = "Mechanisms for High Throughput Computing",
+	journal = "SPEEDUP Journal",
+	volume = "11",
+	number = "1",
+	month = "June",
+	year = "1997"
+}
+
+ +
+

+Article citation from "Future Generation Computer Systems" +

+
+@article{
+        condor-flock,
+	title = "A worldwide flock of {C}ondors: Load sharing among workstation clusters",
+	author = "D.H.J. Epema and M. Livny and R. van Dantzig and X. Evers and J. Pruyne",
+	journal = "Future Generation Computer Systems",
+	volume = "12",
+	year = "1996",
+	pages = "53--65"
+}
+
+ +
+

+Paper citation from Proceedings of the 8th International Conference of Distributed Computing Systems +

+
+@inproceedings{
+        condor-hunter,
+	author = "Michael Litzkow and Miron Livny and Matthew Mutka",
+	title = "{C}ondor - A Hunter of Idle Workstations",
+	booktitle = "Proceedings of the 8th International Conference of Distributed Computing Systems",
+	month = "June",
+	year = "1988"
+}
+
+ +
+

+Paper citation from the "Usenix Summer Conference" +

+
+@inproceedings{
+        litzkow-cycles,
+	author = "Michael Litzkow",
+	title = "Remote Unix - Turning Idle Workstations into Cycle Servers",
+	booktitle = "Usenix Summer Conference",
+	year = "1987",
+	pages = "381--384"
+}
+
+ +
+

+Technical report 1481 +

+
+@techreport{
+        ncoleman-tr1481,
+	author = "Nicholas Coleman and Rajesh Raman and Miron Livny and Marvin Solomon",
+	institution = "University of Wisconsin - Madison Computer Sciences Department",
+	title = "Distributed Policy Management and Comprehension with Classified Advertisements",
+	number = "UW-CS-TR-1481",
+	month = "April",
+	year = "2003"
+}
+
+ +
+

+Masters' Project report of Nicolas Coleman +

+
+@mastersthesis{
+        ncoleman-mr,
+	author = "Nicholas Coleman",
+	title = "An Implementation of Matchmaking Analysis in Condor",
+	school = "University of Wisconsin-Madison",
+	month = "May",
+	year = "2001"
+}
+
+ +
+

+Paper citation from HPDC9 +

+
+@inproceedings{
+        raman-hpdc9,
+	author = "Rajesh Raman and Miron Livny and Marvin Solomon",
+	title = "Resource Management through Multilateral Matchmaking",
+	booktitle = "Proceedings of the Ninth {IEEE} Symposium on High Performance Distributed Computing ({HPDC9})",
+	address = "Pittsburgh, PA",
+	month = "August",
+	year = "2000",
+	pages = "290--291"
+}
+
+ +
+

+Paper citation from HPDC7 +

+
+@inproceedings{
+        raman-hpdc7,
+	author = "Rajesh Raman and Miron Livny and Marvin Solomon",
+	title = "Matchmaking: Distributed Resource Management for High Throughput Computing",
+	booktitle = "Proceedings of the Seventh {IEEE} International Symposium on High Performance Distributed Computing ({HPDC7})",
+	month = "July",
+	date = "28--31",
+	year = "1998",
+	address = "Chicago, IL"
+}
+
+ +
+

+Paper citation from HPDC9 +

+
+@inproceedings{
+        basney-hpdc9,
+	author = "Jim Basney and Miron Livny",
+	title = "Managing Network Resources in {C}ondor",
+	booktitle = "Proceedings of the Ninth {IEEE} Symposium on High Performance Distributed Computing ({HPDC9})",
+	address = "Pittsburgh, PA",
+	month = "August",
+	year = "2000",
+	pages = "298--299"
+}
+
+ +
+

+Article citation from "International Journal of High Performance Computing Applications" +

+
+@article{
+        basney-goodput,
+	title = "Improving Goodput by Co-scheduling CPU and Network Capacity",
+	author = "Jim Basney and Miron Livny",
+	journal = "International Journal of High Performance Computing Applications",
+	volume = "13",
+	number = "3",
+	year = "1999",
+}
+
+ +
+

+Book chapter citation from The Grid: Blueprint for a New Computing Infrastructure +

+
+@incollection{
+        gridbook-htc,
+	author = "Miron Livny and Rajesh Raman",
+	title = "High-throughput resource management",
+	editor = "Ian Foster and Carl Kesselman",
+	booktitle = "The Grid: Blueprint for a New Computing Infrastructure",
+	publisher = "Morgan Kaufmann",
+	year = "1998"
+}
+
+ +
+

+Article citation from "Performance Evaluation" +

+
+@article{
+        mutka-pe,
+	title = "The Available Capacity of a Privately Owned Workstation Environment",
+	author = "Matt Mutka and Miron Livny",
+	journal = "Performance Evaluation",
+	volume = "12",
+	number = "4",
+	year = "1991",
+	pages = "269--284"
+}
+
+ +
+

+Paper citation from Performance '87 +

+
+@inproceedings{
+        mutka-p87,
+	author = "Matt Mutka and Miron Livny",
+	title = "Profiling Workstations' Available Capacity for Remote Execution",
+	booktitle = "Performance '87, 12th IFIP WG 7.3",
+	address = "Brussels",
+	month = "December",
+	year = "1987",
+	pages = "529--544"
+}
+
+ + +
+

+Paper citation from CHEP 2000 +

+
+@inproceedings{
+        basney-chep2000,
+	author = "Jim Basney and Miron Livny and Paolo Mazzanti",
+	title = "Harnessing the Capacity of Computational Grids for High Energy Physics",
+	booktitle = "Proceedings of the International Conference on Computing in High Energy and Nuclear Physics (CHEP 2000)",
+	address = "Padova, Italy",
+	month = "February",
+	year = "2000"
+}
+
+ +
+

+Technical report 1346 +

+
+@techreport{
+        litzkow-tr1346,
+	author = "Michael Litzkow and Todd Tannenbaum and Jim Basney and Miron Livny",
+	institution = "University of Wisconsin - Madison Computer Sciences Department",
+	title = "Checkpoint and Migration of {UNIX} Processes in the {C}ondor Distributed Processing System",
+	number = "UW-CS-TR-1346",
+	month = "April",
+	year = "1997"
+}
+
+ +
+

+Paper citation from IPPS '96 +

+
+@inproceedings{
+        pruyne-ipps96,
+	author = "Jim Pruyne and Miron Livny",
+	title = "Managing Checkpoints for Parallel Programs",
+	booktitle = "Workshop on Job Scheduling Strategies for Parallel Processing (IPPS '96)",
+	address = "Honolulu, HI",
+	month = "April",
+	year = "1996"
+}
+
+ +
+

+Paper citation from MICS 2005 +

+
+@inproceedings{
+        meehean-mics2005,
+        author = "Joe Meehean and Miron Livny",
+        title = "A Service Migration Case Study: Migrating the {C}ondor Schedd",
+        booktitle = "Midwest Instruction and Computing Symposium",
+        month = "April",
+        year = 2005
+}
+
+ +
+

+Article citation from "Computer Physics Communications" +

+
+@article{
+        basney-cpc,
+	title = "Utilizing Widely Distributed Computational Resources Efficiently with Execution Domains",
+	author = "Jim Basney and Miron Livny and Paolo Mazzanti",
+	journal = "Computer Physics Communications",
+	publisher = "Elsevier Science",
+	volume = "140",
+	number = "1-2",
+	pages = "246+",
+	month = "October",
+	year = "2001"
+}
+
+ +
+

+Article citation from Dr Dobbs Journal +

+
+@article{
+        tannenba-dobbs,
+	author = "Todd Tannenbaum and Michael Litzkow",
+	title = "Checkpointing and Migration of UNIX Processes in the {C}ondor Distributed Processing System",
+	journal = "Dr Dobbs Journal",
+	month = "February",
+	year = "1995"
+}
+
+ +
+

+Paper citation from Usenix '92 +

+
+@inproceedings{
+        litzkow-usenix,
+	author = "Michael Litzkow and Miron Livny",
+	title = "Supporting Checkpointing and Process Migration Outside the {UNIX} Kernel",
+	booktitle = "Proceedings of the Winter 1992 {USENIX} Conference",
+	address = "San Francisco, CA",
+	month = "January",
+	year = "1992",
+	pages = "283--290"
+}
+
+ +
+

+Paper citation from Grid 2007 +

+
+@inproceedings{
+        chervenak-grid2007,
+	author = "Ann Chervenak and Ewa Deelman and Miron Livny and Mei-Hui Su and Rob Schuler and Shishir Bharathi and Gaurang Mehta and Karan Vahi",
+	title = "Data Placement for Scientific Applications in Distributed Environments",
+	booktitle = "Proceedings of the 8th IEEE/ACM International Conference on Grid Computing (Grid 2007)",
+	address = "Austin, TX",
+	month = "September",
+	year = "2007"
+}
+
+ +
+

+Paper citation from Euro-Par 2004 +

+
+@inproceedings{
+        profiling-europar2004,
+	author = "George Kola and Tevfik Kosar and Miron Livny",
+	title = "Profiling Grid Data Transfer Protocols and Servers",
+	booktitle = "Proceedings of Euro-Par 2004",
+	address = "Pisa, Italy",
+	month = "September",
+	year = "2004"
+}
+
+ +
+

+Paper citation from NOSSDAV 2004 +

+
+@inproceedings{
+        kola-nossdav2004,
+	author = "George Kola and Tevfik Kosar and Miron Livny",
+	title = "A Fully Automated Fault-tolerant System for Distributed Video Processing and Off-site Replication",
+	booktitle = "Proceedings of the 14th {ACM} International Workshop on Network and Operating Systems Support for Digital Audio and Video ({NOSSDAV 2004})",
+	address = "Kinsale, Ireland",
+	month = "June",
+	year = "2004"
+}
+
+ +
+

+Journal citation from Parallel and Distributed Computing Practices +

+
+@article{
+        runtime-pdcp,
+	author = "George Kola and Tevfik Kosar and Miron Livny",
+	title = "Run-time Adaptation of Grid Data-placement Jobs",
+	journal = "Parallel and Distributed Computing Practices",
+	year = "2004"
+}
+
+ +
+

+Paper citation from ISPDC 2003 +

+
+@inproceedings{
+        kosar-ispdc2003,
+	author = "Tevfik Kosar and George Kola and Miron Livny",
+	title = "A Framework for Self-optimising, Fault-tolerant, High Performance Bulk Data Transfers in a Heterogeneous Grid Environment",
+	booktitle = "Proceedings of the Second International Symposium on Parallel and Distributed Computing ({ISPDC2003})",
+	address = "Ljubljana, Slovenia",
+	month = "October",
+	year = "2003"
+}
+
+ +
+

+Paper citation from AGRIDM 2003 +

+
+@inproceedings{
+        runtime-agridm2003,
+	author = "George Kola and Tevfik Kosar and Miron Livny",
+	title = "Run-time Adaptation of Grid Data-placement Jobs",
+	booktitle = "Proceedings of the International Workshop on Adaptive Grid Middleware ({AGridM2003})",
+	address = "New Orleans, LA",
+	month = "September",
+	year = "2003"
+}
+
+ +
+

+Technical report 1483 +

+
+@techreport{
+        kosart-tr1483,
+	author = "Tevfik Kosar and Miron Livny",
+	institution = "University of Wisconsin - Madison Computer Sciences Department",
+	title = "Scheduling Data Placement Activities in Grid",
+	number = "UW-CS-TR-1483",
+	month = "July",
+	year = "2003"
+}
+
+ +
+

+Paper citation from WORLDS 2005 +

+
+@inproceedings{
+        santhanam-worlds05,
+	author = "Sriya Santhanam and Pradheep Elango and Andrea Arpaci-Dusseau and Miron Livny",
+        title = "Deploying Virtual Machines as Sandboxes for the Grid",
+        booktitle = "Second Workshop on Real, Large Distributed Systems ({WORLDS 2005})",
+        address = "San Francisco, CA",
+        month = "December",
+        year = "2005"
+}
+
+ +
+

+Paper citation from 2005 UK e-Science All Hands +

+
+@inproceedings{
+	birdbath2005,
+	author = "Clovis Chapman and Charaka Goonatilake and Wolfgang Emmerich and Matthew Farrellee and Todd Tannenbaum and Miron Livny and Mark Calleja and Martin Dove",
+        title = "Condor {B}ird{B}ath: Web Service interfaces to {C}ondor",
+        booktitle = "Proceedings of 2005 {UK} e-{S}cience {A}ll {H}ands {M}eeting",
+        address = "Nottingham, UK",
+		pages = "737--744",
+        month = "September",
+        year = "2005"
+}
+
+ +
+

+Paper citation from Supercomputing 2007 +

+
+@inproceedings{
+	SC07-Iosup,
+	author = "Alexandru Iosup and Dick H.J. Epema and Todd Tannenbaum and Matthew Farrellee and Miron Livny",
+        title = "Inter-Operating Grids through Delegated MatchMaking",
+	booktitle = "Proceedings of International Conference for High Performance Computing, Networking, Storage and Analysis (SC07)",
+	address = "Reno, NV",
+	month = "November",
+	year = "2007"
+}
+
+ +
+

+Paper citation from Cluster 2005 +

+
+@inproceedings{
+	Sonny-CLUSTER2005,
+	author = "Sechang Son and Matthew Farrellee and Miron Livny",
+        title = "A Generic Proxy Mechanism for Secure Middlebox Traversal",
+	booktitle = "Proceedings of CLUSTER 2005",
+	address = "Boston, MA",
+	month = "September",
+	year = "2005"
+}
+
+ +
+

+Paper citation from UK e-Science All Hands Meeting 2005 +

+
+@inproceedings{
+	AHM2005,
+	author = "Bruce Beckles and Sechang Son and John Kewley",
+        title = "Current methods for negotiating firewalls for the Condor system",
+	booktitle = "Proceedings of the 4th UK e-Science All Hands Meeting 2005",
+	address = "Nottingham, UK",
+	month = "September",
+	year = "2005"
+}
+
+ +
+

+Paper citation from HPDC 14 +

+
+@inproceedings{
+	CODO-hpdc-2005,
+	author = "Sechang Son and  Bill Allcock and Miron Livny",
+        title = "CODO: Firewall Traversal by Cooperative On-Demand Opening",
+	booktitle = "Proceedings of the Fourteenth {IEEE} Symposium on High Performance Distributed Computing",
+	address = "Research Triangle Park, NC",
+	month = "July",
+	year = "2005"
+}
+
+ +
+

+Paper citation from 2004 UK e-Science All Hands +

+
+@inproceedings{
+	condor-ogsa-2004,
+	author = "Clovis Chapman and  Paul Wilson and  Todd Tannenbaum and  Matthew Farrellee and  Miron Livny and  John Brodholt and Wolfgang Emmerich",
+        title = "Condor services for the global grid: interoperability between {C}ondor and {OGSA}",
+        booktitle = "Proceedings of 2004 {UK} e-{S}cience {A}ll {H}ands {M}eeting",
+        address = "Nottingham, UK",
+		pages = "870--877",
+        month = "August",
+        year = "2004"
+}
+
+ +
+

+Paper citation from Cluster 2004 +

+
+@inproceedings{
+	samgrid-cluster2004,
+	author = "Andrew Baranovski and Gabriele Garzoglio and Igor Terekhov and Alain Roy and Todd Tannenbaum",
+        title = "Management of Grid Jobs and Data within {SAMG}rid",
+        booktitle = "Proceedings of 2004 {IEEE} International Conference on Cluster Computing",
+		publisher = "{IEEE}",
+        address = "San Diego, CA",
+		pages = "353--360",
+        month = "September",
+        year = "2004"
+}
+
+ +
+

+Paper citation from Cluster 2004 +

+
+@inproceedings{
+	gridknowledgebase-cluster2004,
+	author = "George Kola and Tevfik Kosar and Miron Livny",
+    title = "A Client-centric Grid Knowledgebase",
+    booktitle = "Proceedings of 2004 {IEEE} International Conference on Cluster Computing",
+	publisher = "{IEEE}",
+    address = "San Diego, CA",
+	pages = "431--438",
+    month = "September",
+    year = "2004"
+}
+
+ +
+

+Paper citation from NSDI 2004 +

+
+@inproceedings{
+        badfs-nsdi04,
+        author = "John Bent and Douglas Thain and Andrea Arpaci-Dusseau and Remzi Arpaci-Dusseau and Miron Livny",
+        title = "Explicit Control in a Batch Aware Distributed File System",
+        booktitle = "Proceedings of the First {USENIX/ACM} Conference on Networked Systems Design and Implementation ({NSDI})",
+        address = "San Francisco, CA",
+        month = "March",
+        year = "2004"
+}
+
+ +
+

+Paper citation from HPDC 12 +

+
+@inproceedings{
+        thain-hpdc12-sharing,
+	author = "Douglas Thain and John Bent and Andrea Arpaci-Dusseau and Remzi Arpaci-Dusseau and Miron Livny",
+	title = "Pipeline and Batch Sharing in Grid Workloads",
+	booktitle = "Proceedings of the Twelfth {IEEE} Symposium on High Performance Distributed Computing",
+	address = "Seattle, WA",
+	month = "June",
+	year = "2003"
+}
+
+ +
+

+Paper citation from HPDC 12 +

+
+@inproceedings{
+        thain-hpdc12-ethernet,
+	author = "Douglas Thain and Miron Livny",
+	title = "The Ethernet Approach to Grid Computing",
+	booktitle = "Proceedings of the Twelfth {IEEE} Symposium on High Performance Distributed Computing",
+	address = "Seattle, WA",
+	month = "June",
+	year = "2003"
+}
+
+ +
+

+Book chapter citation from Grid Resource Management +

+
+@incollection{
+        nest-bookchapter,
+	author = "John Bent and Venkateshwaran Venkataramani and Nick LeRoy and Alain Roy and Joseph Stanley and Andrea Arpaci-Dusseau and Remzi Arpaci-Dusseau and Miron Livny",
+	title = "{N}e{ST} - A Grid Enabled Storage Appliance",
+	editor = "Jan Weglarz and Jarek Nabrzyski and Jennifer Schopf and Maciej Stroinski",
+	booktitle = "Grid Resource Management",
+	publisher = "Kluwer Academic Publishers",
+	year = "2003"
+}
+
+ +
+

+Book chapter citation from Grid Computing: Making the Global Infrastructure a Reality +

+
+@incollection{
+        condor-grid,
+	author = "Douglas Thain and Todd Tannenbaum and Miron Livny",
+	title = "{C}ondor and the Grid",
+	editor = "Fran Berman and Geoffrey Fox and Anthony Hey",
+	booktitle = "Grid Computing: Making the Global Infrastructure a Reality",
+	publisher = "John Wiley \& Sons Inc.",
+	month = "April",
+	year = "2003"
+}
+
+ +
+

+Technical report PPDG-20 +

+
+@techreport{
+        ppdg-20,
+	author = "Francesco Giacomini and Francesco Prelz and Massimo Sgaravatto and Igor Terekhov and Gabriele Garzoglio and Todd Tannenbaum",
+	institution = "Particle Physics Data Grid collaboration (http://www.ppdg.net)",
+	title = "Planning on the Grid: A Status Report [DRAFT]",
+	number = "PPDG-20",
+	month = "October",
+	year = "2002"
+}
+
+ +
+

+Paper citation from HPDC 11 +

+
+@inproceedings{
+        storage-hpdc11,
+	author = "John Bent and Venkateshwaran Venkataramani and Nick LeRoy and Alain Roy and Joseph Stanley and Andrea Arpaci-Dusseau and Remzi Arpaci-Dusseau and Miron Livny",
+	title = "Flexibility, Manageability, and Performance in a Grid Storage Appliance",
+	booktitle = "Proceedings of the Eleventh {IEEE} Symposium on High Performance Distributed Computing",
+	address = "Edinburgh, Scotland",
+	month = "July",
+	year = "2002"
+}
+
+ +
+

+Paper citation from HPDC 11 +

+
+@inproceedings{
+        thain-hpdc11,
+	author = "Douglas Thain and Miron Livny",
+	title = "Error Scope on a Computational Grid: Theory and Practice",
+	booktitle = "Proceedings of the Eleventh {IEEE} Symposium on High Performance Distributed Computing",
+	address = "Edinburgh, Scotland",
+	month = "July",
+	year = "2002"
+}
+
+ +
+

+Paper citation from Supercomputing 2001 +

+
+@inproceedings{
+        thain-sc2001,
+	author = "Douglas Thain and John Bent and Andrea Arpaci-Dusseau and Remzi Arpaci-Dusseau and Miron Livny",
+	title = "Gathering at the Well: Creating Communities for Grid {I/O}",
+	booktitle = "Proceedings of Supercomputing 2001",
+	address = "Denver, Colorado",
+	month = "November",
+	year = "2001"
+}
+
+ +
+

+Journal citation from Journal of Cluster Computing +

+
+@article{
+        condor-g-jcc,
+	author = "James Frey and Todd Tannenbaum and Ian Foster and Miron Livny and Steve Tuecke",
+	title = "{C}ondor-{G}: A Computation Management Agent for Multi-Institutional Grids",
+	journal = "Cluster Computing",
+	volume = "5",
+	year = "2002",
+	pages = "237--246"
+}
+
+ +
+

+Paper citation from HPDC 10 +

+
+@inproceedings{
+        condor-g-hpdc10,
+	author = "James Frey and Todd Tannenbaum and Ian Foster and Miron Livny and Steve Tuecke",
+	title = "{C}ondor-{G}: A Computation Management Agent for Multi-Institutional Grids",
+	booktitle = "Proceedings of the Tenth {IEEE} Symposium on High Performance Distributed Computing ({HPDC})",
+	address = "San Francisco, California",
+	pages = "7--9",
+	month = "August",
+	year = "2001"
+}
+
+ +
+

+Paper citation from HPDC 10 +

+
+@inproceedings{
+        kangaroo-hpdc10,
+        author = "Douglas Thain and Jim Basney and Se-Chang Son and Miron Livny",
+	title  = "The {K}angaroo Approach to Data Movement on the Grid",
+	booktitle = "Proceedings of the Tenth {IEEE} Symposium on High Performance Distributed Computing ({HPDC10})",
+	address = "San Francisco, CA",
+	month = "August",
+	date = "7--9",
+	year = "2001"
+}
+
+ +
+

+Masters' Project report of John Bent +

+
+@mastersthesis{
+        bent-mr,
+	author = "John Bent",
+	title = "Building Storage Appliances for the Grid and Beyond",
+	school = "University of Wisconsin-Madison",
+	month = "May",
+	year = "2001"
+}
+
+ +
+

+Paper citation from CCGRID 2003 +

+
+@inproceedings{
+        son-ccgrid03,
+        author = "Sechang Son and Miron Livny",
+	title  = "Recovering Internet Symmetry in Distributed Computing",
+	booktitle = "Proceedings of the 3rd International Symposium on Cluster Computing and the Grid",
+	address = "Tokyo, Japan",
+	month = "May",
+	year = "2003"
+}
+
+ +
+

+Paper citation from GRID 2000 +

+
+@inproceedings{
+        mw-grid2000,
+        author = "Elisa Heymann and Miquel A. Senar and Emilio Luque and Miron Livny",
+	title  = "Adaptive Scheduling for Master-Worker Applications on the Computational Grid",
+	booktitle = "Proceedings of the First IEEE/ACM International Workshop on Grid Computing (GRID 2000)",
+	address = "Bangalore, India",
+	month = "December",
+	year = "2000"
+}
+
+ +
+

+Paper citation from HiPC 2000 +

+
+@inproceedings{
+        mw-hipc2000,
+        author = "Elisa Heymann and Miquel A. Senar and Emilio Luque and Miron Livny",
+	title  = "Evaluation of an Adaptive Scheduling Strategy for Master-Worker Applications on Clusters of Workstations",
+	booktitle = "Proceedings of the 7th International Conference on High Performance Computing (HiPC 2000)",
+	address = "Bangalore, India",
+	month = "December",
+	year = "2000"
+}
+
+ +
+

+Paper citation from HPDC 9 +

+
+@inproceedings{
+        mw-hpdc9,
+	author = "Jeff Linderoth and Sanjeev Kulkarni and Jean-Pierre Goux and Michael Yoder",
+	title = "An Enabling Framework for Master-Worker Applications on the Computational Grid",
+	booktitle = "Proceedings of the Ninth {IEEE} Symposium on High Performance Distributed Computing ({HPDC9})",
+	address = "Pittsburgh, PA",
+	pages = "43--50",
+	month = "August",
+	year = "2000"
+}
+
+ +
+

+Technical report ANL/MCS-P792-0200 +

+
+@techreport{
+        mw-tr,
+	author = "Jeff Linderoth and Jean-Pierre Goux and Michael Yoder",
+	institution = "Mathematics and Computer Science Division, Argonne National Laboratory",
+	title = "Metacomputing and the Master-Worker Paradigm",
+	number = "ANL/MCS-P792-0200",
+	month = "February",
+	year = "2000"
+}
+
+ +
+

+Paper citation from the Second Workshop on Environments and Tools for Parallel Scientific Computing +

+
+@inproceedings{
+        pruyne94,
+	author = "Jim Pruyne and Miron Livny",
+	title = "Providing Resource Management Services to Parallel Applications",
+	booktitle = "Proceedings of the Second Workshop on Environments and Tools for Parallel Scientific Computing",
+	address = "Townsend, TN",
+	month = "May",
+	year = "1994"
+}
+
+ +
+

+Paper citation from the ACM Conference on JavaGrande +

+
+@inproceedings{
+        javagenes,
+        author = "Al Globus and Eric Langhirt and Miron Livny and Ravishankar Ramamurthy and Marvin Solomon and Steve Traugott",
+        title = "{J}ava{G}enes and {C}ondor: Cycle-Scavenging Genetic Algorithms",
+	booktitle = "Proceedings of the {ACM} Conference on {J}ava {G}rande",
+	address = "San Francisco, CA",
+	year = "2000",
+	pages = "134--139"
+}
+
+ +
+

+Paper citation from the Journal of Supercomputing +

+
+@article{
+        parrot-jsc,
+        author = "Douglas Thain and Sander Klous and Miron Livny",
+        title = "Deploying Complex Applications in Unfriendly Systems with Parrot",
+	journal = "Journal of Supercomputing",
+	year = "2004"
+}
+
+ +
+

+Paper citation from the Journal of Parallel and Distributed Computing Practices +

+
+@article{
+        parrot-jpdcp,
+        author = "Douglas Thain and Miron Livny",
+        title = "Parrot: An Application Environment for Data-Intensive Computing",
+	journal = "Journal of Parallel and Distributed Computing Practices",
+	year = "2004"
+}
+
+ + +
+

+Paper citation from the Workshop on Adaptive Grid Middleware +

+
+@inproceedings{
+	parrot-agm2003,
+	author = "Douglas Thain and Miron Livny",
+	title = "Parrot: Transparent User-Level Middleware for Data-Intensive Computing",
+	booktitle = "Workshop on Adaptive Grid Middleware",
+	address = "New Orleans, Louisiana",
+	month = "September",
+	year = "2003"
+}
+
+
+

+Technical report 1448 +

+
+@techreport{
+        thain-tr1448,
+	author = "Douglas Thain and Miron Livny",
+	institution = "University of Wisconsin - Madison Computer Sciences Department",
+	title = "Error Management in the Pluggable File System",
+	number = "UW-CS-TR-1448",
+	month = "October",
+	year = "2002"
+}
+
+ +
+

+Journal citation from Journal of Cluster Computing +

+
+@article{
+        bypass-jcc,
+	author = "Douglas Thain and Miron Livny",
+	title = "Multiple Bypass: Interposition Agents for Distributed Computing",
+	journal = "Journal of Cluster Computing",
+	volume = "5",
+	year = "2001",
+	pages = "39--47"
+}
+
+ +
+

+Paper citation from HPDC 9 +

+
+@inproceedings{
+        bypass-hpdc9,
+	author = "Douglas Thain and Miron Livny",
+	title = "Bypass: A Tool for Building Split Execution Systems",
+	booktitle = "Proceedings of the Ninth {IEEE} Symposium on High Performance Distributed Computing ({HPDC9})",
+	address = "Pittsburgh, PA",
+	pages = "79--85",
+	month = "August",
+	year = "2000"
+}
+
+ +
+

+Paper citation from HPDC 8 +

+
+@inproceedings{
+        hijack-hpdc8,
+	author = "Victor C. Zandy and Barton P. Miller and Miron Livny",
+	title = "Process Hijacking",
+	booktitle = "Proceedings of the 8th {IEEE} Symposium on High Performance Distributed Computing ({HPDC8})",
+	address = "Redondo Beach, CA",
+	pages = "177--184",
+	month = "August",
+	year = "1999"
+}
+
+ +
+

+Paper citation from the 3rd Workshop on Workstation Operating Systems +

+
+@inproceedings{
+        batch-friendly,
+	author = "Miron Livny and Michael Litzkow",
+	title = "Making Workstations a Friendly Environment for Batch Jobs",
+	booktitle = "Third {IEEE} Workshop on Workstation Operating Systems",
+	month = "April",
+	year = "1992",
+	address = "Key Biscayne, FL"
+}
+
+ +
+

+Paper citation from Journal of Physics +

+
+@article{1742-6596-219-4-042017,
+  author={Zach Miller and Dan Bradley and Todd Tannenbaum and Igor Sfiligoi},
+  title={Flexible session management in a distributed environment},
+  journal={Journal of Physics: Conference Series},
+  volume={219},
+  number={4},
+  pages={042017},
+  url={http://stacks.iop.org/1742-6596/219/i=4/a=042017},
+  year={2010}
+}
+
+ +
+

+Paper citation for CHEP10 +

+
+@article{1742-6596-331-6-062002,
+  author={D Bradley and T St Clair and M Farrellee and Z Guo and M Livny and I Sfiligoi and T Tannenbaum},
+  title={An update on the scalability limits of the Condor batch system},
+  journal={Journal of Physics: Conference Series},
+  volume={331},
+  number={6},
+  pages={062002},
+  url={http://stacks.iop.org/1742-6596/331/i=6/a=062002},
+  year={2011},
+}
+
+ +
+

+Paper citation for CHEP09 +

+
+@article{1742-6596-219-6-062035,
+  author={D Bradley and D Dasu and M Livny and A Mohapatra and T Tannenbaum and G Thain},
+  title={Condor enhancements for a rapid-response adaptive computing environment for LHC},
+  journal={Journal of Physics: Conference Series},
+  volume={219},
+  number={6},
+  pages={062035},
+  url={http://iopscience.iop.org/1742-6596/219/6/062035/pdf/1742-6596_219_6_062035.pdf},
+  year={2010},
+}
+
+ +
+

+Paper citation from POLICY 2004 +

+
+@inproceedings{
+        wang-security,
+	author = "Hao Wang and Somesh Jha and Miron Livny and Patrick D. McDaniel",
+	title = "Security Policy Reconciliation in Distributed Computing Environments",
+	booktitle = "Proceedings of the 5th {IEEE} International Workshop on Policies for Distributed Systems and Networks ({POLICY2004})",
+	month = "June",
+	year = "2004",
+	address = "Yorktown Heights, NY"
+}
+
+ + +
+

+Technical report 1499 +

+
+@techreport{
+        wang-tr1499,
+	author = "Hao Wang and Somesh Jha and Miron Livny and Patrick D. McDaniel",
+	title = "Security Policy Reconciliation in Distributed Computing Environments",
+	institution = "University of Wisconsin - Madison Computer Sciences Department",
+	number = "UW-CS-TR-1499",
+	month = "March",
+	year = "2004"
+}
+
+ +
+

+Paper citation from the Workshop on Experimental Distributed Systems +

+
+@inproceedings{
+        condor-experience,
+	author = "Michael Litzkow and Miron Livny",
+	title = "Experience with the {C}ondor Distributed Batch System",
+	booktitle = "Proceedings of the {IEEE} Workshop on Experimental Distributed Systems",
+	month = "October",
+	year = "1990",
+	address = "Huntsville, AL"
+}
+
+ +
+

+Technical report 1464 +

+
+@techreport{
+        thain-tr1464,
+	author = "Douglas Thain and John Bent and Andrea Arpaci-Dusseau and Remzi Arpaci-Dusseau and Miron Livny",
+	institution = "University of Wisconsin - Madison Computer Sciences Department",
+	title = "The Architectural Implications of Pipeline and Batch Sharing in Scientific Workloads",
+	number = "UW-CS-TR-1464",
+	month = "January",
+	year = "2003"
+}
+
+ +
+

+Paper citation from the 9th SIAM Conference on Parallel Processing for Scientific Computing +

+
+@inproceedings{
+        ht-montecarlo,
+	author = "Jim Basney and Rajesh Raman and Miron Livny",
+	title = "High Throughput Monte Carlo",
+	booktitle = "Proceedings of the Ninth SIAM Conference on Parallel Processing for Scientific Computing",
+	month = "March",
+	year = "1999",
+	address = "San Antonio, TX"
+}
+
+ +
+

+Paper citation from the 16th ICDCS +

+
+@inproceedings{
+        dbc-dcs16,
+	author = "Chung-min Chen and Kenneth Salem and Miron Livny",
+	title = "The {DBC}: Processing Scientific Data Over the Internet",
+	booktitle = "16th International Conference on Distributed Computing Systems",
+	pages = "673--682",
+	month = "May",
+	year = "1996",
+	address = "Hong Kong"
+}
+
+ +
+

+Paper citation from HPDC 15 +

+
+@inproceedings{
+   hpdc15-silberstein,
+   author = "Mark Silberstein and Dan Geiger and Assaf Schuster and Miron Livny",
+   title = "Scheduling Mixed Workloads in Multi-grids: The Grid Execution Hierarchy",
+   booktitle = "Proceedings of the 15th {IEEE} Symposium on High Performance Distributed Computing ({HPDC-15})",
+   address = "Paris, France",
+   month = "June",
+   year = "2006"
+}
+
+ +
+

+Paper citation from "Proceedings of the Linux Clusters: The HPC Revolution conference" +

+
+@inproceedings{
+   wright2001cheap-cycles,
+   author = "Derek Wright",
+   title = "Cheap cycles from the desktop to the dedicated cluster: combining opportunistic and dedicated scheduling with {C}ondor",
+   booktitle = "Proceedings of the {L}inux Clusters: The HPC Revolution conference",
+   month = "June",
+   year = "2001",
+   address = "Champaign - Urbana, IL"
+}
+
+ +
+

+Paper citation from the 8th ICDCS +

+
+@inproceedings{
+        krueger-icdcs8,
+	author = "Phillip Krueger and Miron Livny",
+	title = "A Comparison of Preemptive and Non-Preemptive Load Distributing",
+	booktitle = "8th International Conference on Distributed Computing Systems",
+	month = "June",
+	year = "1988",
+	address = "San Jose, CA",
+	pages = "123--130"
+}
+
+ +
+

+Paper citation from the 7th ICDCS +

+
+@inproceedings{
+        mutka-icdcs7,
+	author = "Matt Mutka and Miron Livny",
+	title = "Scheduling Remote Processing Capacity In A Workstation-Processing Bank Computing System",
+	booktitle = "7th International Conference on Distributed Computing Systems",
+	month = "September",
+	year = "1987",
+	address = "Berlin, Germany",
+	pages = "2--9"
+}
+
+ +
+

+Paper citation from the 2006 USENIX Large Installation Systems Administration Conference +

+
+@inproceedings{
+        nmi-lisa2006,
+	author = "Andrew Pavlo and Peter Couvares and Rebekah Gietzel and Anatoly Karp and Ian D. Alderman and Miron Livny and Charles Bacon",
+	title = "The {NMI} {B}uild \& {T}est {L}aboratory: Continuous Integration Framework for Distributed Computing Software",
+	booktitle = "Proceedings of {LISA} '06: Twentieth Systems Administration Conference",
+        pages = "263--273",
+	month = "December",
+	year = "2006",
+	address = "Washington, DC",
+}
+
+ +
+

+Paper citation from CCGRID 2007 +

+
+@inproceedings{
+        nmi-ccgrid07,
+        author = {Iosup, Alexandru and Epema, Dick and Couvares, Peter and Karp, Anatoly and Livny, Miron},
+        title = {Build-and-Test Workloads for Grid Middleware: Problem, Analysis, and Applications},
+        booktitle = {Seventh IEEE International Symposium on Cluster Computing and the Grid (CCGRID)},
+        month = {May},
+        year = {2007},
+        pages = {205--213},
+        doi = {10.1109/CCGRID.2007.29}
+}
+
+ +
+

+Paper citation from the 1982 Computer Network Performance Symposium +

+ +
+@inproceedings{
+        livny-melman-82,
+	author = "Miron Livny and Myron Melman",
+	title = "Load Balancing in Homogeneous Broadcast Distributed Systems",
+	booktitle = "Proceedings of the Computer Network Performance Symposium",
+	month = "April",
+	year = "1982",
+	address = "College Park, Maryland",
+}
+
+
+

+PhD Thesis of Miron Livny +

+
+@phdthesis{
+        livny-thesis,
+	author = "Miron Livny",
+	title = "The Study of Load Balancing Algorithms for Decentralized Distributed Processing Systems",
+	school = "Weizmann Institute of Science",
+	month = "August",
+	year = "1983"
+}
+
+
+

+PhD Thesis of Matt Walter Mutka +

+
+@phdthesis{
+        mutka-thesis,
+	author = "Matt Walter Mutka",
+	title = "Sharing In A Privately Owned Workstation Environment",
+	school = "University of Wisconsin-Madison",
+	year = "1988"
+}
+
+
+

+PhD Thesis of James C. Pruyne +

+
+@phdthesis{
+        pruyne-thesis,
+	author = "James C. Pruyne",
+	title = "Resource Management Services for Parallel Applications",
+	school = "University of Wisconsin-Madison",
+	year = "1996"
+}
+
+
+

+PhD Thesis of Rajesh Raman +

+
+@phdthesis{
+        raman-thesis,
+	author = "Rajesh Raman",
+	title = "Matchmaking Frameworks for Distributed Resource Management",
+	school = "University of Wisconsin-Madison",
+	month = "October",
+	year = "2000"
+}
+
+
+

+PhD Thesis of Jim Basney +

+
+@phdthesis{
+        basney-thesis,
+	author = "Jim Basney",
+	title = "Network and CPU Co-Allocation in High Throughput Computing Environments",
+	school = "University of Wisconsin-Madison",
+	year = "2001"
+}
+
+
+

+PhD Thesis of Douglas Thain +

+
+@phdthesis{
+	thain-thesis,
+	author = "Douglas Thain",
+	title = "Coordinating Access to Computation and Data in Distributed Systems",
+	school = "University of Wisconsin-Madison",
+	year = "2004"
+}
+
+
+

+PhD Thesis of Tevfik Kosar +

+
+@phdthesis{
+	kosar-thesis,
+	author = "Tevfik Kosar",
+	title = "Data Placement in Widely Distributed Systems",
+	school = "University of Wisconsin-Madison",
+	year = "2005"
+}
+
+
+

+PhD Thesis of Sechang Son +

+
+@phdthesis{
+	sonny-thesis,
+	author = "Sechang Son",
+	title = "Middleware Approaches to Middlebox Traversal",
+	school = "University of Wisconsin-Madison",
+	year = "2006"
+}
+
+
+

+PhD Thesis of Nicholas Coleman +

+
+@phdthesis{
+    coleman-thesis,
+    author = "Nicholas Coleman",
+    title = "A Matchmaking Approach to Distributed Policy Specification and Interpretation",
+    school = "University of Wisconsin-Madison",
+    month = "August",
+    year = "2007"
+}
+
+
+

+PhD Thesis of Ian Alderman +

+
+@phdthesis{
+    alderman-thesis,
+    author = "Ian D. Alderman",
+    title = "A Security Framework for Distributed Batch Computing",
+    school = "University of Wisconsin-Madison",
+    month = "April",
+    year = "2010"
+}
+
+
+

+Paper citation for Journal of Physics 10: Scalability +

+
+@article{1742-6596-219-6-062036,
+    author={D Bradley and I Sfiligoi and S Padhi and J Frey and T Tannenbaum},
+    title={Scalability and interoperability within glideinWMS},
+    journal={Journal of Physics: Conference Series},
+    volume={219},
+    number={6},
+    pages={062036},
+    url={http://stacks.iop.org/1742-6596/219/i=6/a=062036},
+    year={2010},
+}
+
+

+Paper citation for Journal of Physics 11: Early experience +

+
+@article{1742-6596-331-6-062014,
+    author={W Andrews and B Bockelman and D Bradley and J Dost and D Evans and I Fisk and J Frey and B Holzman and M Livny and T Martin and A McCrea and A Melo and S Metson and H Pi and I Sfiligoi and P Sheldon and T Tannenbaum and A Tiradani and F Würthwein and D Weitzel},
+    title={Early experience on using glideinWMS in the cloud},
+    journal={Journal of Physics: Conference Series},
+    volume={331},
+    number={6},
+    pages={062014},
+    url={http://stacks.iop.org/1742-6596/331/i=6/a=062014},
+    year={2011},
+}
+
+
+

+Paper citation for Journal of Computational Science 20: Principles, technologies, and time: The translational journey of the HTCondor-CE +

+
+@article{BOCKELMAN2020101213,
+    title={Principles, technologies, and time: The translational journey of the HTCondor-CE},
+    journal={Journal of Computational Science},
+    year={2020},
+    issn={1877-7503},
+    doi={10.1016/j.jocs.2020.101213},
+    url={http://www.sciencedirect.com/science/article/pii/S1877750320305147},
+    author={Brian Bockelman and Miron Livny and Brian Lin and Francesco Prelz},
+    keywords={Distributed high throughput computing, High throughput computing, Translational computing, Distributed computing},
+}
+
+
+

+Paper citation for Journal of Physics 15: Commissioning +

+
+@article{1742-6596-664-6-062003,
+    author={B Bockelman and T Cartwright and J Frey and E M Fajardo and B Lin and M Selmeci and T Tannenbaum and M Zvada},
+    title={Commissioning the HTCondor-CE for the Open Science Grid},
+    journal={Journal of Physics: Conference Series},
+    volume={664},
+    number={6},
+    pages={062003},
+    url={http://stacks.iop.org/1742-6596/664/i=6/a=062003},
+    year={2015},
+}
+
+
+

+Paper citation for Journal of Physics 15: How much higher +

+
+@article{1742-6596-664-6-062014,
+    author={E M Fajardo and J M Dost and B Holzman and T Tannenbaum and J Letts and A Tiradani and B Bockelman and J Frey and D Mason},
+    title={How much higher can HTCondor fly?},
+    journal={Journal of Physics: Conference Series},
+    volume={664},
+    number={6},
+    pages={062014},
+    url={http://stacks.iop.org/1742-6596/664/i=6/a=062014},
+    year={2015},
+}
+
+
diff --git a/preview-fall2024-info/doc/condor-bibtex.html.bak b/preview-fall2024-info/doc/condor-bibtex.html.bak new file mode 100644 index 000000000..92c75ce95 --- /dev/null +++ b/preview-fall2024-info/doc/condor-bibtex.html.bak @@ -0,0 +1,1541 @@ + +Condor BibTex Source for Citations + + + + +
+

+Thesis citation from Data-Driven Batch Scheduling +

+
+@PhdThesis{BentPhd05,
+           title = "{Data-Driven Batch Scheduling}",
+          author = "John Bent",
+          school = "University of Wisconsin, Madison",
+           month = may,
+            year = "2005",
+}
+
+ +
+

+Journal article citation from Concurrency and Computation: Practice and Experience +

+
+@article{condor-practice, 
+  author    = "Douglas Thain and Todd Tannenbaum and Miron Livny",
+  title     = "Distributed computing in practice: the Condor experience",
+  journal   = "Concurrency - Practice and Experience",
+  volume    = "17",
+  number    = "2-4",
+  year      = "2005",
+  pages     = "323--356",
+}
+
+ +
+

+Book chapter citation from The Grid: Blueprint for a New Computing Infrastructure +

+
+@incollection{
+        grid2-ch19,
+        author = "Douglas Thain and Miron Livny",
+        title = "Building Reliable Clients and Servers",
+        editor = "Ian Foster and Carl Kesselman",
+        booktitle = "The Grid: Blueprint for a New Computing Infrastructure",
+        publisher = "Morgan Kaufmann",
+        year = "2003"
+}
+
+ + +
+

+Book chapter citation from Grid Computing: Making the Global Infrastructure a Reality +

+
+@incollection{
+        condorgrid,
+	author = "Douglas Thain and Todd Tannenbaum and Miron Livny",
+	title = "{C}ondor and the Grid",
+	editor = "Fran Berman and Geoffrey Fox and Tony Hey",
+	booktitle = "Grid Computing: Making the Global Infrastructure a Reality",
+	publisher = "John Wiley \& Sons Inc.",
+	month = "December",
+	year = "2002"
+}
+
+ +
+

+Book chapter citation from Beowulf Cluster Computing with Linux +

+
+@incollection{
+        beowulfbook-condor,
+	author = "Todd Tannenbaum and Derek Wright and Karen Miller and Miron Livny",
+	title = "{C}ondor -- A Distributed Job Scheduler",
+	editor = "Thomas Sterling",
+	booktitle = "Beowulf Cluster Computing with {L}inux",
+	publisher = "MIT Press",
+	month = "October",
+	year = "2001"
+}
+
+ +
+

+Book chapter citation from High Performance Cluster Computing: Architectures and Systems, Volume 1 +

+
+@incollection{
+        htc-deployment-chapter,
+	author = "Jim Basney and Miron Livny",
+	title = "Deploying a High Throughput Computing Cluster",
+	editor = "Rajkumar Buyya",
+	booktitle = "High Performance Cluster Computing: Architectures and Systems, Volume 1",
+	publisher = "Prentice Hall PTR",
+	year = "1999"
+}
+
+ +
+

+Article citation from "SPEEDUP" +

+
+@article{
+        htc-mechanisms,
+	author = "Miron Livny and Jim Basney and Rajesh Raman and Todd Tannenbaum",
+	title = "Mechanisms for High Throughput Computing",
+	journal = "SPEEDUP Journal",
+	volume = "11",
+	number = "1",
+	month = "June",
+	year = "1997"
+}
+
+ +
+

+Article citation from "Future Generation Computer Systems" +

+
+@article{
+        condor-flock,
+	title = "A worldwide flock of {C}ondors: Load sharing among workstation clusters",
+	author = "D.H.J. Epema and M. Livny and R. van Dantzig and X. Evers and J. Pruyne",
+	journal = "Future Generation Computer Systems",
+	volume = "12",
+	year = "1996",
+	pages = "53--65"
+}
+
+ +
+

+Paper citation from Proceedings of the 8th International Conference of Distributed Computing Systems +

+
+@inproceedings{
+        condor-hunter,
+	author = "Michael Litzkow and Miron Livny and Matthew Mutka",
+	title = "{C}ondor - A Hunter of Idle Workstations",
+	booktitle = "Proceedings of the 8th International Conference of Distributed Computing Systems",
+	month = "June",
+	year = "1988"
+}
+
+ +
+

+Paper citation from the "Usenix Summer Conference" +

+
+@inproceedings{
+        litzkow-cycles,
+	author = "Michael Litzkow",
+	title = "Remote Unix - Turning Idle Workstations into Cycle Servers",
+	booktitle = "Usenix Summer Conference",
+	year = "1987",
+	pages = "381--384"
+}
+
+ +
+

+Technical report 1481 +

+
+@techreport{
+        ncoleman-tr1481,
+	author = "Nicholas Coleman and Rajesh Raman and Miron Livny and Marvin Solomon",
+	institution = "University of Wisconsin - Madison Computer Sciences Department",
+	title = "Distributed Policy Management and Comprehension with Classified Advertisements",
+	number = "UW-CS-TR-1481",
+	month = "April",
+	year = "2003"
+}
+
+ +
+

+Masters' Project report of Nicholas Coleman +

+
+@mastersthesis{
+        ncoleman-mr,
+	author = "Nicholas Coleman",
+	title = "An Implementation of Matchmaking Analysis in Condor",
+	school = "University of Wisconsin-Madison",
+	month = "May",
+	year = "2001"
+}
+
+ +
+

+Paper citation from HPDC9 +

+
+@inproceedings{
+        raman-hpdc9,
+	author = "Rajesh Raman and Miron Livny and Marvin Solomon",
+	title = "Resource Management through Multilateral Matchmaking",
+	booktitle = "Proceedings of the Ninth {IEEE} Symposium on High Performance Distributed Computing ({HPDC9})",
+	address = "Pittsburgh, PA",
+	month = "August",
+	year = "2000",
+	pages = "290--291"
+}
+
+ +
+

+Paper citation from HPDC7 +

+
+@inproceedings{
+        raman-hpdc7,
+	author = "Rajesh Raman and Miron Livny and Marvin Solomon",
+	title = "Matchmaking: Distributed Resource Management for High Throughput Computing",
+	booktitle = "Proceedings of the Seventh {IEEE} International Symposium on High Performance Distributed Computing ({HPDC7})",
+	month = "July",
+	date = "28--31",
+	year = "1998",
+	address = "Chicago, IL"
+}
+
+ +
+

+Paper citation from HPDC9 +

+
+@inproceedings{
+        basney-hpdc9,
+	author = "Jim Basney and Miron Livny",
+	title = "Managing Network Resources in {C}ondor",
+	booktitle = "Proceedings of the Ninth {IEEE} Symposium on High Performance Distributed Computing ({HPDC9})",
+	address = "Pittsburgh, PA",
+	month = "August",
+	year = "2000",
+	pages = "298--299"
+}
+
+ +
+

+Article citation from "International Journal of High Performance Computing Applications" +

+
+@article{
+        basney-goodput,
+	title = "Improving Goodput by Co-scheduling CPU and Network Capacity",
+	author = "Jim Basney and Miron Livny",
+	journal = "International Journal of High Performance Computing Applications",
+	volume = "13",
+	number = "3",
+	year = "1999",
+}
+
+ +
+

+Book chapter citation from The Grid: Blueprint for a New Computing Infrastructure +

+
+@incollection{
+        gridbook-htc,
+	author = "Miron Livny and Rajesh Raman",
+	title = "High-throughput resource management",
+	editor = "Ian Foster and Carl Kesselman",
+	booktitle = "The Grid: Blueprint for a New Computing Infrastructure",
+	publisher = "Morgan Kaufmann",
+	year = "1998"
+}
+
+ +
+

+Article citation from "Performance Evaluation" +

+
+@article{
+        mutka-pe,
+	title = "The Available Capacity of a Privately Owned Workstation Environment",
+	author = "Matt Mutka and Miron Livny",
+	journal = "Performance Evaluation",
+	volume = "12",
+	number = "4",
+	year = "1991",
+	pages = "269--284"
+}
+
+ +
+

+Paper citation from Performance '87 +

+
+@inproceedings{
+        mutka-p87,
+	author = "Matt Mutka and Miron Livny",
+	title = "Profiling Workstations' Available Capacity for Remote Execution",
+	booktitle = "Performance '87, 12th IFIP WG 7.3",
+	address = "Brussels",
+	month = "December",
+	year = "1987",
+	pages = "529--544"
+}
+
+ + +
+

+Paper citation from CHEP 2000 +

+
+@inproceedings{
+        basney-chep2000,
+	author = "Jim Basney and Miron Livny and Paolo Mazzanti",
+	title = "Harnessing the Capacity of Computational Grids for High Energy Physics",
+	booktitle = "Proceedings of the International Conference on Computing in High Energy and Nuclear Physics (CHEP 2000)",
+	address = "Padova, Italy",
+	month = "February",
+	year = "2000"
+}
+
+ +
+

+Technical report 1346 +

+
+@techreport{
+        litzkow-tr1346,
+	author = "Michael Litzkow and Todd Tannenbaum and Jim Basney and Miron Livny",
+	institution = "University of Wisconsin - Madison Computer Sciences Department",
+	title = "Checkpoint and Migration of {UNIX} Processes in the {C}ondor Distributed Processing System",
+	number = "UW-CS-TR-1346",
+	month = "April",
+	year = "1997"
+}
+
+ +
+

+Paper citation from IPPS '96 +

+
+@inproceedings{
+        pruyne-ipps96,
+	author = "Jim Pruyne and Miron Livny",
+	title = "Managing Checkpoints for Parallel Programs",
+	booktitle = "Workshop on Job Scheduling Strategies for Parallel Processing (IPPS '96)",
+	address = "Honolulu, HI",
+	month = "April",
+	year = "1996"
+}
+
+ +
+

+Paper citation from MICS 2005 +

+
+@inproceedings{
+        meehean-mics2005,
+        author = "Joe Meehean and Miron Livny",
+        title = "A Service Migration Case Study: Migrating the {C}ondor Schedd",
+        booktitle = "Midwest Instruction and Computing Symposium",
+        month = "April",
+        year = 2005
+}
+
+ +
+

+Article citation from "Computer Physics Communications" +

+
+@article{
+        basney-cpc,
+	title = "Utilizing Widely Distributed Computational Resources Efficiently with Execution Domains",
+	author = "Jim Basney and Miron Livny and Paolo Mazzanti",
+	journal = "Computer Physics Communications",
+	publisher = "Elsevier Science",
+	volume = "140",
+	number = "1-2",
+	pages = "246+",
+	month = "October",
+	year = "2001"
+}
+
+ +
+

+Article citation from Dr. Dobb's Journal +

+
+@article{
+        tannenba-dobbs,
+	author = "Todd Tannenbaum and Michael Litzkow",
+	title = "Checkpointing and Migration of UNIX Processes in the {C}ondor Distributed Processing System",
+	journal = "Dr. Dobb's Journal",
+	month = "February",
+	year = "1995"
+}
+
+ +
+

+Paper citation from Usenix '92 +

+
+@inproceedings{
+        litzkow-usenix,
+	author = "Michael Litzkow and Miron Livny",
+	title = "Supporting Checkpointing and Process Migration Outside the {UNIX} Kernel",
+	booktitle = "Proceedings of the Winter 1992 {USENIX} Conference",
+	address = "San Francisco, CA",
+	month = "January",
+	year = "1992",
+	pages = "283--290"
+}
+
+ +
+

+Paper citation from Grid 2007 +

+
+@inproceedings{
+        chervenak-grid2007,
+	author = "Ann Chervenak and Ewa Deelman and Miron Livny and Mei-Hui Su and Rob Schuler and Shishir Bharathi and Gaurang Mehta and Karan Vahi",
+	title = "Data Placement for Scientific Applications in Distributed Environments",
+	booktitle = "Proceedings of the 8th IEEE/ACM International Conference on Grid Computing (Grid 2007)",
+	address = "Austin, TX",
+	month = "September",
+	year = "2007"
+}
+
+ +
+

+Paper citation from Euro-Par 2004 +

+
+@inproceedings{
+        profiling-europar2004,
+	author = "George Kola and Tevfik Kosar and Miron Livny",
+	title = "Profiling Grid Data Transfer Protocols and Servers",
+	booktitle = "Proceedings of Euro-Par 2004",
+	address = "Pisa, Italy",
+	month = "September",
+	year = "2004"
+}
+
+ +
+

+Paper citation from NOSSDAV 2004 +

+
+@inproceedings{
+        kola-nossdav2004,
+	author = "George Kola and Tevfik Kosar and Miron Livny",
+	title = "A Fully Automated Fault-tolerant System for Distributed Video Processing and Off-site Replication",
+	booktitle = "Proceedings of the 14th {ACM} International Workshop on Network and Operating Systems Support for Digital Audio and Video ({NOSSDAV 2004})",
+	address = "Kinsale, Ireland",
+	month = "June",
+	year = "2004"
+}
+
+ +
+

+Journal citation from Parallel and Distributed Computing Practices +

+
+@article{
+        runtime-pdcp,
+	author = "George Kola and Tevfik Kosar and Miron Livny",
+	title = "Run-time Adaptation of Grid Data-placement Jobs",
+	journal = "Parallel and Distributed Computing Practices",
+	year = "2004"
+}
+
+ +
+

+Paper citation from ISPDC 2003 +

+
+@inproceedings{
+        kosar-ispdc2003,
+	author = "Tevfik Kosar and George Kola and Miron Livny",
+	title = "A Framework for Self-optimising, Fault-tolerant, High Performance Bulk Data Transfers in a Heterogeneous Grid Environment",
+	booktitle = "Proceedings of the Second International Symposium on Parallel and Distributed Computing ({ISPDC2003})",
+	address = "Ljubljana, Slovenia",
+	month = "October",
+	year = "2003"
+}
+
+ +
+

+Paper citation from AGRIDM 2003 +

+
+@inproceedings{
+        runtime-agridm2003,
+	author = "George Kola and Tevfik Kosar and Miron Livny",
+	title = "Run-time Adaptation of Grid Data-placement Jobs",
+	booktitle = "Proceedings of the International Workshop on Adaptive Grid Middleware ({AGridM2003})",
+	address = "New Orleans, LA",
+	month = "September",
+	year = "2003"
+}
+
+ +
+

+Technical report 1483 +

+
+@techreport{
+        kosart-tr1483,
+	author = "Tevfik Kosar and Miron Livny",
+	institution = "University of Wisconsin - Madison Computer Sciences Department",
+	title = "Scheduling Data Placement Activities in Grid",
+	number = "UW-CS-TR-1483",
+	month = "July",
+	year = "2003"
+}
+
+ +
+

+Paper citation from WORLDS 2005 +

+
+@inproceedings{
+        santhanam-worlds05,
+	author = "Sriya Santhanam and Pradheep Elango and Andrea Arpaci-Dusseau and Miron Livny",
+        title = "Deploying Virtual Machines as Sandboxes for the Grid",
+        booktitle = "Second Workshop on Real, Large Distributed Systems ({WORLDS 2005})",
+        address = "San Francisco, CA",
+        month = "December",
+        year = "2005"
+}
+
+ +
+

+Paper citation from 2005 UK e-Science All Hands +

+
+@inproceedings{
+	birdbath2005,
+	author = "Clovis Chapman and Charaka Goonatilake and Wolfgang Emmerich and Matthew Farrellee and Todd Tannenbaum and Miron Livny and Mark Calleja and Martin Dove",
+        title = "Condor {B}ird{B}ath: Web Service interfaces to {C}ondor",
+        booktitle = "Proceedings of 2005 {UK} e-{S}cience {A}ll {H}ands {M}eeting",
+        address = "Nottingham, UK",
+		pages = "737--744",
+        month = "September",
+        year = "2005"
+}
+
+ +
+

+Paper citation from Supercomputing 2007 +

+
+@inproceedings{
+	SC07-Iosup,
+	author = "Alexandru Iosup and Dick H.J. Epema and Todd Tannenbaum and Matthew Farrellee and Miron Livny",
+        title = "Inter-Operating Grids through Delegated MatchMaking",
+	booktitle = "Proceedings of International Conference for High Performance Computing, Networking, Storage and Analysis (SC07)",
+	address = "Reno, NV",
+	month = "November",
+	year = "2007"
+}
+
+ +
+

+Paper citation from Cluster 2005 +

+
+@inproceedings{
+	Sonny-CLUSTER2005,
+	author = "Sechang Son and Matthew Farrellee and Miron Livny",
+        title = "A Generic Proxy Mechanism for Secure Middlebox Traversal",
+	booktitle = "Proceedings of CLUSTER 2005",
+	address = "Boston, MA",
+	month = "September",
+	year = "2005"
+}
+
+ +
+

+Paper citation from UK e-Science All Hands Meeting 2005 +

+
+@inproceedings{
+	AHM2005,
+	author = "Bruce Beckles and Sechang Son and John Kewley",
+        title = "Current methods for negotiating firewalls for the Condor system",
+	booktitle = "Proceedings of the 4th UK e-Science All Hands Meeting 2005",
+	address = "Nottingham, UK",
+	month = "September",
+	year = "2005"
+}
+
+ +
+

+Paper citation from HPDC 14 +

+
+@inproceedings{
+	CODO-hpdc-2005,
+	author = "Sechang Son and  Bill Allcock and Miron Livny",
+        title = "CODO: Firewall Traversal by Cooperative On-Demand Opening",
+	booktitle = "Proceedings of the Fourteenth {IEEE} Symposium on High Performance Distributed Computing",
+	address = "Research Triangle Park, NC",
+	month = "July",
+	year = "2005"
+}
+
+ +
+

+Paper citation from 2004 UK e-Science All Hands +

+
+@inproceedings{
+	condor-ogsa-2004,
+	author = "Clovis Chapman and  Paul Wilson and  Todd Tannenbaum and  Matthew Farrellee and  Miron Livny and  John Brodholt and Wolfgang Emmerich",
+        title = "Condor services for the global grid: interoperability between {C}ondor and {OGSA}",
+        booktitle = "Proceedings of 2004 {UK} e-{S}cience {A}ll {H}ands {M}eeting",
+        address = "Nottingham, UK",
+		pages = "870--877",
+        month = "August",
+        year = "2004"
+}
+
+ +
+

+Paper citation from Cluster 2004 +

+
+@inproceedings{
+	samgrid-cluster2004,
+	author = "Andrew Baranovski and Gabriele Garzoglio and Igor Terekhov and Alain Roy and Todd Tannenbaum",
+        title = "Management of Grid Jobs and Data within {SAMG}rid",
+        booktitle = "Proceedings of 2004 {IEEE} International Conference on Cluster Computing",
+		publisher = "{IEEE}",
+        address = "San Diego, CA",
+		pages = "353--360",
+        month = "September",
+        year = "2004"
+}
+
+ +
+

+Paper citation from Cluster 2004 +

+
+@inproceedings{
+	gridknowledgebase-cluster2004,
+	author = "George Kola and Tevfik Kosar and Miron Livny",
+    title = "A Client-centric Grid Knowledgebase",
+    booktitle = "Proceedings of 2004 {IEEE} International Conference on Cluster Computing",
+	publisher = "{IEEE}",
+    address = "San Diego, CA",
+	pages = "431--438",
+    month = "September",
+    year = "2004"
+}
+
+ +
+

+Paper citation from NSDI 2004 +

+
+@inproceedings{
+        badfs-nsdi04,
+        author = "John Bent and Douglas Thain and Andrea Arpaci-Dusseau and Remzi Arpaci-Dusseau and Miron Livny",
+        title = "Explicit Control in a Batch Aware Distributed File System",
+        booktitle = "Proceedings of the First {USENIX/ACM} Conference on Networked Systems Design and Implementation ({NSDI})",
+        address = "San Francisco, CA",
+        month = "March",
+        year = "2004"
+}
+
+ +
+

+Paper citation from HPDC 12 +

+
+@inproceedings{
+        thain-hpdc12-sharing,
+	author = "Douglas Thain and John Bent and Andrea Arpaci-Dusseau and Remzi Arpaci-Dusseau and Miron Livny",
+	title = "Pipeline and Batch Sharing in Grid Workloads",
+	booktitle = "Proceedings of the Twelfth {IEEE} Symposium on High Performance Distributed Computing",
+	address = "Seattle, WA",
+	month = "June",
+	year = "2003"
+}
+
+ +
+

+Paper citation from HPDC 12 +

+
+@inproceedings{
+        thain-hpdc12-ethernet,
+	author = "Douglas Thain and Miron Livny",
+	title = "The Ethernet Approach to Grid Computing",
+	booktitle = "Proceedings of the Twelfth {IEEE} Symposium on High Performance Distributed Computing",
+	address = "Seattle, WA",
+	month = "June",
+	year = "2003"
+}
+
+ +
+

+Book chapter citation from Grid Resource Management +

+
+@incollection{
+        nest-bookchapter,
+	author = "John Bent and Venkateshwaran Venkataramani and Nick LeRoy and Alain Roy and Joseph Stanley and Andrea Arpaci-Dusseau and Remzi Arpaci-Dusseau and Miron Livny",
+	title = "{N}e{ST} - A Grid Enabled Storage Appliance",
+	editor = "Jan Weglarz and Jarek Nabrzyski and Jennifer Schopf and Maciej Stroinski",
+	booktitle = "Grid Resource Management",
+	publisher = "Kluwer Academic Publishers",
+	year = "2003"
+}
+
+ +
+

+Book chapter citation from Grid Computing: Making the Global Infrastructure a Reality +

+
+@incollection{
+        condor-grid,
+	author = "Douglas Thain and Todd Tannenbaum and Miron Livny",
+	title = "{C}ondor and the Grid",
+	editor = "Fran Berman and Geoffrey Fox and Anthony Hey",
+	booktitle = "Grid Computing: Making the Global Infrastructure a Reality",
+	publisher = "John Wiley \& Sons Inc.",
+	month = "April",
+	year = "2003"
+}
+
+ +
+

+Technical report PPDG-20 +

+
+@techreport{
+        ppdg-20,
+	author = "Francesco Giacomini and Francesco Prelz and Massimo Sgaravatto and Igor Terekhov and Gabriele Garzoglio and Todd Tannenbaum",
+	institution = "Particle Physics Data Grid collaboration (http://www.ppdg.net)",
+	title = "Planning on the Grid: A Status Report [DRAFT]",
+	number = "PPDG-20",
+	month = "October",
+	year = "2002"
+}
+
+ +
+

+Paper citation from HPDC 11 +

+
+@inproceedings{
+        storage-hpdc11,
+	author = "John Bent and Venkateshwaran Venkataramani and Nick LeRoy and Alain Roy and Joseph Stanley and Andrea Arpaci-Dusseau and Remzi Arpaci-Dusseau and Miron Livny",
+	title = "Flexibility, Manageability, and Performance in a Grid Storage Appliance",
+	booktitle = "Proceedings of the Eleventh {IEEE} Symposium on High Performance Distributed Computing",
+	address = "Edinburgh, Scotland",
+	month = "July",
+	year = "2002"
+}
+
+ +
+

+Paper citation from HPDC 11 +

+
+@inproceedings{
+        thain-hpdc11,
+	author = "Douglas Thain and Miron Livny",
+	title = "Error Scope on a Computational Grid: Theory and Practice",
+	booktitle = "Proceedings of the Eleventh {IEEE} Symposium on High Performance Distributed Computing",
+	address = "Edinburgh, Scotland",
+	month = "July",
+	year = "2002"
+}
+
+ +
+

+Paper citation from Supercomputing 2001 +

+
+@inproceedings{
+        thain-sc2001,
+	author = "Douglas Thain and John Bent and Andrea Arpaci-Dusseau and Remzi Arpaci-Dusseau and Miron Livny",
+	title = "Gathering at the Well: Creating Communities for Grid {I/O}",
+	booktitle = "Proceedings of Supercomputing 2001",
+	address = "Denver, Colorado",
+	month = "November",
+	year = "2001"
+}
+
+ +
+

+Journal citation from Journal of Cluster Computing +

+
+@article{
+        condor-g-jcc,
+	author = "James Frey and Todd Tannenbaum and Ian Foster and Miron Livny and Steve Tuecke",
+	title = "{C}ondor-{G}: A Computation Management Agent for Multi-Institutional Grids",
+	journal = "Cluster Computing",
+	volume = "5",
+	year = "2002",
+	pages = "237--246"
+}
+
+ +
+

+Paper citation from HPDC 10 +

+
+@inproceedings{
+        condor-g-hpdc10,
+	author = "James Frey and Todd Tannenbaum and Ian Foster and Miron Livny and Steve Tuecke",
+	title = "{C}ondor-{G}: A Computation Management Agent for Multi-Institutional Grids",
+	booktitle = "Proceedings of the Tenth {IEEE} Symposium on High Performance Distributed Computing ({HPDC})",
+	address = "San Francisco, California",
+	pages = "7--9",
+	month = "August",
+	year = "2001"
+}
+
+ +
+

+Paper citation from HPDC 10 +

+
+@inproceedings{
+        kangaroo-hpdc10,
+        author = "Douglas Thain and Jim Basney and Se-Chang Son and Miron Livny",
+	title  = "The {K}angaroo Approach to Data Movement on the Grid",
+	booktitle = "Proceedings of the Tenth {IEEE} Symposium on High Performance Distributed Computing ({HPDC10})",
+	address = "San Francisco, CA",
+	month = "August",
+	date = "7--9",
+	year = "2001"
+}
+
+ +
+

+Masters' Project report of John Bent +

+
+@mastersthesis{
+        bent-mr,
+	author = "John Bent",
+	title = "Building Storage Appliances for the Grid and Beyond",
+	school = "University of Wisconsin-Madison",
+	month = "May",
+	year = "2001"
+}
+
+ +
+

+Paper citation from CCGRID 2003 +

+
+@inproceedings{
+        son-ccgrid03,
+        author = "Sechang Son and Miron Livny",
+	title  = "Recovering Internet Symmetry in Distributed Computing",
+	booktitle = "Proceedings of the 3rd International Symposium on Cluster Computing and the Grid",
+	address = "Tokyo, Japan",
+	month = "May",
+	year = "2003"
+}
+
+ +
+

+Paper citation from GRID 2000 +

+
+@inproceedings{
+        mw-grid2000,
+        author = "Elisa Heymann and Miquel A. Senar and Emilio Luque and Miron Livny",
+	title  = "Adaptive Scheduling for Master-Worker Applications on the Computational Grid",
+	booktitle = "Proceedings of the First IEEE/ACM International Workshop on Grid Computing (GRID 2000)",
+	address = "Bangalore, India",
+	month = "December",
+	year = "2000"
+}
+
+ +
+

+Paper citation from HiPC 2000 +

+
+@inproceedings{
+        mw-hipc2000,
+        author = "Elisa Heymann and Miquel A. Senar and Emilio Luque and Miron Livny",
+	title  = "Evaluation of an Adaptive Scheduling Strategy for Master-Worker Applications on Clusters of Workstations",
+	booktitle = "Proceedings of the 7th International Conference on High Performance Computing (HiPC 2000)",
+	address = "Bangalore, India",
+	month = "December",
+	year = "2000"
+}
+
+ +
+

+Paper citation from HPDC 9 +

+
+@inproceedings{
+        mw-hpdc9,
+	author = "Jeff Linderoth and Sanjeev Kulkarni and Jean-Pierre Goux and Michael Yoder",
+	title = "An Enabling Framework for Master-Worker Applications on the Computational Grid",
+	booktitle = "Proceedings of the Ninth {IEEE} Symposium on High Performance Distributed Computing ({HPDC9})",
+	address = "Pittsburgh, PA",
+	pages = "43--50",
+	month = "August",
+	year = "2000"
+}
+
+ +
+

+Technical report ANL/MCS-P792-0200 +

+
+@techreport{
+        mw-tr,
+	author = "Jeff Linderoth and Jean-Pierre Goux and Michael Yoder",
+	institution = "Mathematics and Computer Science Division, Argonne National Laboratory",
+	title = "Metacomputing and the Master-Worker Paradigm",
+	number = "ANL/MCS-P792-0200",
+	month = "February",
+	year = "2000"
+}
+
+ +
+

+Paper citation from the Second Workshop on Environments and Tools for Parallel Scientific Computing +

+
+@inproceedings{
+        pruyne94,
+	author = "Jim Pruyne and Miron Livny",
+	title = "Providing Resource Management Services to Parallel Applications",
+	booktitle = "Proceedings of the Second Workshop on Environments and Tools for Parallel Scientific Computing",
+	address = "Townsend, TN",
+	month = "May",
+	year = "1994"
+}
+
+ +
+

+Paper citation from the ACM Conference on JavaGrande +

+
+@inproceedings{
+        javagenes,
+        author = "Al Globus and Eric Langhirt and Miron Livny and Ravishankar Ramamurthy and Marvin Solomon and Steve Traugott",
+        title = "{J}ava{G}enes and {C}ondor: Cycle-Scavenging Genetic Algorithms",
+	booktitle = "Proceedings of the {ACM} Conference on {J}ava {G}rande",
+	address = "San Francisco, CA",
+	year = "2000",
+	pages = "134--139"
+}
+
+ +
+

+Paper citation from the Journal of Supercomputing +

+
+@article{
+        parrot-jsc,
+        author = "Douglas Thain and Sander Klous and Miron Livny",
+        title = "Deploying Complex Applications in Unfriendly Systems with Parrot",
+	journal = "Journal of Supercomputing",
+	year = "2004"
+}
+
+ +
+

+Paper citation from the Journal of Parallel and Distributed Computing Practices +

+
+@article{
+        parrot-jpdcp,
+        author = "Douglas Thain and Miron Livny",
+        title = "Parrot: An Application Environment for Data-Intensive Computing",
+	journal = "Journal of Parallel and Distributed Computing Practices",
+	year = "2004"
+}
+
+ + +
+

+Paper citation from the Workshop on Adaptive Grid Middleware +

+
+@inproceedings{
+	parrot-agm2003,
+	author = "Douglas Thain and Miron Livny",
+	title = "Parrot: Transparent User-Level Middleware for Data-Intensive Computing",
+	booktitle = "Workshop on Adaptive Grid Middleware",
+	address = "New Orleans, Louisiana",
+	month = "September",
+	year = "2003"
+}
+
+
+

+Technical report 1448 +

+
+@techreport{
+        thain-tr1448,
+	author = "Douglas Thain and Miron Livny",
+	institution = "University of Wisconsin - Madison Computer Sciences Department",
+	title = "Error Management in the Pluggable File System",
+	number = "UW-CS-TR-1448",
+	month = "October",
+	year = "2002"
+}
+
+ +
+

+Journal citation from Journal of Cluster Computing +

+
+@article{
+        bypass-jcc,
+	author = "Douglas Thain and Miron Livny",
+	title = "Multiple Bypass: Interposition Agents for Distributed Computing",
+	journal = "Journal of Cluster Computing",
+	volume = "5",
+	year = "2001",
+	pages = "39--47"
+}
+
+ +
+

+Paper citation from HPDC 9 +

+
+@inproceedings{
+        bypass-hpdc9,
+	author = "Douglas Thain and Miron Livny",
+	title = "Bypass: A Tool for Building Split Execution Systems",
+	booktitle = "Proceedings of the Ninth {IEEE} Symposium on High Performance Distributed Computing ({HPDC9})",
+	address = "Pittsburgh, PA",
+	pages = "79--85",
+	month = "August",
+	year = "2000"
+}
+
+ +
+

+Paper citation from HPDC 8 +

+
+@inproceedings{
+        hijack-hpdc8,
+	author = "Victor C. Zandy and Barton P. Miller and Miron Livny",
+	title = "Process Hijacking",
+	booktitle = "Proceedings of the 8th {IEEE} Symposium on High Performance Distributed Computing ({HPDC8})",
+	address = "Redondo Beach, CA",
+	pages = "177--184",
+	month = "August",
+	year = "1999"
+}
+
+ +
+

+Paper citation from the 3rd Workshop on Workstation Operating Systems +

+
+@inproceedings{
+        batch-friendly,
+	author = "Miron Livny and Michael Litzkow",
+	title = "Making Workstations a Friendly Environment for Batch Jobs",
+	booktitle = "Third {IEEE} Workshop on Workstation Operating Systems",
+	month = "April",
+	year = "1992",
+	address = "Key Biscayne, FL"
+}
+
+ +
+

+Paper citation from Journal of Physics +

+
+@article{1742-6596-219-4-042017,
+  author={Zach Miller and Dan Bradley and Todd Tannenbaum and Igor Sfiligoi},
+  title={Flexible session management in a distributed environment},
+  journal={Journal of Physics: Conference Series},
+  volume={219},
+  number={4},
+  pages={042017},
+  url={http://stacks.iop.org/1742-6596/219/i=4/a=042017},
+  year={2010}
+}
+
+ +
+

+Paper citation for CHEP10 +

+
+@article{1742-6596-331-6-062002,
+  author={D Bradley and T St Clair and M Farrellee and Z Guo and M Livny and I Sfiligoi and T Tannenbaum},
+  title={An update on the scalability limits of the Condor batch system},
+  journal={Journal of Physics: Conference Series},
+  volume={331},
+  number={6},
+  pages={062002},
+  url={http://stacks.iop.org/1742-6596/331/i=6/a=062002},
+  year={2011},
+}
+
+ +
+

+Paper citation for CHEP09 +

+
+@article{1742-6596-219-6-062035,
+  author={D Bradley and D Dasu and M Livny and A Mohapatra and T Tannenbaum and G Thain},
+  title={Condor enhancements for a rapid-response adaptive computing environment for LHC},
+  journal={Journal of Physics: Conference Series},
+  volume={219},
+  number={6},
+  pages={062035},
+  url={http://iopscience.iop.org/1742-6596/219/6/062035/pdf/1742-6596_219_6_062035.pdf},
+  year={2010},
+}
+
+ +
+

+Paper citation from POLICY 2004 +

+
+@inproceedings{
+        wang-security,
+	author = "Hao Wang and Somesh Jha and Miron Livny and Patrick D. McDaniel",
+	title = "Security Policy Reconciliation in Distributed Computing Environments",
+	booktitle = "Proceedings of the 5th {IEEE} International Workshop on Policies for Distributed
+	Systems and Networks({POLICY2004})",
+	month = "June",
+	year = "2004",
+	address = "Yorktown Heights, NY"
+}
+
+ + +
+

+Technical report 1499 +

+
+@techreport{
+        wang-tr1499,
+	author = "Hao Wang and Somesh Jha and Miron Livny and Patrick D. McDaniel",
+	title = "Security Policy Reconciliation in Distributed Computing Environments",
+	institution = "University of Wisconsin - Madison Computer Sciences Department",
+	number = "UW-CS-TR-1499",
+	month = "March",
+	year = "2004"
+}
+
+ +
+

+Paper citation from the Workshop on Experimental Distributed Systems +

+
+@inproceedings{
+        condor-experience,
+	author = "Michael Litzkow and Miron Livny",
+	title = "Experience with the {C}ondor Distributed Batch System",
+	booktitle = "Proceedings of the {IEEE} Workshop on Experimental Distributed Systems",
+	month = "October",
+	year = "1990",
+	address = "Huntsville, AL"
+}
+
+ +
+

+Technical report 1464 +

+
+@techreport{
+        thain-tr1464,
+	author = "Douglas Thain and John Bent and Andrea Arpaci-Dusseau and Remzi Arpaci-Dusseau and Miron Livny",
+	institution = "University of Wisconsin - Madison Computer Sciences Department",
+	title = "The Architectural Implications of Pipeline and Batch Sharing in Scientific Workloads",
+	number = "UW-CS-TR-1464",
+	month = "January",
+	year = "2003"
+}
+
+ +
+

+Paper citation from the 9th SIAM Conference on Parallel Processing for Scientific Computing +

+
+@inproceedings{
+        ht-montecarlo,
+	author = "Jim Basney and Rajesh Raman and Miron Livny",
+	title = "High Throughput Monte Carlo",
+	booktitle = "Proceedings of the Ninth SIAM Conference on Parallel Processing for Scientific Computing",
+	month = "March",
+	year = "1999",
+	address = "San Antonio, TX"
+}
+
+ +
+

+Paper citation from the 16th ICDCS +

+
+@inproceedings{
+        dbc-dcs16,
+	author = "Chung-min Chen and Kenneth Salem and Miron Livny",
+	title = "The {DBC}: Processing Scientific Data Over the Internet",
+	booktitle = "16th International Conference on Distributed Computing Systems",
+	pages = "673-682",
+	month = "May",
+	year = "1996",
+	address = "Hong Kong"
+}
+
+ +
+

+Paper citation from HPDC 15 +

+
+@inproceedings{
+   hpdc15-silberstein,
+   author = "Mark Silberstein and Dan Geiger and Assaf Schuster and Miron Livny",
+   title = "Scheduling Mixed Workloads in Multi-grids: The Grid Execution Hierarchy",
+   booktitle = "Proceedings of the 15th {IEEE} Symposium on High Performance Distributed Computing ({HPDC-15})",
+   address = "Paris, France",
+   month = "June",
+   year = "2006"
+}
+
+ +
+

+Paper citation from "Proceedings of the Linux Clusters: The HPC Revolution conference" +

+
+@inproceedings{
+   wright2001cheap-cycles,
+   author = "Derek Wright",
+   title = "Cheap cycles from the desktop to the dedicated cluster: combining opportunistic and dedicated scheduling with {C}ondor",
+   booktitle = "Proceedings of the {L}inux Clusters: The HPC Revolution conference"
+   month = "June",
+   year = "2001",
+   address = "Champaign - Urbana, IL"
+}
+
+ +
+

+Paper citation from the 8th ICDCS +

+
+@inproceedings{
+        krueger-icdcs8,
+	author = "Phillip Krueger and Miron Livny",
+	title = "A Comparison of Preemptive and Non-Preemptive Load Distributing",
+	booktitle = "8th International Conference on Distributed Computing Systems",
+	month = "June",
+	year = "1988",
+	address = "San Jose, CA",
+	pages = "123--130"
+}
+
+ +
+

+Paper citation from the 7th ICDCS +

+
+@inproceedings{
+        mutka-icdcs7,
+	author = "Matt Mutka and Miron Livny",
+	title = "Scheduling Remote Processing Capacity In A Workstation-Processing Bank Computing System",
+	booktitle = "7th International Conference on Distributed Computing Systems",
+	month = "September",
+	year = "1987",
+	address = "Berlin, Germany",
+	pages = "2--9"
+}
+
+ +
+

+Paper citation from the 2006 USENIX Large Installation Systems Administration Conference +

+
+@inproceedings{
+        nmi-lisa2006,
+	author = "Andrew Pavlo and Peter Couvares and Rebekah Gietzel and Anatoly Karp and Ian D. Alderman and Miron Livny and Charles Bacon",
+	title = "The {NMI} {B}uild \& {T}est {L}aboratory: Continuous Integration Framework for Distributed Computing Software",
+	booktitle = "Proceedings of {LISA} '06: Twentieth Systems Administration Conference",
+        pages = "263--273",
+	month = "December",
+	year = "2006",
+	address = "Washington, DC",
+}
+
+ +
+

+Paper citation from CCGRID 2007 +

+
+@inproceedings{
+        nmi-ccgrid07,
+        author = {Iosup, Alexandru and Epema, Dick and Couvares, Peter and Karp, Anatoly and Livny, Miron},
+        title = {Build-and-Test Workloads for Grid Middleware: Problem, Analysis, and Applications},
+        booktitle = {Seventh IEEE International Symposium on Cluster Computing and the Grid (CCGRID)},
+        month = {May},
+        year = {2007},
+        pages = {205--213},
+        doi = {10.1109/CCGRID.2007.29}
+}
+
+ +
+

+Paper citation from the 1982 Computer Network Performance Symposium +

+ +
+@inproceedings{
+        livny-melman-82,
+	author = "Miron Livny and Myron Melman",
+	title = "Load Balancing in Homogeneous Broadcast Distributed Systems",
+	booktitle = "Proceedings of the Computer Network Performance Symposium",
+	month = "April",
+	year = "1982",
+	address = "College Park, Maryland",
+}
+
+
+

+PhD Thesis of Miron Livny +

+
+@phdthesis{
+        livny-thesis,
+	author = "Miron Livny",
+	title = "The Study of Load Balancing Algorithms for Decentralized Distributed Processing Systems",
+	school = "Weizmann Institute of Science",
+	month = "August",
+	year = "1983
+}
+
+
+

+PhD Thesis of Matt Walter Mutka +

+
+@phdthesis{
+        mutka-thesis,
+	author = "Matt Walter Mutka",
+	title = "Sharing In A Privately Owned Workstation Environment",
+	school = "University of Wisconsin-Madison",
+	year = "1988"
+}
+
+
+

+PhD Thesis of James C. Pruyne +

+
+@phdthesis{
+        pruyne-thesis,
+	author = "James C. Pruyne",
+	title = "Resource Management Services for Parallel Applications",
+	school = "University of Wisconsin-Madison",
+	year = "1996"
+}
+
+
+

+PhD Thesis of Rajesh Raman +

+
+@phdthesis{
+        raman-thesis,
+	author = "Rajesh Raman",
+	title = "Matchmaking Frameworks for Distributed Resource Management",
+	school = "University of Wisconsin-Madison",
+	month = "October",
+	year = "2000"
+}
+
+
+

+PhD Thesis of Jim Basney +

+
+@phdthesis{
+        basney-thesis,
+	author = "Jim Basney",
+	title = "Network and CPU Co-Allocation in High Throughput Computing Environments",
+	school = "University of Wisconsin-Madison",
+	year = "2001"
+}
+
+
+

+PhD Thesis of Douglas Thain +

+
+@phdthesis{
+	thain-thesis,
+	author = "Douglas Thain",
+	title = "Coordinating Access to Computation and Data in Distributed Systems",
+	school = "University of Wisconsin-Madison",
+	year = "2004"
+}
+
+
+

+PhD Thesis of Tevfik Kosar +

+
+@phdthesis{
+	kosar-thesis,
+	author = "Tevfik Kosar",
+	title = "Data Placement in Widely Distributed Systems",
+	school = "University of Wisconsin-Madison",
+	year = "2005"
+}
+
+
+

+PhD Thesis of Sechang Son +

+
+@phdthesis{
+	sonny-thesis,
+	author = "Sechang Son",
+	title = "Middleware Approaches to Middlebox Traversal",
+	school = "University of Wisconsin-Madison",
+	year = "2006"
+}
+
+
+

+PhD Thesis of Nicholas Coleman +

+
+@phdthesis{
+    coleman-thesis,
+    author = "Nicholas Coleman",
+    title = "A Matchmaking Approach to Distributed Policy Specification and Interpretation",
+    school = "University of Wisconsin-Madison",
+    month = "August",
+    year = "2007"
+}
+
+
+

+PhD Thesis of Ian Alderman +

+
+@phdthesis{
+    alderman-thesis,
+    author = "Ian D. Alderman",
+    title = "A Security Framework for Distributed Batch Computing",
+    school = "University of Wisconsin-Madison",
+    month = "April",
+    year = "2010"
+}
+
+
diff --git a/preview-fall2024-info/doc/condor-hunter.pdf b/preview-fall2024-info/doc/condor-hunter.pdf new file mode 100644 index 000000000..38ccb703e Binary files /dev/null and b/preview-fall2024-info/doc/condor-hunter.pdf differ diff --git a/preview-fall2024-info/doc/condor-ogsa-2004.pdf b/preview-fall2024-info/doc/condor-ogsa-2004.pdf new file mode 100644 index 000000000..fe87f8c5c Binary files /dev/null and b/preview-fall2024-info/doc/condor-ogsa-2004.pdf differ diff --git a/preview-fall2024-info/doc/condor-practice.pdf b/preview-fall2024-info/doc/condor-practice.pdf new file mode 100644 index 000000000..401fe2161 Binary files /dev/null and b/preview-fall2024-info/doc/condor-practice.pdf differ diff --git a/preview-fall2024-info/doc/condor1.gif b/preview-fall2024-info/doc/condor1.gif new file mode 100644 index 000000000..2b8ce6969 Binary files /dev/null and b/preview-fall2024-info/doc/condor1.gif differ diff --git a/preview-fall2024-info/doc/condor2.gif b/preview-fall2024-info/doc/condor2.gif new file mode 100644 index 000000000..c368a2d94 Binary files /dev/null and b/preview-fall2024-info/doc/condor2.gif differ diff --git a/preview-fall2024-info/doc/condor3.gif b/preview-fall2024-info/doc/condor3.gif new file mode 100644 index 000000000..418608e90 Binary files /dev/null and b/preview-fall2024-info/doc/condor3.gif differ diff --git a/preview-fall2024-info/doc/condor3.ps b/preview-fall2024-info/doc/condor3.ps new file mode 100644 index 000000000..8b1149fcb Binary files /dev/null and b/preview-fall2024-info/doc/condor3.ps differ diff --git a/preview-fall2024-info/doc/condor_preemptive_scheduling_2003.pdf b/preview-fall2024-info/doc/condor_preemptive_scheduling_2003.pdf new file mode 100644 index 000000000..2b10a480e Binary files /dev/null and b/preview-fall2024-info/doc/condor_preemptive_scheduling_2003.pdf differ diff --git a/preview-fall2024-info/doc/condor_pvm_framework.ps b/preview-fall2024-info/doc/condor_pvm_framework.ps new file mode 100644 index 000000000..f7670e2f3 Binary files /dev/null and b/preview-fall2024-info/doc/condor_pvm_framework.ps differ diff --git a/preview-fall2024-info/doc/condorg-hpdc10.doc b/preview-fall2024-info/doc/condorg-hpdc10.doc new file mode 100644 index 000000000..f67807495 Binary files /dev/null and b/preview-fall2024-info/doc/condorg-hpdc10.doc differ diff --git a/preview-fall2024-info/doc/condorg-hpdc10.pdf b/preview-fall2024-info/doc/condorg-hpdc10.pdf new file mode 100644 index 000000000..adfdf3ed0 Binary files /dev/null and b/preview-fall2024-info/doc/condorg-hpdc10.pdf differ diff --git a/preview-fall2024-info/doc/condorg-hpdc10.ps b/preview-fall2024-info/doc/condorg-hpdc10.ps new file mode 100644 index 000000000..6ed9e4afb Binary files /dev/null and b/preview-fall2024-info/doc/condorg-hpdc10.ps differ diff --git a/preview-fall2024-info/doc/condorgrid.pdf b/preview-fall2024-info/doc/condorgrid.pdf new file mode 100644 index 000000000..4c4a9cd7b Binary files /dev/null and b/preview-fall2024-info/doc/condorgrid.pdf differ diff --git a/preview-fall2024-info/doc/condorgrid.ps b/preview-fall2024-info/doc/condorgrid.ps new file mode 100644 index 000000000..992172da8 Binary files /dev/null and b/preview-fall2024-info/doc/condorgrid.ps differ diff --git a/preview-fall2024-info/doc/cpc.pdf b/preview-fall2024-info/doc/cpc.pdf new file mode 100644 index 000000000..b586ab1a9 Binary files /dev/null and b/preview-fall2024-info/doc/cpc.pdf differ diff --git a/preview-fall2024-info/doc/cpc.ps b/preview-fall2024-info/doc/cpc.ps new file mode 100644 index 
000000000..6f00bae1f Binary files /dev/null and b/preview-fall2024-info/doc/cpc.ps differ diff --git a/preview-fall2024-info/doc/dbc-dcs16.pdf b/preview-fall2024-info/doc/dbc-dcs16.pdf new file mode 100644 index 000000000..905b29324 Binary files /dev/null and b/preview-fall2024-info/doc/dbc-dcs16.pdf differ diff --git a/preview-fall2024-info/doc/dbc-dcs16.ps b/preview-fall2024-info/doc/dbc-dcs16.ps new file mode 100644 index 000000000..c438235ae Binary files /dev/null and b/preview-fall2024-info/doc/dbc-dcs16.ps differ diff --git a/preview-fall2024-info/doc/disc-worlds2004.pdf b/preview-fall2024-info/doc/disc-worlds2004.pdf new file mode 100644 index 000000000..9f3ed2a37 Binary files /dev/null and b/preview-fall2024-info/doc/disc-worlds2004.pdf differ diff --git a/preview-fall2024-info/doc/disc-worlds2004.ps b/preview-fall2024-info/doc/disc-worlds2004.ps new file mode 100644 index 000000000..cf4103fbd Binary files /dev/null and b/preview-fall2024-info/doc/disc-worlds2004.ps differ diff --git a/preview-fall2024-info/doc/distributed_policy_2012.pdf b/preview-fall2024-info/doc/distributed_policy_2012.pdf new file mode 100644 index 000000000..841489703 Binary files /dev/null and b/preview-fall2024-info/doc/distributed_policy_2012.pdf differ diff --git a/preview-fall2024-info/doc/dobbs_95.ps b/preview-fall2024-info/doc/dobbs_95.ps new file mode 100644 index 000000000..f40aa6b1d Binary files /dev/null and b/preview-fall2024-info/doc/dobbs_95.ps differ diff --git a/preview-fall2024-info/doc/epjconf_chep2021_02020.pdf b/preview-fall2024-info/doc/epjconf_chep2021_02020.pdf new file mode 100644 index 000000000..16a2a228f Binary files /dev/null and b/preview-fall2024-info/doc/epjconf_chep2021_02020.pdf differ diff --git a/preview-fall2024-info/doc/error-scope.pdf b/preview-fall2024-info/doc/error-scope.pdf new file mode 100644 index 000000000..b29f3d050 Binary files /dev/null and b/preview-fall2024-info/doc/error-scope.pdf differ diff --git a/preview-fall2024-info/doc/error-scope.ps b/preview-fall2024-info/doc/error-scope.ps new file mode 100644 index 000000000..b832842a9 Binary files /dev/null and b/preview-fall2024-info/doc/error-scope.ps differ diff --git a/preview-fall2024-info/doc/ethernet-hpdc12.pdf b/preview-fall2024-info/doc/ethernet-hpdc12.pdf new file mode 100644 index 000000000..65d5599ae Binary files /dev/null and b/preview-fall2024-info/doc/ethernet-hpdc12.pdf differ diff --git a/preview-fall2024-info/doc/ethernet-hpdc12.ps b/preview-fall2024-info/doc/ethernet-hpdc12.ps new file mode 100644 index 000000000..415ec4836 Binary files /dev/null and b/preview-fall2024-info/doc/ethernet-hpdc12.ps differ diff --git a/preview-fall2024-info/doc/exec-domains.pdf b/preview-fall2024-info/doc/exec-domains.pdf new file mode 100644 index 000000000..aa467dfa6 Binary files /dev/null and b/preview-fall2024-info/doc/exec-domains.pdf differ diff --git a/preview-fall2024-info/doc/exec-domains.ps b/preview-fall2024-info/doc/exec-domains.ps new file mode 100644 index 000000000..20e5027fa Binary files /dev/null and b/preview-fall2024-info/doc/exec-domains.ps differ diff --git a/preview-fall2024-info/doc/experience.pdf b/preview-fall2024-info/doc/experience.pdf new file mode 100644 index 000000000..4a477f2a6 Binary files /dev/null and b/preview-fall2024-info/doc/experience.pdf differ diff --git a/preview-fall2024-info/doc/experience.ps b/preview-fall2024-info/doc/experience.ps new file mode 100644 index 000000000..b45e69d22 Binary files /dev/null and b/preview-fall2024-info/doc/experience.ps differ diff --git 
a/preview-fall2024-info/doc/flexible_sessions.pdf b/preview-fall2024-info/doc/flexible_sessions.pdf new file mode 100644 index 000000000..8b30016cb Binary files /dev/null and b/preview-fall2024-info/doc/flexible_sessions.pdf differ diff --git a/preview-fall2024-info/doc/flock.pdf b/preview-fall2024-info/doc/flock.pdf new file mode 100644 index 000000000..6bdb62b9d Binary files /dev/null and b/preview-fall2024-info/doc/flock.pdf differ diff --git a/preview-fall2024-info/doc/flock.ps b/preview-fall2024-info/doc/flock.ps new file mode 100644 index 000000000..aaede83e3 Binary files /dev/null and b/preview-fall2024-info/doc/flock.ps differ diff --git a/preview-fall2024-info/doc/friendly-wos3.pdf b/preview-fall2024-info/doc/friendly-wos3.pdf new file mode 100644 index 000000000..bfbb988e2 Binary files /dev/null and b/preview-fall2024-info/doc/friendly-wos3.pdf differ diff --git a/preview-fall2024-info/doc/friendly-wos3.ps b/preview-fall2024-info/doc/friendly-wos3.ps new file mode 100644 index 000000000..8ee491937 Binary files /dev/null and b/preview-fall2024-info/doc/friendly-wos3.ps differ diff --git a/preview-fall2024-info/doc/gangmatching-hpdc12.pdf b/preview-fall2024-info/doc/gangmatching-hpdc12.pdf new file mode 100644 index 000000000..e19bd47c2 Binary files /dev/null and b/preview-fall2024-info/doc/gangmatching-hpdc12.pdf differ diff --git a/preview-fall2024-info/doc/gangmatching-hpdc12.ps b/preview-fall2024-info/doc/gangmatching-hpdc12.ps new file mode 100644 index 000000000..f36d844dd Binary files /dev/null and b/preview-fall2024-info/doc/gangmatching-hpdc12.ps differ diff --git a/preview-fall2024-info/doc/gangmatching.pdf b/preview-fall2024-info/doc/gangmatching.pdf new file mode 100644 index 000000000..4e752d9de Binary files /dev/null and b/preview-fall2024-info/doc/gangmatching.pdf differ diff --git a/preview-fall2024-info/doc/gangmatching.ps b/preview-fall2024-info/doc/gangmatching.ps new file mode 100644 index 000000000..7756d92eb Binary files /dev/null and b/preview-fall2024-info/doc/gangmatching.ps differ diff --git a/preview-fall2024-info/doc/glideinWMS-JPhysics-2012.pdf b/preview-fall2024-info/doc/glideinWMS-JPhysics-2012.pdf new file mode 100644 index 000000000..51783a386 Binary files /dev/null and b/preview-fall2024-info/doc/glideinWMS-JPhysics-2012.pdf differ diff --git a/preview-fall2024-info/doc/globus-online-JPhysics-2012.pdf b/preview-fall2024-info/doc/globus-online-JPhysics-2012.pdf new file mode 100644 index 000000000..bdfd9fb89 Binary files /dev/null and b/preview-fall2024-info/doc/globus-online-JPhysics-2012.pdf differ diff --git a/preview-fall2024-info/doc/globus-online-SciDAC-2011.pdf b/preview-fall2024-info/doc/globus-online-SciDAC-2011.pdf new file mode 100644 index 000000000..1394db64c Binary files /dev/null and b/preview-fall2024-info/doc/globus-online-SciDAC-2011.pdf differ diff --git a/preview-fall2024-info/doc/goodput.pdf b/preview-fall2024-info/doc/goodput.pdf new file mode 100644 index 000000000..3b7bb3441 Binary files /dev/null and b/preview-fall2024-info/doc/goodput.pdf differ diff --git a/preview-fall2024-info/doc/goodput.ps b/preview-fall2024-info/doc/goodput.ps new file mode 100644 index 000000000..e87139b42 Binary files /dev/null and b/preview-fall2024-info/doc/goodput.ps differ diff --git a/preview-fall2024-info/doc/grid-08-pilot-accounting.pdf b/preview-fall2024-info/doc/grid-08-pilot-accounting.pdf new file mode 100644 index 000000000..8ab43a95c Binary files /dev/null and b/preview-fall2024-info/doc/grid-08-pilot-accounting.pdf differ diff --git 
a/preview-fall2024-info/doc/grid-planning.pdf b/preview-fall2024-info/doc/grid-planning.pdf new file mode 100644 index 000000000..9576b8a74 Binary files /dev/null and b/preview-fall2024-info/doc/grid-planning.pdf differ diff --git a/preview-fall2024-info/doc/grid-planning.ps b/preview-fall2024-info/doc/grid-planning.ps new file mode 100644 index 000000000..fe91e095e Binary files /dev/null and b/preview-fall2024-info/doc/grid-planning.ps differ diff --git a/preview-fall2024-info/doc/grid-storage.pdf b/preview-fall2024-info/doc/grid-storage.pdf new file mode 100644 index 000000000..e69de29bb diff --git a/preview-fall2024-info/doc/grid-storage.ps b/preview-fall2024-info/doc/grid-storage.ps new file mode 100644 index 000000000..e69de29bb diff --git a/preview-fall2024-info/doc/grid2-ch19.pdf b/preview-fall2024-info/doc/grid2-ch19.pdf new file mode 100644 index 000000000..637fc2c48 Binary files /dev/null and b/preview-fall2024-info/doc/grid2-ch19.pdf differ diff --git a/preview-fall2024-info/doc/grid2007.pdf b/preview-fall2024-info/doc/grid2007.pdf new file mode 100644 index 000000000..959fc4ffa Binary files /dev/null and b/preview-fall2024-info/doc/grid2007.pdf differ diff --git a/preview-fall2024-info/doc/gridknowledgebase.pdf b/preview-fall2024-info/doc/gridknowledgebase.pdf new file mode 100644 index 000000000..cc3afafe2 Binary files /dev/null and b/preview-fall2024-info/doc/gridknowledgebase.pdf differ diff --git a/preview-fall2024-info/doc/gridknowledgebase.ps b/preview-fall2024-info/doc/gridknowledgebase.ps new file mode 100644 index 000000000..183672682 Binary files /dev/null and b/preview-fall2024-info/doc/gridknowledgebase.ps differ diff --git a/preview-fall2024-info/doc/hijack-hpdc8.pdf b/preview-fall2024-info/doc/hijack-hpdc8.pdf new file mode 100644 index 000000000..07701b9bc Binary files /dev/null and b/preview-fall2024-info/doc/hijack-hpdc8.pdf differ diff --git a/preview-fall2024-info/doc/hijack-hpdc8.ps b/preview-fall2024-info/doc/hijack-hpdc8.ps new file mode 100644 index 000000000..4d91ad290 Binary files /dev/null and b/preview-fall2024-info/doc/hijack-hpdc8.ps differ diff --git a/preview-fall2024-info/doc/hpcc-chapter.pdf b/preview-fall2024-info/doc/hpcc-chapter.pdf new file mode 100644 index 000000000..0afcd7506 Binary files /dev/null and b/preview-fall2024-info/doc/hpcc-chapter.pdf differ diff --git a/preview-fall2024-info/doc/hpcc-chapter.ps b/preview-fall2024-info/doc/hpcc-chapter.ps new file mode 100644 index 000000000..5524c85f2 Binary files /dev/null and b/preview-fall2024-info/doc/hpcc-chapter.ps differ diff --git a/preview-fall2024-info/doc/hpdc-chapter.pdf b/preview-fall2024-info/doc/hpdc-chapter.pdf new file mode 100644 index 000000000..bea228c1e Binary files /dev/null and b/preview-fall2024-info/doc/hpdc-chapter.pdf differ diff --git a/preview-fall2024-info/doc/hpdc-chapter.ps b/preview-fall2024-info/doc/hpdc-chapter.ps new file mode 100644 index 000000000..81dc23cfd Binary files /dev/null and b/preview-fall2024-info/doc/hpdc-chapter.ps differ diff --git a/preview-fall2024-info/doc/hpdc-paper.ps b/preview-fall2024-info/doc/hpdc-paper.ps new file mode 100644 index 000000000..ed093bfb6 Binary files /dev/null and b/preview-fall2024-info/doc/hpdc-paper.ps differ diff --git a/preview-fall2024-info/doc/hpdc03-ncoleman.pdf b/preview-fall2024-info/doc/hpdc03-ncoleman.pdf new file mode 100644 index 000000000..95e2d1306 Binary files /dev/null and b/preview-fall2024-info/doc/hpdc03-ncoleman.pdf differ diff --git a/preview-fall2024-info/doc/hpdc03-ncoleman.ps 
b/preview-fall2024-info/doc/hpdc03-ncoleman.ps new file mode 100644 index 000000000..1ee1974f1 Binary files /dev/null and b/preview-fall2024-info/doc/hpdc03-ncoleman.ps differ diff --git a/preview-fall2024-info/doc/hpdc98.pdf b/preview-fall2024-info/doc/hpdc98.pdf new file mode 100644 index 000000000..f00d2a5dd Binary files /dev/null and b/preview-fall2024-info/doc/hpdc98.pdf differ diff --git a/preview-fall2024-info/doc/hpdc98.ps b/preview-fall2024-info/doc/hpdc98.ps new file mode 100644 index 000000000..843d62015 Binary files /dev/null and b/preview-fall2024-info/doc/hpdc98.ps differ diff --git a/preview-fall2024-info/doc/htc_mech.pdf b/preview-fall2024-info/doc/htc_mech.pdf new file mode 100644 index 000000000..140e60000 Binary files /dev/null and b/preview-fall2024-info/doc/htc_mech.pdf differ diff --git a/preview-fall2024-info/doc/htc_mech.ps b/preview-fall2024-info/doc/htc_mech.ps new file mode 100644 index 000000000..75f8237b8 Binary files /dev/null and b/preview-fall2024-info/doc/htc_mech.ps differ diff --git a/preview-fall2024-info/doc/htmc-siam9.pdf b/preview-fall2024-info/doc/htmc-siam9.pdf new file mode 100644 index 000000000..90d0ec2fa Binary files /dev/null and b/preview-fall2024-info/doc/htmc-siam9.pdf differ diff --git a/preview-fall2024-info/doc/htmc-siam9.ps b/preview-fall2024-info/doc/htmc-siam9.ps new file mode 100644 index 000000000..e88c54e00 Binary files /dev/null and b/preview-fall2024-info/doc/htmc-siam9.ps differ diff --git a/preview-fall2024-info/doc/icdcs1988-2.pdf b/preview-fall2024-info/doc/icdcs1988-2.pdf new file mode 100644 index 000000000..4adc3b271 Binary files /dev/null and b/preview-fall2024-info/doc/icdcs1988-2.pdf differ diff --git a/preview-fall2024-info/doc/icdcs1988.pdf b/preview-fall2024-info/doc/icdcs1988.pdf new file mode 100644 index 000000000..38ccb703e Binary files /dev/null and b/preview-fall2024-info/doc/icdcs1988.pdf differ diff --git a/preview-fall2024-info/doc/install.ps b/preview-fall2024-info/doc/install.ps new file mode 100644 index 000000000..928ba9e40 Binary files /dev/null and b/preview-fall2024-info/doc/install.ps differ diff --git a/preview-fall2024-info/doc/install.txt b/preview-fall2024-info/doc/install.txt new file mode 100644 index 000000000..e79d9b419 --- /dev/null +++ b/preview-fall2024-info/doc/install.txt @@ -0,0 +1,858 @@
+
+
+                     CONDOR INSTALLATION GUIDE
+
+                         Michael Litzkow
+
+                  Computer Sciences Department
+               University of Wisconsin - Madison
+                       mike@cs.wisc.edu
+
+                     Version 4.1b    5/26/92
+
+
+1. GENERAL
+
+     This document explains how to create and install condor
+     from the source.  To do this, you will need the capability
+     of becoming super user on all of the machines where you
+     want condor to run.  We also assume that you are familiar
+     with local procedures for creating UNIX accounts, and
+     allocating user and group id numbers.  In addition, if you
+     wish to use NFS to share some of the condor executables and
+     libraries between machines, you will need to be familiar
+     with exporting and importing NFS file systems.
+
+2. REQUIRED TOOLS
+
+     Building this version of condor requires use of imake and
+     makedepend, which are part of the X11 distribution.  Imake
+     is a program for creating "Makefiles" which are customized
+     for a particular platform and local environment.  This is
+     accomplished by combining information in a generic
+     description of a "Makefile", called an "Imakefile", along
+     with configuration files which have been customized for
+     your particular platform and your local site.
+     Makedepend is a program for adding dependencies to
+     "Makefiles" for C programs which use various include
+     files.  These tools are available from MIT as part of the
+     X11 distribution.  In case you do not have them, they are
+     included in this distribution in the "CONDOR/imake_tools"
+     directory.  Instructions for building them are given in a
+     separate document, "Building Imake, Cpp, and Makedepend".
+
+3. OPERATIONS IN A HETEROGENEOUS ENVIRONMENT
+
+     Most local area networks contain a variety of machine and
+     operating system types.  We refer to a particular
+     combination of machine architecture and UNIX variant as a
+     "platform".  To maximize the sharing of resources, condor
+     attempts to provide some interoperability between the
+     various platforms.  This does add some complication to the
+     condor code, and will require some consideration by the
+     person creating and installing the condor executables.
+
+     Condor is designed so that all of the various machines may
+     reside in a single "resource" pool.  All of the condor
+     daemons communicate through XDR library routines, and are
+     thus compatible even between machines which use different
+     byte ordering.  Users of a machine of any particular
+     architecture and operating system type will be able to
+     submit jobs to be run on other architecture and operating
+     system combinations.  (Of course those jobs will need to
+     be compiled and linked for the appropriate target
+     architecture/operating system combination.)[1]
+
+     Supported Architecture and Operating System Combinations
+
+     Architecture   Operating System   Description
+     ------------   ----------------   ------------------------------------
+     R6000          AIX31              IBM R6000 running AIX 3.1 (*)
+     MIPS           ULTRIX             DECstation 3100 running Ultrix 3.0+
+     MIPS           ULTRIX40           DECstation 3100 running Ultrix 4.0+
+     SPARC          SUNOS41            Sun 4 running SunOs 4.1
+     MC68020        SUNOS41            Sun 3 running SunOs 4.1
+     I386           DYNIX              Sequent Symmetry running Dynix
+     VAX            ULTRIX             Vax running Ultrix 3.0+
+     MC68020        BSD43              HP 9000 running BSD 4.3+
+     SGI            IRIX332            Silicon Graphics 4D running IRIX 3.3.1+
+
+     ____________________
+     [1] Currently it is not possible to submit jobs compiled
+     for R6000 machines from other platforms, nor is it
+     possible to submit jobs compiled for other platforms from
+     R6000 machines.
+     (*) Checkpointing is not currently supported on R6000/AIX
+     machines.
+
+     UNIX is a trademark of AT&T.  NFS, XDR, and SunOS are
+     trademarks of Sun Microsystems.  R6000 and AIX are
+     trademarks of International Business Machines Corporation.
+     VAX and ULTRIX are trademarks of Digital Equipment
+     Corporation.  MIPS is a trademark of Mips Computer
+     Systems.  SPARC is a trademark of Sparc International.
+     HP9000 is a trademark of Hewlett Packard.  MC68020 is a
+     trademark of Motorola Semiconductor Corporation.  Symmetry
+     and Dynix are trademarks of Sequent Computer Systems.  BSD
+     4.3 is a trademark of the Regents of the University of
+     California.  IRIX is a trademark of Silicon Graphics
+     Incorporated.
+4. PLANNING
+
+  4.1. Sharing Source
+
+     If you use NFS, you can maintain the condor source on a
+     single machine.  It is then possible to build object trees
+     for the various platforms either on the source machine or
+     on the various machines for which those objects are being
+     built.  If you do not use NFS, you will need to distribute
+     the source tree to the appropriate machines using "rcp" or
+     "rdist".
+
+     [Figure 1 (fig_1.idraw) belongs here.]
+
+     Figure 1 illustrates sharing of the condor source,
+     configuration, and document directories between three
+     different platforms.  In this case the MIPS_ULTRIX and
+     R6000_AIX31 object directories and all the non-object
+     directories reside physically on the R6000.  The
+     SPARC_SUNOS41 object directory resides physically on the
+     SUN 4.  Both the SUN 4 and the DECstation mount
+     "~condor/CONDOR" from the R6000.  Note that
+     "SPARC_SUNOS41" is a symbolic link to "~condor/SPARC_OBJ".
+     From the point of view of the R6000, this is a dangling
+     symbolic link, but from the point of view of the SUN 4 it
+     is valid and points into the local disk.
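+     For instance, to push the source tree to a member machine
+     when NFS is not available, commands along the following
+     lines could be used (the host name "sun4host" is only an
+     example, not part of this distribution):
+
+          rcp -r ~condor/CONDOR sun4host:~condor
+
+     or, to keep an already distributed copy up to date:
+
+          rdist -c ~condor/CONDOR sun4host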
+  4.2. Sharing Objects
+
+     Once you have compiled executables suitable for the
+     machines you wish to include in your condor pool, you will
+     need to make those executables available on the member
+     machines.  Following these instructions for building
+     "condor" for a particular platform type will result in the
+     creation of a platform specific object directory under the
+     "CONDOR" directory.  Examples of such platform specific
+     directories are the "MIPS_ULTRIX40" and "R6000_AIX31"
+     shown in figure 2.  Each platform specific object
+     directory will contain several subdirectories for building
+     the various condor programs and libraries.  Also each
+     platform specific object directory will contain a special
+     subdirectory called "release_dir".  Copies of all the
+     completely linked programs and archived libraries (but no
+     ".o" files) will be placed there.  In the following
+     discussion the pathname of this platform specific
+     "release_dir" will be referred to as <release_dir>.
+     Another very important pathname is the one which you will
+     set up for your users to access the condor software.  In
+     the example shown in figure 2 this is "/usr/uw/condor".
+     For the following discussion, this "well known" directory
+     will be referred to as <well_known_dir>.  Note that
+     <release_dir> will be a different pathname for every
+     platform type, while <well_known_dir> should be the same
+     pathname on every platform.  When you install condor you
+     will need to make the software in <release_dir> available
+     to your users under the name <well_known_dir>.  There are
+     a number of possible ways to do this.  To include the
+     machine where you did the compilation in the pool, you
+     might want to make <well_known_dir> a symbolic link to
+     <release_dir>.  For other machines in the pool, you might
+     choose to use the compilation machine as a fileserver; in
+     this case you can use NFS to mount the appropriate
+     <release_dir> as <well_known_dir> on those machines.  If
+     the machines to be in the condor pool already have some of
+     their executables remotely mounted from fileservers, you
+     might want to copy <release_dir> onto the fileservers
+     using "rcp" or "rdist".  In any case it is strongly
+     suggested that you make the condor libraries and
+     executables available to your users via the same path name
+     on all machines.
+
+     [Figure 2 (fig_2.idraw) belongs here.]
+
+     Figure 2 illustrates the user's access to the condor
+     software on three different platforms.  On the R6000,
+     "/usr/uw/condor" is a symbolic link to
+     "~condor/CONDOR/R6000_AIX31/release_dir".  On the
+     DECstation "/usr/uw/condor" is remotely mounted from the
+     R6000, which is acting as a fileserver.  On the Sun 4
+     "/usr/uw/condor" is remotely mounted from a dedicated
+     fileserver.
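+     On the compilation machine in figure 2, for example, the
+     link could be created with:
+
+          ln -s ~condor/CONDOR/R6000_AIX31/release_dir /usr/uw/condor
+
+     Member machines that mount <release_dir> via NFS instead
+     need an "/etc/fstab" entry; an example is given in section
+     6, step (2).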
+  4.3. Administrative Details
+
+     Each machine which is a member of the condor pool will
+     need to have a "condor" account.  If you use NFS, it will
+     be necessary for all of the "condor" accounts to share
+     common user and group id's.  Condor must have its own
+     group id.  Group id's such as "daemon" or "bin" are not
+     suitable for use by condor.  Each "condor" account will
+     need a home directory containing 3 subdirectories, "log",
+     "spool", and "execute".  A script is provided which will
+     create these directories with the proper ownership and
+     mode.  If you choose to have these directories remotely
+     mounted, be sure each condor machine has its own private
+     version of these directories.  Each "condor" account will
+     have two files in the home directory called
+     "condor_config" and "condor_config.local".  It is intended
+     that "condor_config" be shared via NFS, but
+     "condor_config.local" should be private.  In this way you
+     can use "condor_config" to globally configure your condor
+     pool, and use "condor_config.local" to make changes for
+     individual machines.  Finally, each member machine will
+     need to start the condor daemons at boot time.  This will
+     be done by placing an entry in "/etc/rc" or
+     "/etc/rc.local" which starts the "condor_master".  The
+     master will determine which daemons should be run on its
+     machine, and will monitor their health, restarting them
+     and mailing system personnel about problems as necessary.
+
+     In addition to the member machines, you will need to
+     designate one machine to act as the "central manager".
+     The central manager will run two extra daemons which
+     communicate with and coordinate the daemons on all member
+     machines.  These daemons will also be started and
+     monitored by the master.
+
+5. CREATING AND DISTRIBUTING EXECUTABLES
+
+     (1)  Create a user account for "condor", on the machine
+          where you want to maintain the condor source files,
+          and set up a condor group there as well.  Change
+          directory to the condor home directory and run as
+          "condor", e.g. "su condor".
+
+     (2)  Extract the "~condor/CONDOR" directory from the
+          distribution file, e.g. "uncompress Condor.tar.Z",
+          then "tar xf Condor.tar".
+
+     (3)  If you wish to make executables for a platform type
+          other than the machine where you have extracted the
+          tape, you will need to either copy the files to the
+          "compilation" machine, or preferably remotely mount
+          them via NFS.  In any case, all the condor files
+          should have owner "condor", and group "condor".  You
+          should always be running as "condor" when you make
+          condor executables.
+
+     (4)  Determine which versions of imake, cpp, and
+          makedepend you will use.  If you want to use the
+          versions supplied with this distribution, you should
+          build and install them now.  Regardless of where you
+          obtain your imake, you will want to invoke it with a
+          standard set of parameters and with a particular
+          environment so that it behaves as assumed in these
+          instructions.  We recommend that you set up an alias
+          for the appropriate invocation on your compilation
+          machine.  Ideally this would be done in the condor
+          login shell script.  Some versions of imake
+          automatically invoke make on the generated
+          "Makefile", while others do not.  These instructions
+          assume that make will be invoked separately, and your
+          alias can include "-s Makefile" to achieve this
+          behavior if it is not the default for your version of
+          imake.  Also, you will need to tell imake where your
+          condor configuration files are by using the "-I"
+          flag.  For example, on a machine where the default
+          imake behavior is to execute make, your alias might
+          look like
+
+               alias imake 'imake -s Makefile -I/u/condor/CONDOR/config'
+
+          Since imake expects very specific behavior from the
+          cpp program it invokes, it may be necessary to tell
+          imake to use a special version.  You can do this by
+          placing the pathname in your environment variable
+          "IMAKECPP".  Again, this should be done in the condor
+          login script, and the command might look like
+
+               setenv IMAKECPP /usr/lpp/X11/Xamples/util/cpp/cpp
+
+     (5)  "Cd" to the "CONDOR/config" directory, and edit the
+          "site.def" file; the items to set are described
+          below, and a sketch of a filled-in file follows this
+          list.
+
+          Top
+               Pathname of directory where you will keep condor
+               sources and objects.  Use of the "$(TILDE)"
+               macro is encouraged here so that you can build
+               on several platforms without requiring ~condor
+               to be the same pathname on all of them.
+
+                    $(TILDE)/CONDOR
+
+               should be appropriate in most cases.
+
+          InstallDir
+               Pathname where you will ultimately install
+               condor executables and libraries.  This is the
+               pathname referred to as <well_known_dir> in
+               these instructions.
+
+          TmpDir
+               Pathname of directory where you wish temporary
+               files to be placed during the condor building
+               and testing process.
+
+          CFlags
+               Global flags to the C compiler which you will
+               want used on all platforms, e.g. "-g".
+
+          CentralMgrHost
+               Hostname of machine where you will run condor's
+               central manager processes.
+
+          CondorAdministrator
+               Mailing address of person who will be
+               responsible for managing and maintaining condor
+               at your site.  Condor will send mail about
+               problems to this address.
+
+          YpLib
+               If you run "Yellow Pages", and your standard C
+               library does not already contain the yp
+               functions, set this to the pathname of the
+               associated library, otherwise leave it blank.
+               On most systems the C library will contain these
+               functions, and you can confirm that by running
+               something like "nm -t /lib/libc.a | egrep
+               yp_bind".  If the file "yp_bind.o" is found,
+               then your C library already has the needed
+               functions.
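+          As an illustration only, a filled-in "site.def"
+          might contain definitions along these lines.  The
+          exact macro syntax depends on the configuration files
+          shipped with your distribution, and the host name and
+          mail address below are invented, so treat this as a
+          sketch rather than a working file:
+
+               #define Top                 $(TILDE)/CONDOR
+               #define InstallDir          /usr/uw/condor
+               #define TmpDir              /tmp/condor
+               #define CFlags              -g
+               #define CentralMgrHost      manager.your.domain
+               #define CondorAdministrator condor@your.domain
+               /* YpLib left empty: libc already has the yp functions */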
+     (6)  "Cd" to the "CONDOR/config" directory, and edit the
+          appropriate platform configuration file, e.g. if you
+          want to build condor for a Sun 4, edit
+          "SPARC_SUNOS41.cf".  Only a few items will need to be
+          changed; a sketch follows this list.
+
+          Tilde
+               Pathname of ~condor on the compilation machine.
+               This may be different from ~condor on the
+               machine where you store the condor source
+               directories, see figure 1.
+
+          SimpleLibC
+               Pathname of the C library.
+
+          XLibPath
+               Pathname of the X library.  If you don't run the
+               X window system on machines of this platform
+               type, don't worry about the X library pathname,
+               but set "HAS_X" to "NO".
+
+          XExtLibPath
+               Pathname of the X extension library.  If you
+               don't have an X extension library on machines of
+               this platform type, don't worry about this
+               pathname, but set "HAS_X_EXTENSIONS" to "NO".
+
+          MkDepend
+               Pathname of the makedepend program you intend to
+               use.  This could be a previously existing X11
+               version, or the shell script provided in this
+               distribution in the "GENERIC" directory.
+
+          TypicalMemory
+               The amount of physical memory which will be
+               typical on platforms of this type at your
+               installation.  Information about individual
+               machines which don't fit the norm can be
+               customized later.
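+          Again purely as a sketch, with invented paths and
+          values, the same caveat about macro syntax as above,
+          and "TypicalMemory" in whatever units your
+          configuration files expect (e.g. megabytes), the
+          edits to "SPARC_SUNOS41.cf" might look like:
+
+               #define Tilde         /home/condor
+               #define SimpleLibC    /lib/libc.a
+               #define XLibPath      /usr/lib/libX11.a
+               #define XExtLibPath   /usr/lib/libXext.a
+               #define MkDepend      $(TILDE)/CONDOR/GENERIC/makedepend
+               #define TypicalMemory 16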
+     (7)  If you are setting up Condor on an R6000 system, you
+          should set up alternate entry points for both the C
+          and FORTRAN compilers to aid in the correct linking
+          of condor programs.  Note: building the condor test
+          software depends on these entry points.
+
+          C compiler
+               Edit the "/etc/xlc.cfg" file and create two new
+               configuration stanzas, one called "condorcc" and
+               another called "ckptcc".  Both stanzas should be
+               copied from "bsdcc" with the following changes.
+               For "condorcc" change
+
+                    /lib/libc.a
+               to
+                    /lib/libcondor.a
+
+               For "ckptcc" change
+
+                    /lib/libc.a
+               to
+                    /lib/libckpt.a
+
+               For both "ckptcc" and "condorcc" change
+
+                    /lib/crt0.o
+               to
+                    /lib/condor_rt0.o
+
+               and add
+
+                    -bI:/lib/syscall.exp
+
+               to the "libraries" line.  Finally, create links
+               from "/bin/bsdcc" to "/bin/condorcc" and
+               "/bin/ckptcc".
+
+          FORTRAN compiler
+               Edit the "/etc/xlf.cfg" file and create two new
+               configuration stanzas, one called "condorxlf"
+               and another called "ckptxlf".  Both stanzas
+               should be copied from "xlf" with the following
+               changes.  For "condorxlf" change
+
+                    /lib/libc.a
+               to
+                    /lib/libcondor.a
+
+               For "ckptxlf" change
+
+                    /lib/libc.a
+               to
+                    /lib/libckpt.a
+
+               For both "ckptxlf" and "condorxlf" change
+
+                    /lib/crt0.o
+               to
+                    /lib/condor_rt0.o
+
+               and add
+
+                    -bI:/lib/syscall.exp
+
+               to the "libraries" line.  Finally, create links
+               from "/usr/bin/xlf" to "/usr/bin/condorxlf" and
+               "/usr/bin/ckptxlf".
+
+          The "condorcc" and "condorxlf" entries will link
+          programs for execution by condor.  The "ckptcc" and
+          "ckptxlf" entries will link programs for local
+          execution with checkpointing.  Example "xlc.cfg" and
+          "xlf.cfg" files are included in the "config"
+          directory.
+
+     (8)  Create an object tree for the specific platform type
+          for which you are building executables.  If you have
+          the condor source remotely mounted, and want to use
+          the trick shown in figure 1 to build your executables
+          on the physical disk of the compilation machine, set
+          up the directory on the compilation machine and the
+          symbolic link now.  For the example shown, the
+          commands executed on the Sun 4 were
+
+               mkdir ~condor/SPARC_OBJ
+               ln -s ~condor/SPARC_OBJ ~condor/CONDOR/SPARC_SUNOS41
+
+          To build the object tree, "cd" to the
+          "~condor/CONDOR" directory and first run "imake" then
+          "make".
+
+     (9)  If you are building executables for an R6000 system,
+          you will need to make <release_dir> accessible as
+          <well_known_dir> on the compilation machine at this
+          time.  This is because some of the condor programs
+          will be built using a shared library.  The AIX load
+          routine will need to be able to find that library at
+          execution time and will search for it at a pathname
+          compiled into the affected program.  It is therefore
+          desirable that this pathname is relative to
+          <well_known_dir> rather than <release_dir>, so that
+          if you later copy executables to a machine other than
+          the compilation machine, that library can still be
+          found.  The "Imakefiles" supplied with this
+          distribution are set up to do it that way.
+
+     (10) "Cd" to the newly created object directory for the
+          platform type of interest, and compile all of condor
+          by running "make release".
+
+     (11) Make condor available to all of the machines you wish
+          to have in your pool as appropriate for your site.
+          This may mean creating a symbolic link, distributing
+          to a fileserver, or granting permission to other
+          machines to mount the condor software via NFS.  See
+          figure 2.
+
+     (12) You will also need to install the condor man pages.
+          These will be found in "CONDOR/doc/man/{manl,catl}".
+          The exact commands will vary somewhat depending on
+          the situation at your site.  If you mount your man
+          pages on a shared fileserver, they may look something
+          like this:
+
+               rcp manl/* <fileserver>:/usr/man/manl
+               rcp catl/* <fileserver>:/usr/man/catl
+
+     (13) To make and install executables for other platforms,
+          go back to step 3.
+
+6. STARTING CONDOR AND ADDING MEMBERS TO THE POOL
+
+     Complete the following steps on each machine you want to
+     add to the condor resource pool.  Add the machine which
+     will act as the "central manager" first.  N.B. all of the
+     steps in setting up a member of the condor pool will
+     require you to operate as the super user.
+
+     (1)  Create an account for "condor" on the member machine.
+          Be sure to use the same user and group id's on all
+          member machines.
+
+     (2)  If you are planning to access the condor executables
+          on this machine via a remotely mounted file system,
+          make sure that file system is currently mounted, and
+          that there is an appropriate entry in "/etc/fstab" so
+          that it will get mounted whenever the machine is
+          booted.
+          For example, on a Sun 4 running SunOs 4.1 the fstab
+          entry might look something like (here <release_dir>
+          would end in ".../SPARC_SUNOS41/release_dir"):
+
+               <fileserver>:<release_dir>  <well_known_dir>  nfs  ro  0  0
+
+     (3)  Run the script "condor_init".  This will link
+          "condor_config" to a site specific version of that
+          file, and create the "log", "spool", and "execute"
+          directories with correct ownership and permissions.
+
+     (4)  Run the script "condor_on".  This will create and
+          edit "condor_config.local", setting "START_DAEMONS"
+          to "True" so that the condor daemons are able to run;
+          then it will actually start them.
+
+     (5)  At this point the member machine should be fully
+          operational.  On all machines you should find the
+          "condor_master", "condor_startd", and "condor_schedd"
+          running.  Machines which run the X window system
+          should also be running the "condor_kbdd".
+          Additionally the "central manager" machine should be
+          running the "condor_collector" and
+          "condor_negotiator".  You can check to see that the
+          proper daemons are running with
+
+               ps -ax | egrep condor
+
+          You should also run "condor_status" to see that the
+          new machine shows up in the resource pool.  If you
+          wish to run some trivial jobs to check operation of
+          all the condor software, example user programs and
+          "job description" files have been compiled and are
+          provided in the "condor_test_suite_C" and
+          "condor_test_suite_F" directories.  (A sketch of such
+          a description file follows this list.)
+
+     (6)  Add lines to "/etc/rc" or "/etc/rc.local" which will
+          start "condor_master" at boot time.  The entry will
+          look something like this:
+
+               if [ -f /usr/uw/condor/bin/condor_master ]; then
+                    /usr/uw/condor/bin/condor_master; echo -n ' condor' >/dev/console
+               fi
+
+          Note: do not attempt to run this command now, condor
+          is already running.
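+     For reference, a job description file of the kind
+     provided in the test suite directories generally has the
+     following shape.  The keywords accepted by your version
+     are documented in the condor man pages installed in step
+     (12) of section 5, and the program and file names here are
+     invented, so treat this as a sketch only:
+
+          # Run one instance of the test program "loop"
+          Executable = loop
+          Input      = loop.in
+          Output     = loop.out
+          Error      = loop.err
+          Queue
+
+     Such a file is typically handed to the "condor_submit"
+     program; check the installed man pages for the exact usage
+     on this version.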
+7. Copyright Information
+
+     Copyright 1986, 1987, 1988, 1989, 1990, 1991 by the
+     Condor Design Team
+
+     Permission to use, copy, modify, and distribute this
+     software and its documentation for any purpose and without
+     fee is hereby granted, provided that the above copyright
+     notice appear in all copies and that both that copyright
+     notice and this permission notice appear in supporting
+     documentation, and that the name of the University of
+     Wisconsin not be used in advertising or publicity
+     pertaining to distribution of the software without
+     specific, written prior permission.  The University of
+     Wisconsin and the Condor Design Team make no
+     representations about the suitability of this software for
+     any purpose.  It is provided "as is" without express or
+     implied warranty.
+
+     THE UNIVERSITY OF WISCONSIN AND THE CONDOR DESIGN TEAM
+     DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+     INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+     FITNESS.  IN NO EVENT SHALL THE UNIVERSITY OF WISCONSIN OR
+     THE CONDOR DESIGN TEAM BE LIABLE FOR ANY SPECIAL, INDIRECT
+     OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
+     RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+     ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
+     ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+     PERFORMANCE OF THIS SOFTWARE.
+
+     Authors: Allan Bricker, Michael J. Litzkow, and others.
+              University of Wisconsin, Computer Sciences Dept.
diff --git a/preview-fall2024-info/doc/javagenes.pdf b/preview-fall2024-info/doc/javagenes.pdf new file mode 100644 index 000000000..96c70e5e0 Binary files /dev/null and b/preview-fall2024-info/doc/javagenes.pdf differ diff --git a/preview-fall2024-info/doc/jbasney-dissert-condensed.pdf b/preview-fall2024-info/doc/jbasney-dissert-condensed.pdf new file mode 100644 index 000000000..897ec902f Binary files /dev/null and b/preview-fall2024-info/doc/jbasney-dissert-condensed.pdf differ diff --git a/preview-fall2024-info/doc/jbasney-dissert-condensed.ps b/preview-fall2024-info/doc/jbasney-dissert-condensed.ps new file mode 100644 index 000000000..ee461b5be Binary files /dev/null and b/preview-fall2024-info/doc/jbasney-dissert-condensed.ps differ diff --git a/preview-fall2024-info/doc/jocs20-principles.pdf b/preview-fall2024-info/doc/jocs20-principles.pdf new file mode 100644 index 000000000..672cb1eda Binary files /dev/null and b/preview-fall2024-info/doc/jocs20-principles.pdf differ diff --git a/preview-fall2024-info/doc/jphys10-scalability.pdf b/preview-fall2024-info/doc/jphys10-scalability.pdf new file mode 100644 index 000000000..45c5a371c Binary files /dev/null and b/preview-fall2024-info/doc/jphys10-scalability.pdf differ diff --git a/preview-fall2024-info/doc/jphys11-early-exerience.pdf b/preview-fall2024-info/doc/jphys11-early-exerience.pdf new file mode 100644 index 000000000..7b9754fa8 Binary files /dev/null and b/preview-fall2024-info/doc/jphys11-early-exerience.pdf differ diff --git a/preview-fall2024-info/doc/jphys15-commissioning.pdf b/preview-fall2024-info/doc/jphys15-commissioning.pdf new file mode 100644 index 000000000..d70ebfe6e Binary files /dev/null and b/preview-fall2024-info/doc/jphys15-commissioning.pdf differ diff --git a/preview-fall2024-info/doc/jphys15-how-much-higher.pdf b/preview-fall2024-info/doc/jphys15-how-much-higher.pdf new file mode 100644 index 000000000..f6bfe2de6 Binary files /dev/null and b/preview-fall2024-info/doc/jphys15-how-much-higher.pdf differ diff --git a/preview-fall2024-info/doc/kangaroo-hpdc10.pdf b/preview-fall2024-info/doc/kangaroo-hpdc10.pdf new file mode 100644 index 000000000..fb7478ad5 Binary files /dev/null and b/preview-fall2024-info/doc/kangaroo-hpdc10.pdf differ diff --git a/preview-fall2024-info/doc/kangaroo-hpdc10.ps b/preview-fall2024-info/doc/kangaroo-hpdc10.ps new file mode 100644 index 000000000..913badab1 Binary files /dev/null and b/preview-fall2024-info/doc/kangaroo-hpdc10.ps differ diff --git a/preview-fall2024-info/doc/kosar-dissertation.pdf b/preview-fall2024-info/doc/kosar-dissertation.pdf new file mode 100644 index 000000000..568882c09 Binary files /dev/null and b/preview-fall2024-info/doc/kosar-dissertation.pdf differ diff --git a/preview-fall2024-info/doc/lark-ccgrid2015.pdf b/preview-fall2024-info/doc/lark-ccgrid2015.pdf new file mode 100644 index 000000000..d9268f6f6 Binary files /dev/null and b/preview-fall2024-info/doc/lark-ccgrid2015.pdf differ diff --git a/preview-fall2024-info/doc/livny-dissertation.pdf b/preview-fall2024-info/doc/livny-dissertation.pdf new file mode 100644 index 000000000..0c2e87e10 Binary files /dev/null and b/preview-fall2024-info/doc/livny-dissertation.pdf differ diff --git a/preview-fall2024-info/doc/livny-melman.pdf b/preview-fall2024-info/doc/livny-melman.pdf new file mode 100644 index 000000000..3d177b647 Binary files /dev/null and b/preview-fall2024-info/doc/livny-melman.pdf differ diff --git a/preview-fall2024-info/doc/main.ps
b/preview-fall2024-info/doc/main.ps new file mode 100644 index 000000000..fe8b69a9e Binary files /dev/null and b/preview-fall2024-info/doc/main.ps differ diff --git a/preview-fall2024-info/doc/match_analysis.pdf b/preview-fall2024-info/doc/match_analysis.pdf new file mode 100644 index 000000000..e30266044 Binary files /dev/null and b/preview-fall2024-info/doc/match_analysis.pdf differ diff --git a/preview-fall2024-info/doc/match_analysis.ps b/preview-fall2024-info/doc/match_analysis.ps new file mode 100644 index 000000000..e69de29bb diff --git a/preview-fall2024-info/doc/murphy-usenix2012.pdf b/preview-fall2024-info/doc/murphy-usenix2012.pdf new file mode 100644 index 000000000..4f8b7ba75 Binary files /dev/null and b/preview-fall2024-info/doc/murphy-usenix2012.pdf differ diff --git a/preview-fall2024-info/doc/mutka-dissertation.pdf b/preview-fall2024-info/doc/mutka-dissertation.pdf new file mode 100644 index 000000000..4fc83f711 Binary files /dev/null and b/preview-fall2024-info/doc/mutka-dissertation.pdf differ diff --git a/preview-fall2024-info/doc/mutka-livny-dcs.pdf b/preview-fall2024-info/doc/mutka-livny-dcs.pdf new file mode 100644 index 000000000..3e6bfbfb0 Binary files /dev/null and b/preview-fall2024-info/doc/mutka-livny-dcs.pdf differ diff --git a/preview-fall2024-info/doc/mw-hpdc9.pdf b/preview-fall2024-info/doc/mw-hpdc9.pdf new file mode 100644 index 000000000..f5589e936 Binary files /dev/null and b/preview-fall2024-info/doc/mw-hpdc9.pdf differ diff --git a/preview-fall2024-info/doc/mw-hpdc9.ps b/preview-fall2024-info/doc/mw-hpdc9.ps new file mode 100644 index 000000000..2076baa4a Binary files /dev/null and b/preview-fall2024-info/doc/mw-hpdc9.ps differ diff --git a/preview-fall2024-info/doc/mw-preprint.pdf b/preview-fall2024-info/doc/mw-preprint.pdf new file mode 100644 index 000000000..e0727e85a Binary files /dev/null and b/preview-fall2024-info/doc/mw-preprint.pdf differ diff --git a/preview-fall2024-info/doc/mw-preprint.ps b/preview-fall2024-info/doc/mw-preprint.ps new file mode 100644 index 000000000..6405ecd7a Binary files /dev/null and b/preview-fall2024-info/doc/mw-preprint.ps differ diff --git a/preview-fall2024-info/doc/ncoleman-dissert.pdf b/preview-fall2024-info/doc/ncoleman-dissert.pdf new file mode 100644 index 000000000..ae1dc159a Binary files /dev/null and b/preview-fall2024-info/doc/ncoleman-dissert.pdf differ diff --git a/preview-fall2024-info/doc/ncoleman-dissert.ps b/preview-fall2024-info/doc/ncoleman-dissert.ps new file mode 100644 index 000000000..10c616eff Binary files /dev/null and b/preview-fall2024-info/doc/ncoleman-dissert.ps differ diff --git a/preview-fall2024-info/doc/ncoleman_tr1481.pdf b/preview-fall2024-info/doc/ncoleman_tr1481.pdf new file mode 100644 index 000000000..87ef8dcac Binary files /dev/null and b/preview-fall2024-info/doc/ncoleman_tr1481.pdf differ diff --git a/preview-fall2024-info/doc/ncoleman_tr1481.ps b/preview-fall2024-info/doc/ncoleman_tr1481.ps new file mode 100644 index 000000000..5aadb6c96 Binary files /dev/null and b/preview-fall2024-info/doc/ncoleman_tr1481.ps differ diff --git a/preview-fall2024-info/doc/nest-chapter.pdf b/preview-fall2024-info/doc/nest-chapter.pdf new file mode 100644 index 000000000..38f9369e1 Binary files /dev/null and b/preview-fall2024-info/doc/nest-chapter.pdf differ diff --git a/preview-fall2024-info/doc/nest-chapter.ps b/preview-fall2024-info/doc/nest-chapter.ps new file mode 100644 index 000000000..86bb3050b Binary files /dev/null and b/preview-fall2024-info/doc/nest-chapter.ps differ diff --git 
a/preview-fall2024-info/doc/nest-hpdc-02.pdf b/preview-fall2024-info/doc/nest-hpdc-02.pdf new file mode 100644 index 000000000..d2684b083 Binary files /dev/null and b/preview-fall2024-info/doc/nest-hpdc-02.pdf differ diff --git a/preview-fall2024-info/doc/nest-hpdc-02.ps b/preview-fall2024-info/doc/nest-hpdc-02.ps new file mode 100644 index 000000000..9aee8a4ee Binary files /dev/null and b/preview-fall2024-info/doc/nest-hpdc-02.ps differ diff --git a/preview-fall2024-info/doc/netman-hpdc9.pdf b/preview-fall2024-info/doc/netman-hpdc9.pdf new file mode 100644 index 000000000..419afb668 Binary files /dev/null and b/preview-fall2024-info/doc/netman-hpdc9.pdf differ diff --git a/preview-fall2024-info/doc/netman-hpdc9.ps b/preview-fall2024-info/doc/netman-hpdc9.ps new file mode 100644 index 000000000..49f11b544 Binary files /dev/null and b/preview-fall2024-info/doc/netman-hpdc9.ps differ diff --git a/preview-fall2024-info/doc/nmi-lisa2006-slides.pdf b/preview-fall2024-info/doc/nmi-lisa2006-slides.pdf new file mode 100644 index 000000000..cfa4dbf65 Binary files /dev/null and b/preview-fall2024-info/doc/nmi-lisa2006-slides.pdf differ diff --git a/preview-fall2024-info/doc/nmi-lisa2006.pdf b/preview-fall2024-info/doc/nmi-lisa2006.pdf new file mode 100644 index 000000000..ed839c2e7 Binary files /dev/null and b/preview-fall2024-info/doc/nmi-lisa2006.pdf differ diff --git a/preview-fall2024-info/doc/nmi-lisa2006.ps b/preview-fall2024-info/doc/nmi-lisa2006.ps new file mode 100644 index 000000000..a2bc29b14 Binary files /dev/null and b/preview-fall2024-info/doc/nmi-lisa2006.ps differ diff --git a/preview-fall2024-info/doc/parrot-agm2003.pdf b/preview-fall2024-info/doc/parrot-agm2003.pdf new file mode 100644 index 000000000..bde21af8d Binary files /dev/null and b/preview-fall2024-info/doc/parrot-agm2003.pdf differ diff --git a/preview-fall2024-info/doc/parrot-agm2003.ps b/preview-fall2024-info/doc/parrot-agm2003.ps new file mode 100644 index 000000000..baa35c5ce Binary files /dev/null and b/preview-fall2024-info/doc/parrot-agm2003.ps differ diff --git a/preview-fall2024-info/doc/parrot-jpdcp-preprint.pdf b/preview-fall2024-info/doc/parrot-jpdcp-preprint.pdf new file mode 100644 index 000000000..e2fea510f Binary files /dev/null and b/preview-fall2024-info/doc/parrot-jpdcp-preprint.pdf differ diff --git a/preview-fall2024-info/doc/parrot-jpdcp-preprint.ps b/preview-fall2024-info/doc/parrot-jpdcp-preprint.ps new file mode 100644 index 000000000..145d4ca7a Binary files /dev/null and b/preview-fall2024-info/doc/parrot-jpdcp-preprint.ps differ diff --git a/preview-fall2024-info/doc/parrot-jsc-preprint.pdf b/preview-fall2024-info/doc/parrot-jsc-preprint.pdf new file mode 100644 index 000000000..adef7e145 Binary files /dev/null and b/preview-fall2024-info/doc/parrot-jsc-preprint.pdf differ diff --git a/preview-fall2024-info/doc/parrot-jsc-preprint.ps b/preview-fall2024-info/doc/parrot-jsc-preprint.ps new file mode 100644 index 000000000..f7968e064 Binary files /dev/null and b/preview-fall2024-info/doc/parrot-jsc-preprint.ps differ diff --git a/preview-fall2024-info/doc/parrot-scpe.pdf b/preview-fall2024-info/doc/parrot-scpe.pdf new file mode 100644 index 000000000..d140a693e Binary files /dev/null and b/preview-fall2024-info/doc/parrot-scpe.pdf differ diff --git a/preview-fall2024-info/doc/pfs-tr.pdf b/preview-fall2024-info/doc/pfs-tr.pdf new file mode 100644 index 000000000..c32952e45 Binary files /dev/null and b/preview-fall2024-info/doc/pfs-tr.pdf differ diff --git a/preview-fall2024-info/doc/pfs-tr.ps 
b/preview-fall2024-info/doc/pfs-tr.ps new file mode 100644 index 000000000..f2840380d Binary files /dev/null and b/preview-fall2024-info/doc/pfs-tr.ps differ diff --git a/preview-fall2024-info/doc/phoenix-grid2004.pdf b/preview-fall2024-info/doc/phoenix-grid2004.pdf new file mode 100644 index 000000000..23df4755b Binary files /dev/null and b/preview-fall2024-info/doc/phoenix-grid2004.pdf differ diff --git a/preview-fall2024-info/doc/phoenix-grid2004.ps b/preview-fall2024-info/doc/phoenix-grid2004.ps new file mode 100644 index 000000000..8a9760ce2 Binary files /dev/null and b/preview-fall2024-info/doc/phoenix-grid2004.ps differ diff --git a/preview-fall2024-info/doc/pm5_2_0201.pdf b/preview-fall2024-info/doc/pm5_2_0201.pdf new file mode 100644 index 000000000..454c3e266 Binary files /dev/null and b/preview-fall2024-info/doc/pm5_2_0201.pdf differ diff --git a/preview-fall2024-info/doc/policy_2004.pdf b/preview-fall2024-info/doc/policy_2004.pdf new file mode 100644 index 000000000..3bff15d4a Binary files /dev/null and b/preview-fall2024-info/doc/policy_2004.pdf differ diff --git a/preview-fall2024-info/doc/profiling-europar2004.pdf b/preview-fall2024-info/doc/profiling-europar2004.pdf new file mode 100644 index 000000000..6819cef7b Binary files /dev/null and b/preview-fall2024-info/doc/profiling-europar2004.pdf differ diff --git a/preview-fall2024-info/doc/profiling-tr.pdf b/preview-fall2024-info/doc/profiling-tr.pdf new file mode 100644 index 000000000..e73e868b2 Binary files /dev/null and b/preview-fall2024-info/doc/profiling-tr.pdf differ diff --git a/preview-fall2024-info/doc/profiling-tr.ps b/preview-fall2024-info/doc/profiling-tr.ps new file mode 100644 index 000000000..c9235ca62 Binary files /dev/null and b/preview-fall2024-info/doc/profiling-tr.ps differ diff --git a/preview-fall2024-info/doc/profiling.pdf b/preview-fall2024-info/doc/profiling.pdf new file mode 100644 index 000000000..2ae79553a Binary files /dev/null and b/preview-fall2024-info/doc/profiling.pdf differ diff --git a/preview-fall2024-info/doc/profiling.ps b/preview-fall2024-info/doc/profiling.ps new file mode 100644 index 000000000..8ae167ace Binary files /dev/null and b/preview-fall2024-info/doc/profiling.ps differ diff --git a/preview-fall2024-info/doc/profiling_availability.pdf b/preview-fall2024-info/doc/profiling_availability.pdf new file mode 100644 index 000000000..817ec305e Binary files /dev/null and b/preview-fall2024-info/doc/profiling_availability.pdf differ diff --git a/preview-fall2024-info/doc/pruyne.dissert.pdf b/preview-fall2024-info/doc/pruyne.dissert.pdf new file mode 100644 index 000000000..2cf18a21b Binary files /dev/null and b/preview-fall2024-info/doc/pruyne.dissert.pdf differ diff --git a/preview-fall2024-info/doc/pruyne.dissert.ps b/preview-fall2024-info/doc/pruyne.dissert.ps new file mode 100644 index 000000000..94b7df9a5 Binary files /dev/null and b/preview-fall2024-info/doc/pruyne.dissert.ps differ diff --git a/preview-fall2024-info/doc/rajesh.dissert.pdf b/preview-fall2024-info/doc/rajesh.dissert.pdf new file mode 100644 index 000000000..b2b48c369 Binary files /dev/null and b/preview-fall2024-info/doc/rajesh.dissert.pdf differ diff --git a/preview-fall2024-info/doc/recorte.doc b/preview-fall2024-info/doc/recorte.doc new file mode 100644 index 000000000..14180df94 Binary files /dev/null and b/preview-fall2024-info/doc/recorte.doc differ diff --git a/preview-fall2024-info/doc/recorte.pdf b/preview-fall2024-info/doc/recorte.pdf new file mode 100644 index 000000000..db98d0625 Binary files 
/dev/null and b/preview-fall2024-info/doc/recorte.pdf differ diff --git a/preview-fall2024-info/doc/recorte.ps b/preview-fall2024-info/doc/recorte.ps new file mode 100644 index 000000000..70cfbc260 Binary files /dev/null and b/preview-fall2024-info/doc/recorte.ps differ diff --git a/preview-fall2024-info/doc/remoteunix.pdf b/preview-fall2024-info/doc/remoteunix.pdf new file mode 100644 index 000000000..d529d5bb9 Binary files /dev/null and b/preview-fall2024-info/doc/remoteunix.pdf differ diff --git a/preview-fall2024-info/doc/remoteunix.ps b/preview-fall2024-info/doc/remoteunix.ps new file mode 100644 index 000000000..4d1d7a572 Binary files /dev/null and b/preview-fall2024-info/doc/remoteunix.ps differ diff --git a/preview-fall2024-info/doc/runtime_adaptation-agridm2003.pdf b/preview-fall2024-info/doc/runtime_adaptation-agridm2003.pdf new file mode 100644 index 000000000..dc3eb2b98 Binary files /dev/null and b/preview-fall2024-info/doc/runtime_adaptation-agridm2003.pdf differ diff --git a/preview-fall2024-info/doc/runtime_adaptation-pdcp2004.pdf b/preview-fall2024-info/doc/runtime_adaptation-pdcp2004.pdf new file mode 100644 index 000000000..dc3eb2b98 Binary files /dev/null and b/preview-fall2024-info/doc/runtime_adaptation-pdcp2004.pdf differ diff --git a/preview-fall2024-info/doc/samgrid-cluster2004.pdf b/preview-fall2024-info/doc/samgrid-cluster2004.pdf new file mode 100644 index 000000000..317f31b83 Binary files /dev/null and b/preview-fall2024-info/doc/samgrid-cluster2004.pdf differ diff --git a/preview-fall2024-info/doc/schedd-migration-mics.pdf b/preview-fall2024-info/doc/schedd-migration-mics.pdf new file mode 100644 index 000000000..4e3c4c205 Binary files /dev/null and b/preview-fall2024-info/doc/schedd-migration-mics.pdf differ diff --git a/preview-fall2024-info/doc/schedd-migration-mics.ps b/preview-fall2024-info/doc/schedd-migration-mics.ps new file mode 100644 index 000000000..c4a0042bb Binary files /dev/null and b/preview-fall2024-info/doc/schedd-migration-mics.ps differ diff --git a/preview-fall2024-info/doc/sol_ckpt.ps b/preview-fall2024-info/doc/sol_ckpt.ps new file mode 100644 index 000000000..faf61cc8c Binary files /dev/null and b/preview-fall2024-info/doc/sol_ckpt.ps differ diff --git a/preview-fall2024-info/doc/sonny-dissertation.pdf b/preview-fall2024-info/doc/sonny-dissertation.pdf new file mode 100644 index 000000000..2cbde2fa8 Binary files /dev/null and b/preview-fall2024-info/doc/sonny-dissertation.pdf differ diff --git a/preview-fall2024-info/doc/stork-icdcs2004.pdf b/preview-fall2024-info/doc/stork-icdcs2004.pdf new file mode 100644 index 000000000..82ff42c0e Binary files /dev/null and b/preview-fall2024-info/doc/stork-icdcs2004.pdf differ diff --git a/preview-fall2024-info/doc/tannenba-claimlength-study.pdf b/preview-fall2024-info/doc/tannenba-claimlength-study.pdf new file mode 100644 index 000000000..756a453ae Binary files /dev/null and b/preview-fall2024-info/doc/tannenba-claimlength-study.pdf differ diff --git a/preview-fall2024-info/doc/tech.ps b/preview-fall2024-info/doc/tech.ps new file mode 100644 index 000000000..f801a7cc6 Binary files /dev/null and b/preview-fall2024-info/doc/tech.ps differ diff --git a/preview-fall2024-info/doc/tech.text b/preview-fall2024-info/doc/tech.text new file mode 100644 index 000000000..873e700e6 --- /dev/null +++ b/preview-fall2024-info/doc/tech.text @@ -0,0 +1,685 @@ + + +CONDOR TECHNICAL SUMMARY + +Allan Bricker Michael Litzkow and Miron Livny + +Computer Sciences Department University of Wisconsin - Madison 
+allan@chorus.fr, mike@cs.wisc.edu, miron@cs.wisc.edu
+
+1. Introduction to the Problem
+
+A common computing environment consists of many workstations connected
+together by a high speed local area network.  These workstations have
+grown in power over the years, and if viewed as an aggregate they can
+represent a significant computing resource.  However in many cases even
+though these workstations are owned by a single organization, they are
+dedicated to the exclusive use of individuals.
+
+In examining the usage patterns of the workstations, we find it useful
+to identify three ``typical'' types of users.  ``Type 1'' users are
+individuals who mostly use their workstations for sending and receiving
+mail or preparing papers.  Theoreticians and administrative people
+often fall into this category.  We identify many software development
+people as ``type 2'' users.  These people are frequently involved in
+the debugging cycle where they edit software, compile, then run it
+possibly using some kind of debugger.  This cycle is repeated many
+times during a typical working day.  Type 2 users sometimes have too
+much computing capacity on their workstations, such as when editing,
+but during the compilation and debugging phases they could often use
+more CPU power.  Finally there are ``type 3'' users.  These are people
+who frequently do large numbers of simulations, or combinatoric
+searches.  These people are almost never happy with just a workstation,
+because it really isn't powerful enough to meet their needs.  Another
+point is that most type 1 and type 2 users leave their machines
+completely idle when they are not working, while type 3 users often
+keep their machines busy 24 hours a day.
+
+Condor is an attempt to make use of the idle cycles from type 1 and 2
+users to help satisfy the needs of the type 3 users.  The condor
+software monitors the activity on all the participating workstations
+in the local network.  Those machines which are determined to be idle
+are placed into a resource pool or ``processor bank''.  Machines are
+then allocated from the bank for the execution of jobs belonging to
+the type 3 users.  The bank is a dynamic entity; workstations enter
+the bank when they become idle, and leave again when they get busy.
+
+2. Design Features
+
+(1) No special programming is required to use condor.  Condor is able
+to run normal UNIX[1] programs, only requiring the user to relink, not
+recompile them or change any code.
+
+(2) The local execution environment is preserved for remotely
+executing processes.  Users do not have to worry about moving data
+files to remote workstations before executing programs there.
+
+(3) The condor software is responsible for locating and allocating
+idle workstations.  Condor users do not have to search for idle
+machines, nor are they restricted to using machines only during a
+static portion of the day.
+
+(4) ``Owners'' of workstations have complete priority over their own
+machines.  Workstation owners are generally happy to let somebody else
+compute on their machines while they are out, but they want their
+machines back promptly upon returning, and they don't want to have to
+take special action to regain control.  Condor handles this
+automatically.
+
+(5) Users of condor may be assured that their jobs will eventually
+complete.
+If a user submits a job to condor which runs on somebody else's
+workstation, but the job is not finished when the workstation owner
+returns, the job will be checkpointed and restarted as soon as
+possible on another machine.
+
+(6) Measures have been taken to assure owners of workstations that
+their filesystems will not be touched by remotely executing jobs.
+
+(7) Condor does its work completely outside the kernel, and is
+compatible with Berkeley 4.2 and 4.3 UNIX kernels and many of their
+derivatives.  You do not have to run a custom operating system to get
+the benefits of condor.
+
+[1] UNIX is a trademark of AT&T.
+
+3. Limitations
+
+(1) Only single process jobs are supported, i.e. the fork(2), exec(2),
+and similar calls are not implemented.
+
+(2) Signals and signal handlers are not supported, i.e. the signal(3),
+sigvec(2), and kill(2) calls are not implemented.
+
+(3) Interprocess communication (IPC) calls are not supported, i.e. the
+socket(2), send(2), recv(2), and similar calls are not implemented.
+
+(4) All file operations must be idempotent -- read-only and write-only
+file accesses work correctly, but programs which both read and write
+the same file may not.
+
+(5) Each condor job has an associated ``checkpoint file'' which is
+approximately the size of the address space of the process.  Disk
+space must be available to store the checkpoint file both on the
+submitting and executing machines.
+
+(6) Condor does a significant amount of work to prevent security
+hazards, but some loopholes are known to exist.  One problem is that
+condor user jobs are supposed to do only remote system calls, but this
+is impossible to guarantee.  User programs are limited to running as
+an ordinary user on the executing machine, but a sufficiently
+malicious and clever user could still cause problems by doing local
+system calls on the executing machine.
+
+(7) A different security problem exists for owners of condor jobs who
+necessarily give remotely running processes access to their own file
+system.
+
+4. Overview of Condor Software
+
+In some circumstances condor user programs may utilize ``remote system
+calls'' to access files on the machine from which they were submitted.
+In other situations files on the submitting machine are accessed more
+efficiently by use of NFS.  In either case the user program is
+provided with the illusion that it is operating in the environment of
+the submitting machine.  Programs written for operation in the local
+environment are converted to using remote file access simply by
+relinking with a special library.  The remote file access mechanisms
+are described in Section 5.
+
+Condor user programs are constructed in such a way that they can be
+checkpointed and restarted at will.  This assures users that their
+jobs will complete, even if they are interrupted during execution by
+the return of a hosting workstation's owner.  Checkpointing is also
+implemented by linking with the special library.  The checkpointing
+mechanism is described more fully in Section 6.
+
+Condor includes control software consisting of three daemons which run
+on each member of the condor pool, and two other daemons which run on
+a single machine called the central manager.  This software
+automatically locates and releases ``target machines'' and manages the
+queue of jobs waiting for condor resources.  The control software is
+described in Section 7.
+5. Remote File Access
+
+Condor programs executing on a remote workstation may access files on
+the submitting workstation in one of two ways.  The preferred
+mechanism is direct access to the file via NFS, but this is only
+possible if those files appear to be in the filesystem of the
+executing machine, i.e. they are either physically located on the
+executing machine, or are mounted there via NFS.  If the desired file
+does not appear in the filesystem of the executing workstation, condor
+provides a mechanism called ``remote system calls'' which allows
+access to most of the normal system calls available on the submitting
+machine, including those that access files.  In either case, the
+remote access is completely transparent to the user program, i.e. it
+simply executes such system calls as open(), close(), read(), and
+write().  The condor library provides the remote access below the
+system call level.
+
+To better understand how the condor remote system calls work, it is
+appropriate to quickly review how normal UNIX system calls work.
+Figure 1 illustrates the normal UNIX system call mechanism.  The user
+program is linked with a standard library called the ``C library''.
+This is true even for programs written in languages other than C.
+The C library contains routines, often referred to as ``system call
+stubs'', which cause the actual system calls to happen.  What the
+stubs really do is push the system call number and system call
+arguments onto the stack, then execute an instruction which causes a
+trap to the kernel.  When the kernel trap handler is called, it reads
+the system call number and arguments, and performs the system call on
+behalf of the user program.  The trap handler will then place the
+system call return value in a well known register or registers, and
+return control to the user program.  The system call stub then returns
+the result to the calling process, completing the system call.
+
+[Figure 1: Normal UNIX System Calls -- user program, C library stub,
+trap to kernel, kernel services, e.g. the file system.]
+
+Figure 2 illustrates how this mechanism has been altered by condor to
+implement remote system calls.  Whenever condor is executing a user
+program remotely, it also runs a ``shadow'' program on the initiating
+host.  The shadow acts as an agent for the remotely executing program
+in doing system calls.  Condor user programs are linked with a special
+version of the C library.  The special version contains all of the
+functions provided by the normal C library, but the system call stubs
+have been changed to accomplish remote system calls.  The remote
+system call stubs package up the system call number and arguments and
+send them to the shadow using the network.  The shadow, which is
+linked with the normal C library, then executes the system call on
+behalf of the remotely running job in the normal way.  The shadow then
+packages up the results of the system call and sends them back to the
+system call stub in the special C library on the remote machine.  The
+remote system call stub then returns its result to the calling
+procedure, which is unaware that the call was done remotely rather
+than locally.  Note that the shadow runs with its UID set to the owner
+of the remotely running job so that it has the correct permissions
+into the local file system.
+
+[Figure 2: Remote System Calls -- the shadow (UID = user) and its
+normal C library on the initiating machine serve system call requests
+from the condor user program (UID = user) and its special C library
+on the executing machine.]
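+To make this concrete, the following is a minimal C sketch of what one
+remote system call stub might look like.  The request code, the wire
+format, the shadow_fd variable, and the helper routines are all
+invented for illustration; they are not Condor's actual protocol.
+
+    /* Illustrative remote stub for read(2); not Condor's real code. */
+    #include <stdint.h>
+    #include <sys/syscall.h>
+    #include <unistd.h>
+
+    #define REQ_READ 3            /* hypothetical request code */
+
+    extern int shadow_fd;         /* network connection to the shadow */
+
+    /* Raw kernel calls, so the stubs themselves can talk to the
+       network without recursing into the remote versions. */
+    static ssize_t raw_read(int fd, void *buf, size_t n)
+    {
+        return syscall(SYS_read, fd, buf, n);
+    }
+
+    static ssize_t raw_write(int fd, const void *buf, size_t n)
+    {
+        return syscall(SYS_write, fd, buf, n);
+    }
+
+    static int read_exact(int fd, void *buf, size_t n)
+    {
+        char *p = buf;
+        while (n > 0) {
+            ssize_t got = raw_read(fd, p, n);
+            if (got <= 0)
+                return -1;        /* lost contact with the shadow */
+            p += got;
+            n -= (size_t)got;
+        }
+        return 0;
+    }
+
+    /* The special C library's replacement for the read() stub: ship
+       the call to the shadow instead of trapping to the kernel. */
+    ssize_t read(int fd, void *buf, size_t nbytes)
+    {
+        int32_t req[3] = { REQ_READ, fd, (int32_t)nbytes };
+        int32_t result;
+
+        raw_write(shadow_fd, req, sizeof req);
+        if (read_exact(shadow_fd, &result, sizeof result) < 0)
+            return -1;
+        if (result > 0 && read_exact(shadow_fd, buf, (size_t)result) < 0)
+            return -1;
+        return result;            /* the shadow's return value */
+    }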
+In many cases, it is more efficient to access files using NFS rather
+than via the remote system call mechanism.  This is generally the case
+when the desired file is not physically located on the submitting
+machine, e.g. the file actually resides on a fileserver.  In such a
+situation data transferred to or from the file would require two trips
+over the network, one via NFS to the shadow, and another via remote
+system call to the condor user program.  The open() system call
+provided in the condor version of the C library can detect such
+circumstances, and will open files via NFS rather than remote system
+calls when this is possible.  The condor open() routine does this by
+sending the desired pathname to the shadow program on the submitting
+machine along with a translation request.  The shadow replies with the
+name of the host where the file physically resides along with a
+pathname for the file which is appropriate on the host where the file
+actually resides.  The open() routine then examines the mount table on
+the executing machine to determine whether the file is accessible via
+NFS and what pathname it is known by.  This pathname translation is
+repeated whenever the user job moves from one execution machine to
+another.  Note that condor does not assume that all files are
+available from all machines, nor that every machine will mount
+filesystems in such a way that the same pathnames refer to the same
+physical files.  Figure 3 illustrates a situation where the condor
+user program opens a file which is known as ``/u2/john'' on the
+submitting machine, but the same file is known as ``/usr1/john'' on
+the executing machine.
+
+[Figure 3: NFS File Access -- a file known as /u2/john on the
+submitting machine and as /usr1/john on the executing machine, both
+NFS mounts of fileserver:/staff/john.]
+
+6. Checkpointing
+
+To checkpoint a UNIX process, several things must be preserved.  The
+text, data, stack, and register contents are needed, as well as
+information about what files are open, where they are seek'd to, and
+what mode they were opened in.  The data and stack are available in a
+core file, while the text is available in the original executable.
+Condor gathers the information about currently open files through the
+special C library.  In condor's special C library the system call
+stubs for ``open'', ``close'', and ``dup'' not only do those things
+remotely, but they also record which files are opened in what mode,
+and which file descriptors correspond to which files.
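+A sketch of that bookkeeping, with the table layout and helper name
+invented for illustration:
+
+    /* Illustrative bookkeeping in the special library's open() stub;
+       not Condor's real data structures. */
+    #include <string.h>
+
+    struct fd_record {
+        char name[1024];    /* pathname the file was opened by     */
+        int  flags;         /* open mode, needed to reopen later   */
+        long offset;        /* seek position, filled in by CKPT()  */
+        int  in_use;
+    };
+
+    static struct fd_record fd_table[256];
+
+    extern int remote_open(const char *path, int flags, int mode);
+
+    int open(const char *path, int flags, int mode)
+    {
+        int fd = remote_open(path, flags, mode);  /* do it remotely */
+        if (fd >= 0 && fd < 256) {                /* ...and record it */
+            struct fd_record *r = &fd_table[fd];
+            strncpy(r->name, path, sizeof r->name - 1);
+            r->name[sizeof r->name - 1] = '\0';
+            r->flags  = flags;
+            r->offset = 0;
+            r->in_use = 1;
+        }
+        return fd;
+    }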
+Condor causes a running job to checkpoint by sending it a signal.
+When the program is linked, a special version of ``crt0'' is included
+which sets up CKPT() as that signal handler.  When CKPT() is called,
+it updates the table of open files by seeking each one to the current
+location and recording the file position.  Next a setjmp(3) is
+executed to save key register contents in a global data area, then
+the process sends itself a signal which results in a core dump.  The
+condor software then combines the original executable file and the
+core file to produce a ``checkpoint'' file (figure 4).  The checkpoint
+file is itself executable.
+
+[Figure 4: Creating a Checkpoint File -- the text, initialized data,
+and symbol and debugging information come from the executable; the
+data, stack, and user area come from the core file.]
+
+When the checkpoint file is restarted, it starts from the crt0 code
+just like any UNIX executable, but again this code is special, and it
+will set up the restart() routine as a signal handler with a special
+signal stack, then send itself that signal.  When restart() is called,
+it will operate in the temporary stack area and read the saved stack
+in from the checkpoint file, reopen and reposition all files from the
+saved file state information, and execute a longjmp(3) back to
+CKPT().  When the restart routine returns, it does so with respect to
+the restored stack, and CKPT() returns to the routine which was active
+at the time of the checkpoint signal, not crt0.  To the user code,
+checkpointing looks exactly like a signal handler was called, and
+restarting from a checkpoint looks like a return from that signal
+handler.
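+The checkpoint half of this sequence can be sketched as below; the
+bookkeeping routine is a stub and the choice of core-dumping signal is
+invented for this sketch.
+
+    /* Illustrative checkpoint handler; not Condor's real code. */
+    #include <setjmp.h>
+    #include <signal.h>
+
+    jmp_buf ckpt_env;                 /* lives in the data area, so it
+                                         is preserved in the core dump */
+
+    static void save_open_file_table(void)
+    {
+        /* seek each recorded descriptor to its current position and
+           store the offset and open mode -- omitted in this sketch */
+    }
+
+    void CKPT(int sig)
+    {
+        (void)sig;
+        save_open_file_table();
+        if (setjmp(ckpt_env) == 0) {  /* save key registers */
+            signal(SIGQUIT, SIG_DFL); /* default SIGQUIT dumps core */
+            raise(SIGQUIT);           /* core + executable become the
+                                         new checkpoint file */
+        }
+        /* restart() longjmp()s to here; falling out of the handler
+           resumes the interrupted user code */
+    }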
It's the + +Initiating Machine + +Central Manager Machine + +Central Manager + +Execution Machine + +Startd + +Kbdd + +Startd + +Kbdd + +Schedd Schedd + +Figure 5: Condor Processes With No Jobs Running + +Legend process started by fork/exec communication link + +CONDOR TECHNICAL SUMMARY 6 + +Version 4.1b 10/9/91 + +starter's job to start and manage the remotely running job (figure 6). + +The shadow on machine c will transfer the checkpoint file to the +starter on machine b. The starter then sets a timer and spawns off the +remotely running job from machine c (figure 7). The sha dow on machine +c will handle all system calls for the job. When the starter's timer +expires it will send the user job a checkpoint signal, causing it to +save its file state and stack, then dump core. The starter then builds +a new version of the checkpoint file which is stored temporarily on +machine b. The starter restarts the job from the new checkpoint file, +and the cycle of execute and checkpoint continues. At some point, +either the job will finish, or machine b's user will return. If the +job finishes, the job's owner is notified by mail, and the starter and +shadow clean up. If machine b becomes busy, the startd on machine b +will detect that either by noting recent activity on one of the tty or +pty's, or by the rising load average. When the startd on machine b +detects this activity, it will send a ``suspend'' signal to the +starter, and the starter will temporarily suspend the user job. This +is because frequently the own ers of machines are active for only a +few seconds, then become idle again. This would be the case if the +owner were just checking to see if there were new mail for example. If +machine b remains busy for a period of about 5 minutes, the startd +there will send a ``vacate'' signal to the starter. In this case, the +starter will abort the user job and return the latest checkpoint file +to the shadow on machine c. If the job had not run long enough on +machine b to reach a checkpoint, the job is just aborted, and will be +restarted later from the most recent checkpoint on machine c. Notice +that the starter checkpoints the condor user job periodically rather +than waiting until the remote workstation's owner wants it +back. Checkpointing, and in particular core dumping, is an I/O +intensive activity which we avoid doing when the hosting workstation's +owner is active. + +8. Control Expressions + +The condor control software is driven by a set of powerful ``control +expressions''. These expres sions are read from the file +``~condor/condor_config'' on each machine at run time. It is often con +venient for many machines of the same type to share common control +expressions, and this may be done through a fileserver. To allow +flexibility for control of individual machines, the file +``~condor/condor_config.local'' is provided, and expressions defined +there take precedence over those defined in condor_config. Following +are examples of a few of the more important condor control expressions +with explanations. See condor_config(5) for a detailed description of +all the control expressions. 
+8. Control Expressions
+
+The condor control software is driven by a set of powerful ``control
+expressions''.  These expressions are read from the file
+``~condor/condor_config'' on each machine at run time.  It is often
+convenient for many machines of the same type to share common control
+expressions, and this may be done through a fileserver.  To allow
+flexibility for control of individual machines, the file
+``~condor/condor_config.local'' is provided, and expressions defined
+there take precedence over those defined in condor_config.  Following
+are examples of a few of the more important condor control
+expressions, with explanations.  See condor_config(5) for a detailed
+description of all the control expressions.
+
+8.1. Starting Foreign Jobs
+
+This set of expressions is used by the startd to determine when to
+allow a foreign job to begin execution.
+
+    BackgroundLoad = 0.3
+    StartIdleTime  = 15 * $(MINUTE)
+    CPU_Idle = LoadAvg <= $(BackgroundLoad)
+    START : $(CPU_Idle) && KeyboardIdle > $(StartIdleTime)
+
+This example of the START expression specifies that to begin execution
+of a foreign job the load average must be less than 0.3, and there
+must have been no keyboard activity during the past 15 minutes.
+
+Other expressions are used to determine when to suspend, resume, and
+abort foreign jobs.
+
+8.2. Prioritizing Jobs
+
+The schedd must prioritize its own jobs and negotiate with the central
+manager to get permission to run them.  It uses a control expression
+to assign priorities to its local jobs.
+
+    PRIO : (UserPrio * 10) + $(Expanded) - (QDate / 1000000000.0)
+
+``UserPrio'' is a number defined by the job's owner in a similar
+spirit to the UNIX ``nice'' command.  ``Expanded'' will be 1 if the
+job has already completed some execution, and 0 otherwise.  This is an
+issue because expanded jobs require more disk space than unexpanded
+ones.  ``QDate'' is the UNIX time when the job was submitted.  The
+constants are chosen so that ``UserPrio'' will be the major criterion,
+``Expanded'' will be less important, and ``QDate'' will be the minor
+criterion in determining job priority.  ``UserPrio'', ``Expanded'',
+and ``QDate'' are variables known to the schedd, which it determines
+for each job before applying the PRIO expression.
+
+8.3. Prioritizing Machines
+
+The central manager does not keep track of individual jobs on the
+member machines.  Instead it keeps track of how many jobs a machine
+wants to run, and how many it is running at any particular time.  This
+keeps the information that must be transmitted between the schedd and
+the central manager to a minimum.  The central manager has the job of
+prioritizing the machines which want to run jobs; then it can give
+permission to the schedd on high priority machines and let them make
+their own decision about what jobs to run.
+
+    UPDATE_PRIO : Prio + Users - Running
+
+Periodically the central manager will apply this expression to all of
+the machines in the pool.  The priority of each machine will be
+incremented by the number of individual users on that machine who have
+jobs in the queue, and decremented by the number of jobs that machine
+is already executing remotely.  Machines which are running lots of
+jobs will tend to have low priorities, and machines which have jobs to
+run, but can't run them, will accumulate high priorities.
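+Rendered as ordinary C, the three example expressions look like this;
+the structures are invented stand-ins for the variables the daemons
+actually maintain.
+
+    /* Direct C renderings of the expressions above; illustrative. */
+    struct job     { double user_prio; int expanded; double qdate; };
+    struct machine { double prio; double users; double running; };
+
+    /* START: CPU idle and no keyboard activity for 15 minutes. */
+    int start_ok(double load_avg, double keyboard_idle_secs)
+    {
+        return load_avg <= 0.3 && keyboard_idle_secs > 15 * 60;
+    }
+
+    /* PRIO: UserPrio dominates, then Expanded, then submit time. */
+    double job_prio(const struct job *j)
+    {
+        return j->user_prio * 10 + j->expanded - j->qdate / 1000000000.0;
+    }
+
+    /* UPDATE_PRIO: reward machines with waiting users, penalize
+       machines already running jobs remotely. */
+    double update_prio(const struct machine *m)
+    {
+        return m->prio + m->users - m->running;
+    }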
+9. Acknowledgements
+
+This project is based on the idea of a ``processor bank'', which was
+introduced by Maurice Wilkes in connection with his work on the
+Cambridge Ring.[2]
+
+[2] Wilkes, M. V., Invited Keynote Address, 10th Annual International
+Symposium on Computer Architecture, June 1983.
+
+We would like to thank Don Neuhengen and Tom Virgilio for their
+pioneering work on the remote system call implementation; Matt Mutka
+and Miron Livny for first convincing us that a general checkpointing
+mechanism could be practical and for ideas on how to distribute
+control and prioritize the jobs; and David DeWitt and Marvin Solomon
+for their continued guidance and support throughout this project.
+
+This research was supported by the National Science Foundation under
+grants MCS81-05904 and DCR-8512862, by a Digital Equipment Corporation
+External Research Grant, and by an International Business Machines
+Department Grant.  Porting to the SGI 4D Workstation was funded by
+NRL/SFA.
+
+10. Copyright Information
+
+Copyright 1986, 1987, 1988, 1989, 1990, 1991 by the Condor Design Team
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose and without fee is hereby granted,
+provided that the above copyright notice appear in all copies and that
+both that copyright notice and this permission notice appear in
+supporting documentation, and that the name of the University of
+Wisconsin not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior
+permission.  The University of Wisconsin and the Condor Design Team
+make no representations about the suitability of this software for any
+purpose.  It is provided "as is" without express or implied warranty.
+
+THE UNIVERSITY OF WISCONSIN AND THE CONDOR DESIGN TEAM DISCLAIM ALL
+WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL THE
+UNIVERSITY OF WISCONSIN OR THE CONDOR DESIGN TEAM BE LIABLE FOR ANY
+SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
+RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
+CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+Authors: Allan Bricker, Michael J. Litzkow, and others.  University of
+Wisconsin, Computer Sciences Dept.
+
+11. Bibliography
+
+(1) Mutka, M. and Livny, M.  ``Profiling Workstations' Available
+Capacity For Remote Execution''.  Proceedings of Performance-87, The
+12th IFIP W.G. 7.3 International Symposium on Computer Performance
+Modeling, Measurement and Evaluation.  Brussels, Belgium, December
+1987.
+
+(2) Litzkow, M.  ``Remote Unix -- Turning Idle Workstations Into Cycle
+Servers''.  Proceedings of the Summer 1987 Usenix Conference.
+Phoenix, Arizona.  June 1987.
+
+(3) Mutka, M.  Sharing in a Privately Owned Workstation Environment.
+Ph.D. Th., University of Wisconsin, May 1988.
+
+(4) Litzkow, M., Livny, M. and Mutka, M.  ``Condor -- A Hunter of Idle
+Workstations''.  Proceedings of the 8th International Conference on
+Distributed Computing Systems.  San Jose, Calif.  June 1988.
+
+(5) Bricker, A. and Litzkow, M.  ``Condor Installation Guide''.  May
+1989.
+
+(6) Bricker, A. and Litzkow, M.  Unix manual pages: condor_intro(1),
+condor(1), condor_q(1), condor_rm(1), condor_status(1),
+condor_summary(1), condor_config(5), condor_control(8), and
+condor_master(8).
+January 1991.
diff --git a/preview-fall2024-info/doc/tech.txt b/preview-fall2024-info/doc/tech.txt
new file mode 100644
index 000000000..8dde99047
--- /dev/null
+++ b/preview-fall2024-info/doc/tech.txt
@@ -0,0 +1,990 @@
+
+
+                     CONDOR TECHNICAL SUMMARY
+
+                          Allan Bricker
+                         Michael Litzkow
+                               and
+                           Miron Livny
+
+                 Computer Sciences Department
+              University of Wisconsin - Madison
+     allan@chorus.fr, mike@cs.wisc.edu, miron@cs.wisc.edu
+
+
+                            Abstract
+
+     Condor is a software package for executing long running "batch"
+type jobs on workstations which would otherwise be idle.  Major
+features of Condor are automatic location and allocation of idle
+machines, and checkpointing and migration of processes.  All of these
+features are achieved without any modifications to the UNIX kernel
+whatsoever.  Also, users of Condor do not need to change their source
+programs to run with Condor, although such programs must be specially
+linked.  The features of Condor for both users and workstation owners,
+along with the limitations on the kinds of jobs which may be executed
+by Condor, are described.  The mechanisms behind our implementations
+of checkpointing and process migration are discussed in detail.
+Finally, the software which detects idle machines and allocates those
+machines to Condor users is described, along with the techniques used
+to configure that software to meet the demands of a particular
+computing site or workstation owner.
+
+1.  Introduction to the Problem
+
+     A common computing environment consists of many workstations
+connected together by a high speed local area network.  These
+workstations have grown in power over the years, and if viewed as an
+aggregate they can represent a significant computing resource.
+However in many cases even though these workstations are owned by a
+single organization, they are dedicated to the exclusive use of
+individuals.
+
+     In examining the usage patterns of the workstations, we find it
+useful to identify three "typical" types of users.  "Type 1" users
+are individuals who mostly use their workstations for sending and
+receiving mail or preparing papers.  Theoreticians and administrative
+people often fall into this category.  We identify many software
+development people as "type 2" users.  These people are frequently
+involved in the debugging cycle where they edit software, compile,
+then run it possibly using some kind of debugger.  This cycle is
+repeated many times during a typical working day.  Type 2 users
+sometimes have too much computing capacity on their workstations,
+such as when editing, but during the compilation and debugging phases
+they could often use more CPU power.  Finally there are "type 3"
+users.  These are people who frequently do large numbers of
+simulations, or combinatoric searches.  These people are almost never
+happy with just a workstation, because it really isn't powerful
+enough to meet their needs.  Another point is that most type 1 and
+type 2 users leave their machines completely idle when they are not
+working, while type 3 users often keep their machines busy 24 hours a
+day.
+
+     Condor is an attempt to make use of the idle cycles from type 1
+and 2 users to help satisfy the needs of the type 3 users.  The
+condor software monitors the activity on all the participating
+workstations in the local network.  Those machines which are
+determined to be idle are placed into a resource pool or "processor
+bank".  Machines are then allocated from the bank for the execution
+of jobs belonging to the type 3 users.  The bank is a dynamic entity;
+workstations enter the bank when they become idle, and leave again
+when they get busy.
+
+2.  Design Features
+
+(1)  No special programming is required to use condor.  Condor is
+     able to run normal UNIX[1] programs, only requiring the user to
+     relink, not recompile them or change any code.
+
+(2)  The local execution environment is preserved for remotely
+     executing processes.  Users do not have to worry about moving
+     data files to remote workstations before executing programs
+     there.
+
+(3)  The condor software is responsible for locating and allocating
+     idle workstations.  Condor users do not have to search for idle
+     machines, nor are they restricted to using machines only during
+     a static portion of the day.
+
+(4)  "Owners" of workstations have complete priority over their own
+     machines.  Workstation owners are generally happy to let
+     somebody else compute on their machines while they are out, but
+     they want their machines back promptly upon returning, and they
+     don't want to have to take special action to regain control.
+     Condor handles this automatically.
+
+(5)  Users of condor may be assured that their jobs will eventually
+     complete.  If a user submits a job to condor which runs on
+     somebody else's workstation, but the job is not finished when
+     the workstation owner returns, the job will be checkpointed and
+     restarted as soon as possible on another machine.
+
+(6)  Measures have been taken to assure owners of workstations that
+     their filesystems will not be touched by remotely executing
+     jobs.
+
+(7)  Condor does its work completely outside the kernel, and is
+     compatible with Berkeley 4.2 and 4.3 UNIX kernels and many of
+     their derivatives.  You do not have to run a custom operating
+     system to get the benefits of condor.
+
+[1]  UNIX is a trademark of AT&T.
+
+3.  Limitations
+
+(1)  Only single process jobs are supported, i.e. the fork(2),
+     exec(2), and similar calls are not implemented.
+
+(2)  Signals and signal handlers are not supported, i.e. the
+     signal(3), sigvec(2), and kill(2) calls are not implemented.
+
+(3)  Interprocess communication (IPC) calls are not supported, i.e.
+     the socket(2), send(2), recv(2), and similar calls are not
+     implemented.
+
+(4)  All file operations must be idempotent - read-only and
+     write-only file accesses work correctly, but programs which both
+     read and write the same file may not.
+
+(5)  Each condor job has an associated "checkpoint file" which is
+     approximately the size of the address space of the process.
+     Disk space must be available to store the checkpoint file both
+     on the submitting and executing machines.
+
+(6)  Condor does a significant amount of work to prevent security
+     hazards, but some loopholes are known to exist.  One problem is
+     that condor user jobs are supposed to do only remote system
+     calls, but this is impossible to guarantee.  User programs are
+     limited to running as an ordinary user on the executing machine,
+     but a sufficiently malicious and clever user could still cause
+     problems by doing local system calls on the executing machine.
+
+(7)  A different security problem exists for owners of condor jobs
+     who necessarily give remotely running processes access to their
+     own file system.
+
+4.  Overview of Condor Software
+
+     In some circumstances condor user programs may utilize "remote
+system calls" to access files on the machine from which they were
+submitted.  In other situations files on the submitting machine are
+accessed more efficiently by use of NFS.  In either case the user
+program is provided with the illusion that it is operating in the
+environment of the submitting machine.  Programs written for
+operation in the local environment are converted to using remote file
+access simply by relinking with a special library.  The remote file
+access mechanisms are described in Section 5.
+
+     Condor user programs are constructed in such a way that they can
+be checkpointed and restarted at will.  This assures users that their
+jobs will complete, even if they are interrupted during execution by
+the return of a hosting workstation's owner.  Checkpointing is also
+implemented by linking with the special library.  The checkpointing
+mechanism is described more fully in Section 6.
+
+     Condor includes control software consisting of three daemons
+which run on each member of the condor pool, and two other daemons
+which run on a single machine called the central manager.  This
+software automatically locates and releases "target machines" and
+manages the queue of jobs waiting for condor resources.  The control
+software is described in Section 7.
+
+5.  Remote File Access
+
+     Condor programs executing on a remote workstation may access
+files on the submitting workstation in one of two ways.  The
+preferred mechanism is direct access to the file via NFS, but this is
+only possible if those files appear to be in the filesystem of the
+executing machine, i.e. they are either physically located on the
+executing machine, or are mounted there via NFS.  If the desired file
+does not appear in the filesystem of the executing workstation,
+condor provides a mechanism called "remote system calls" which allows
+access to most of the normal system calls available on the submitting
+machine, including those that access files.  In either case, the
+remote access is completely transparent to the user program, i.e. it
+simply executes such system calls as open(), close(), read(), and
+write().  The condor library provides the remote access below the
+system call level.
+
+     To better understand how the condor remote system calls work, it
+is appropriate to quickly review how normal UNIX system calls work.
+Figure 1 illustrates the normal UNIX system call mechanism.  The user
+program is linked with a standard library called the "C library".
+This is true even for programs written in languages other than C.
+The C library contains routines, often referred to as "system call
+stubs", which cause the actual system calls to happen.  What the
+stubs really do is push the system call number and system call
+arguments onto the stack, then execute an instruction which causes a
+trap to the kernel.  When the kernel trap handler is called, it reads
+the system call number and arguments, and performs the system call on
+behalf of the user program.  The trap handler will then place the
+system call return value in a well known register or registers, and
+return control to the user program.  The system call stub then
+returns the result to the calling process, completing the system
+call.
+
+[fig_1.idraw -- the normal UNIX system call mechanism]
+
+     Figure 2 illustrates how this mechanism has been altered by
+condor to implement remote system calls.  Whenever condor is
+executing a user program remotely, it also runs a "shadow" program on
+the initiating host.  The shadow acts as an agent for the remotely
+executing program in doing system calls.  Condor user programs are
+linked with a special version of the C library.  The special version
+contains all of the functions provided by the normal C library, but
+the system call stubs have been changed to accomplish remote system
+calls.  The remote system call stubs package up the system call
+number and arguments and send them to the shadow using the network.
+The shadow, which is linked with the normal C library, then executes
+the system call on behalf of the remotely running job in the normal
+way.  The shadow then packages up the results of the system call and
+sends them back to the system call stub in the special C library on
+the remote machine.  The remote system call stub then returns its
+result to the calling procedure, which is unaware that the call was
+done remotely rather than locally.  Note that the shadow runs with
+its UID set to the owner of the remotely running job so that it has
+the correct permissions into the local file system.
+
+[fig_2.idraw -- remote system calls via the shadow]
+
+     In many cases, it is more efficient to access files using NFS
+rather than via the remote system call mechanism.
+This is generally the case when the desired file is not physically
+located on the submitting machine, e.g. the file actually resides on
+a fileserver.  In such a situation data transferred to or from the
+file would require two trips over the network, one via NFS to the
+shadow, and another via remote system call to the condor user
+program.  The open() system call provided in the condor version of
+the C library can detect such circumstances, and will open files via
+NFS rather than remote system calls when this is possible.  The
+condor open() routine does this by sending the desired pathname to
+the shadow program on the submitting machine along with a translation
+request.  The shadow replies with the name of the host where the file
+physically resides along with a pathname for the file which is
+appropriate on the host where the file actually resides.  The open()
+routine then examines the mount table on the executing machine to
+determine whether the file is accessible via NFS and what pathname it
+is known by.  This pathname translation is repeated whenever the user
+job moves from one execution machine to another.  Note that condor
+does not assume that all files are available from all machines, nor
+that every machine will mount filesystems in such a way that the same
+pathnames refer to the same physical files.  Figure 3 illustrates a
+situation where the condor user program opens a file which is known
+as "/u2/john" on the submitting machine, but the same file is known
+as "/usr1/john" on the executing machine.
+
+[fig_3.idraw -- NFS file access with pathname translation]
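+
+     The decision open() makes can be sketched as follows; the helper
+functions are hypothetical stand-ins for the shadow round trip and
+the mount-table search described above.
+
+    /* Illustrative open() decision; not Condor's real code. */
+    extern int translate_via_shadow(const char *path,
+                                    char *host, char *true_path);
+    extern int nfs_pathname(const char *host, const char *true_path,
+                            char *local_path);
+    extern int remote_open(const char *path, int flags, int mode);
+    extern int local_open(const char *path, int flags, int mode);
+
+    int condor_open(const char *path, int flags, int mode)
+    {
+        char host[256], true_path[1024], local_path[1024];
+
+        /* Ask the shadow where the file really lives, then see
+           whether that place is reachable from here via NFS. */
+        if (translate_via_shadow(path, host, true_path) == 0 &&
+            nfs_pathname(host, true_path, local_path) == 0)
+            return local_open(local_path, flags, mode);
+
+        /* Otherwise fall back to remote system calls. */
+        return remote_open(path, flags, mode);
+    }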
+
+6.  Checkpointing
+
+     To checkpoint a UNIX process, several things must be preserved.
+The text, data, stack, and register contents are needed, as well as
+information about what files are open, where they are seek'd to, and
+what mode they were opened in.  The data and stack are available in a
+core file, while the text is available in the original executable.
+Condor gathers the information about currently open files through the
+special C library.  In condor's special C library the system call
+stubs for "open", "close", and "dup" not only do those things
+remotely, but they also record which files are opened in what mode,
+and which file descriptors correspond to which files.
+
+     Condor causes a running job to checkpoint by sending it a
+signal.  When the program is linked, a special version of "crt0" is
+included which sets up CKPT() as that signal handler.  When CKPT() is
+called, it updates the table of open files by seeking each one to the
+current location and recording the file position.  Next a setjmp(3)
+is executed to save key register contents in a global data area, then
+the process sends itself a signal which results in a core dump.  The
+condor software then combines the original executable file and the
+core file to produce a "checkpoint" file (figure 4).  The checkpoint
+file is itself executable.
+
+[fig_4.idraw -- creating a checkpoint file from the executable and
+the core file]
+
+     When the checkpoint file is restarted, it starts from the crt0
+code just like any UNIX executable, but again this code is special,
+and it will set up the restart() routine as a signal handler with a
+special signal stack, then send itself that signal.  When restart()
+is called, it will operate in the temporary stack area and read the
+saved stack in from the checkpoint file, reopen and reposition all
+files from the saved file state information, and execute a longjmp(3)
+back to CKPT().  When the restart routine returns, it does so with
+respect to the restored stack, and CKPT() returns to the routine
+which was active at the time of the checkpoint signal, not crt0.  To
+the user code, checkpointing looks exactly like a signal handler was
+called, and restarting from a checkpoint looks like a return from
+that signal handler.
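+
+     The restart half of the protocol, sketched under the same
+caveats: the saved-state readers are stubs, and the signal-stack
+setup done by the special crt0 is assumed to have happened already.
+
+    /* Illustrative restart path; not Condor's real code. */
+    #include <setjmp.h>
+
+    extern jmp_buf ckpt_env;              /* saved by CKPT() before
+                                             the core dump            */
+    extern void read_saved_stack(void);   /* assumed: restore the
+                                             stack from the ckpt file */
+    extern void reopen_saved_files(void); /* assumed: reopen and
+                                             reseek every descriptor  */
+
+    /* Runs as a signal handler on the special signal stack set up by
+       the restart version of crt0. */
+    void restart(int sig)
+    {
+        (void)sig;
+        read_saved_stack();
+        reopen_saved_files();
+        longjmp(ckpt_env, 1);             /* return into CKPT() */
+    }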
+7. Control Software
+
+     Each machine in the condor pool runs two daemons,
+the schedd and the startd.  In addition, one machine runs
+two other daemons called the collector and the
+negotiator.  While the collector and the negotiator are
+separate processes, they work closely together, and for
+purposes of this discussion can be considered one logical
+process called the central manager.  The central manager
+has the job of keeping track of which machines are idle,
+and allocating those machines to other machines which
+have condor jobs to run.  On each machine the schedd
+maintains a queue of condor jobs, and negotiates with the
+central manager to get permission to run those jobs on
+remote machines.  The startd determines whether its
+machine is idle, and is also responsible for starting and
+managing foreign jobs which it may be hosting.  On
+machines running the X window system, an additional
+daemon, the kbdd, will periodically inform the startd of
+the keyboard and mouse "idle time".  Periodically the
+startd will examine its machine, and update the central
+manager on its degree of "idleness".  Also periodically
+the schedd will examine its job queue and update the
+central manager on how many jobs it wants to run and how
+many jobs it is currently running.  Figure 5 illustrates
+the situation when no condor jobs are running.
+
+   [Figure 5: fig_5.idraw]
+
+     At some point the central manager may learn that
+machine b is idle, and decide that machine c should
+execute one of its jobs remotely on machine b.  The
+central manager will then contact the schedd on machine c
+and give it "permission" to run a job on machine b.  The
+schedd on machine c will then select a job from its queue
+and spawn off a shadow process to run it.  The shadow
+will then contact the startd on machine b and tell it
+that it would like to run a job.  If the situation on
+machine b hasn't changed since the last update to the
+central manager, machine b will still be idle, and will
+respond with an OK.  The startd on machine b then spawns
+a process called the starter.  It's the starter's job to
+start and manage the remotely running job (figure 6).
+
+   [Figure 6: fig_6.idraw]
+
+     The shadow on machine c will transfer the checkpoint
+file to the starter on machine b.  The starter then sets
+a timer and spawns off the remotely running job from
+machine c (figure 7).  The shadow on machine c will
+handle all system calls for the job.  When the starter's
+timer expires it will send the user job a checkpoint
+signal, causing it to save its file state and stack, then
+dump core.  The starter then builds a new version of the
+checkpoint file which is stored temporarily on machine b.
+The starter restarts the job from the new checkpoint
+file, and the cycle of execute and checkpoint continues.
+At some point, either the job will finish, or machine b's
+user will return.  If the job finishes, the job's owner
+is notified by mail, and the starter and shadow clean up.
+If machine b becomes busy, the startd on machine b will
+detect that either by noting recent activity on one of
+the ttys or ptys, or by the rising load average.  When
+the startd on machine b detects this activity, it will
+send a "suspend" signal to the starter, and the starter
+will temporarily suspend the user job.  This is because
+frequently the owners of machines are active for only a
+few seconds, then become idle again.  This would be the
+case if the owner were just checking to see if there were
+new mail, for example.  If machine b remains busy for a
+period of about 5 minutes, the startd there will send a
+"vacate" signal to the starter.  In this case, the
+starter will abort the user job and return the latest
+checkpoint file to the shadow on machine c.  If the job
+had not run long enough on machine b to reach a
+checkpoint, the job is just aborted, and will be
+restarted later from the most recent checkpoint on
+machine c.  Notice that the starter checkpoints the
+condor user job periodically rather than waiting until
+the remote workstation's owner wants it back.
+Checkpointing, and in particular core dumping, is an I/O
+intensive activity which we avoid doing when the hosting
+workstation's owner is active.
+
+   [Figure 7: fig_7.idraw]
+8. Control Expressions
+
+     The condor control software is driven by a set of
+powerful "control expressions".  These expressions are
+read from the file "~condor/condor_config" on each
+machine at run time.  It is often convenient for many
+machines of the same type to share common control
+expressions, and this may be done through a fileserver.
+To allow flexibility for control of individual machines,
+the file "~condor/condor_config.local" is provided, and
+expressions defined there take precedence over those
+defined in condor_config.  Following are examples of a
+few of the more important condor control expressions with
+explanations.  See condor_config(5) for a detailed
+description of all the control expressions.
+
+8.1. Starting Foreign Jobs
+
+     This set of expressions is used by the startd to
+determine when to allow a foreign job to begin execution.
+
+    BackgroundLoad = 0.3
+    StartIdleTime = 15 * $(MINUTE)
+    CPU_Idle = LoadAvg <= $(BackgroundLoad)
+    START : $(CPU_Idle) && KeyboardIdle > $(StartIdleTime)
+
+This example of the START expression specifies that to
+begin execution of a foreign job the load average must be
+no more than 0.3, and there must have been no keyboard
+activity during the past 15 minutes.
+
+     Other expressions are used to determine when to
+suspend, resume, and abort foreign jobs.
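+
+     By way of illustration only, a companion policy in
+the same style might read as follows.  These particular
+expression names and the SuspendedTime variable are
+invented for the example, not taken from the
+distribution; see condor_config(5) for the expressions
+the startd actually honors.
+
+    SUSPEND : LoadAvg > $(BackgroundLoad) || KeyboardIdle < $(MINUTE)
+    CONTINUE : $(CPU_Idle) && KeyboardIdle > $(MINUTE)
+    VACATE : SuspendedTime > 5 * $(MINUTE)
+
+Read this way, a foreign job is suspended as soon as the
+owner touches the keyboard or the load rises, resumed
+once the machine is idle again, and vacated if it stays
+suspended for about 5 minutes, matching the behavior
+described in section 7.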
+8.2. Prioritizing Jobs
+
+     The schedd must prioritize its own jobs and
+negotiate with the central manager to get permission to
+run them.  It uses a control expression to assign
+priorities to its local jobs.
+
+    PRIO : (UserPrio * 10) + $(Expanded) - (QDate / 1000000000.0)
+
+"UserPrio" is a number defined by the job's owner in a
+similar spirit to the UNIX "nice" command.  "Expanded"
+will be 1 if the job has already completed some
+execution, and 0 otherwise.  This is an issue because
+expanded jobs require more disk space than unexpanded
+ones.  "QDate" is the UNIX time when the job was
+submitted.  The constants are chosen so that "UserPrio"
+will be the major criterion, "Expanded" will be less
+important, and "QDate" will be the minor criterion in
+determining job priority.  "UserPrio", "Expanded", and
+"QDate" are variables known to the schedd which it
+determines for each job before applying the PRIO
+expression.
+
+8.3. Prioritizing Machines
+
+     The central manager does not keep track of
+individual jobs on the member machines.  Instead it keeps
+track of how many jobs a machine wants to run, and how
+many it is running at any particular time.  This keeps
+the information that must be transmitted between the
+schedd and the central manager to a minimum.  The central
+manager has the job of prioritizing the machines which
+want to run jobs; it can then give permission to the
+schedd on high priority machines and let them make their
+own decisions about what jobs to run.
+
+    UPDATE_PRIO : Prio + Users - Running
+
+Periodically the central manager will apply this
+expression to all of the machines in the pool.  The
+priority of each machine will be incremented by the
+number of individual users on that machine who have jobs
+in the queue, and decremented by the number of jobs that
+machine is already executing remotely.  Machines which
+are running lots of jobs will tend to have low
+priorities, and machines which have jobs to run, but
+can't run them, will accumulate high priorities.
+
+9. Acknowledgments
+
+     This project is based on the idea of a "processor
+bank", which was introduced by Maurice Wilkes in
+connection with his work on the Cambridge Ring.[2]
+
+     [2] Wilkes, M. V., Invited Keynote Address, 10th
+     Annual International Symposium on Computer
+     Architecture, June 1983.
+
+     We would like to thank Don Neuhengen and Tom
+Virgilio for their pioneering work on the remote system
+call implementation; Matt Mutka and Miron Livny for first
+convincing us that a general checkpointing mechanism
+could be practical and for ideas on how to distribute
+control and prioritize the jobs; and David DeWitt and
+Marvin Solomon for their continued guidance and support
+throughout this project.
+
+     This research was supported by the National Science
+Foundation under grants MCS81-05904 and DCR-8512862, by a
+Digital Equipment Corporation External Research Grant,
+and by an International Business Machines Department
+Grant.  Porting to the SGI 4D Workstation was funded by
+NRL/SFA.
+
+10. Copyright Information
+
+     Copyright 1986, 1987, 1988, 1989, 1990, 1991 by the
+Condor Design Team
+
+     Permission to use, copy, modify, and distribute this
+software and its documentation for any purpose and
+without fee is hereby granted, provided that the above
+copyright notice appear in all copies and that both that
+copyright notice and this permission notice appear in
+supporting documentation, and that the name of the
+University of Wisconsin not be used in advertising or
+publicity pertaining to distribution of the software
+without specific, written prior permission.  The
+University of Wisconsin and the Condor Design Team make
+no representations about the suitability of this software
+for any purpose.  It is provided "as is" without express
+or implied warranty.
+
+THE UNIVERSITY OF WISCONSIN AND THE CONDOR DESIGN TEAM
+DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS.
+IN NO EVENT SHALL THE UNIVERSITY OF WISCONSIN OR THE
+CONDOR DESIGN TEAM BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
+OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
+THIS SOFTWARE.
+
+Authors: Allan Bricker, Michael J. Litzkow, and others.
+         University of Wisconsin, Computer Sciences Dept.
+
+11. Bibliography
+
+(1) Mutka, M. and Livny, M.  "Profiling Workstations'
+    Available Capacity For Remote Execution".  Proceedings
+    of Performance-87, The 12th IFIP W.G. 7.3 International
+    Symposium on Computer Performance Modeling, Measurement
+    and Evaluation.  Brussels, Belgium, December 1987.
+
+(2) Litzkow, M.  "Remote Unix - Turning Idle Workstations
+    Into Cycle Servers".  Proceedings of the Summer 1987
+    Usenix Conference.  Phoenix, Arizona.  June 1987.
+
+(3) Mutka, M.  Sharing in a Privately Owned Workstation
+    Environment.  Ph.D. Thesis, University of Wisconsin,
+    May 1988.
+
+(4) Litzkow, M., Livny, M. and Mutka, M.  "Condor - A
+    Hunter of Idle Workstations".  Proceedings of the 8th
+    International Conference on Distributed Computing
+    Systems.  San Jose, Calif.  June 1988.
+
+(5) Bricker, A. and Litzkow, M.  "Condor Installation
+    Guide".  May 1989.
+
+(6) Bricker, A. and Litzkow, M.  Unix manual pages:
+    condor_intro(1), condor(1), condor_q(1), condor_rm(1),
+    condor_status(1), condor_summary(1), condor_config(5),
+    condor_control(8), and condor_master(8).  January 1991.
diff --git a/preview-fall2024-info/doc/thain-dissertation.pdf b/preview-fall2024-info/doc/thain-dissertation.pdf
new file mode 100644
index 000000000..2cbca4746
Binary files /dev/null and b/preview-fall2024-info/doc/thain-dissertation.pdf differ
diff --git a/preview-fall2024-info/doc/thain-dissertation.ps b/preview-fall2024-info/doc/thain-dissertation.ps
new file mode 100644
index 000000000..5e744310f
Binary files /dev/null and b/preview-fall2024-info/doc/thain-dissertation.ps differ
diff --git a/preview-fall2024-info/doc/tr1499.ps b/preview-fall2024-info/doc/tr1499.ps
new file mode 100644
index 000000000..29ab10b78
Binary files /dev/null and b/preview-fall2024-info/doc/tr1499.ps differ
diff --git a/preview-fall2024-info/doc/tutorial.ps.gz b/preview-fall2024-info/doc/tutorial.ps.gz
new file mode 100644
index 000000000..0ac4316ac
Binary files /dev/null and b/preview-fall2024-info/doc/tutorial.ps.gz differ
diff --git a/preview-fall2024-info/doc/usenix_1.92.pdf b/preview-fall2024-info/doc/usenix_1.92.pdf
new file mode 100644
index 000000000..781346cc1
Binary files /dev/null and b/preview-fall2024-info/doc/usenix_1.92.pdf differ
diff --git a/preview-fall2024-info/doc/usenix_1.92.ps b/preview-fall2024-info/doc/usenix_1.92.ps
new file mode 100644
index 000000000..855cc9e78
Binary files /dev/null and b/preview-fall2024-info/doc/usenix_1.92.ps differ
diff --git a/preview-fall2024-info/doc/v5install.txt b/preview-fall2024-info/doc/v5install.txt
new file mode 100644
index 000000000..32089961c
--- /dev/null
+++ b/preview-fall2024-info/doc/v5install.txt
@@ -0,0 +1,159 @@
+
+These installation instructions apply to the binary distributions of
+version 5 Condor.
+
+These distributions are available in source form also, but we are not
+attempting to provide instructions on how to build Condor from the
+source this time around.  We have found that there is a great deal of
+variation in the availability and implementation of the tools we use
+to build Condor, and we can no longer keep up with the questions and
+problems that arise.  We don't want to discourage anybody from
+building and modifying the code to meet their needs; it's just that if
+you choose to go this route, you're on your own.
+
+The binary distribution is packaged in the following 4 directories:
+
+    bin      - executable programs
+    lib      - libraries to link condor user programs and scripts
+    doc      - postscript and ascii versions of documents
+    examples - C, Fortran, and C++ example programs
+
+You should plan to install "bin" and "lib" under a common directory in
+some well known location so that your users can find the condor
+programs and libraries in a consistent place.  The doc directory can
+go wherever is customary at your site.  The examples directory could
+also go anywhere, but the "Makefiles" do contain relative references
+to the "lib" directory.  If you move the examples directory, you will
+need to change the "Makefiles" before you can build the examples.
+
+SOLARIS SPECIFIC NOTE: Condor no longer needs to run under the root
+user-id *on Solaris only*.  Instead, all daemons may be run under the
+condor user-id.  We do not recommend mixing root daemons with condor
+daemons.  Some documents will still refer to the need for the root
+user-id.  You can ignore those parts.
+
+1. Un-compress and un-tar the distribution.
+   Example: you want to install all the condor related stuff
+   in "/usr/condor".
+      1. mkdir /usr/condor
+      2. cd /usr/condor
+      3. (fetch the distribution file)
+      4. uncompress condor_5.1a_DecAlpha.tar.Z
+      5. tar xf condor_5.1a_DecAlpha.tar
+
+2. One machine in your condor pool will act as a "central manager"
+   for the pool.  You should decide which machine will serve that
+   function, and install Condor there first.  Because of the importance
+   of this machine, we recommend you pick one which is likely to be
+   reliable, or at least to get rebooted promptly if it does crash.
+
+3. Each machine in your pool will need a "condor" account, and a
+   "condor" group.  Condor's UID and GID should be consistent across all
+   machines in your pool.  You can easily add machines to your pool
+   at any time, but you should decide at this point where your
+   condor home directories will be located.  You should install
+   the "condor" account for at least your central manager machine now.
+
+4. Several of the programs must be "setgid condor".  Go to
+   the condor "bin" directory and make certain that the following programs
+   have owner "condor", group "condor", and permission "-rwxrwsr-x":
+      condor_globalq
+      condor_history
+      condor_jobqueue
+      condor_preen
+      condor_prio
+      condor_q
+      condor_rm
+      condor_submit
+      condor_summary
+      condor_throttle.generic
+
+5. If Condor's home directory will be shared across the machines in
+   your pool, you will need to create a directory for each machine where
+   it can keep machine specific data.  Make the directories under
+   Condor's home directory, and name each directory with the hostname
+   of the machine whose data it will hold.
+
+   If Condor's home directory will not be shared across the machines in
+   your pool, you don't need to do this.
+
+6. If condor runs into a problem at your site, it will send mail describing
+   what went wrong.  You need to decide who should get such mail.  You
+   may want to make this an alias so that you can change the recipient
+   of the mail later without changing condor.
+
+7. Once a week Condor will try to send a status report back to its
+   authors.  This function can be defeated, but if there is any
+   way within reason that you can allow this at your site, we would
+   really appreciate your cooperation.  This kind of information
+   is really helpful to us in determining (and demonstrating) the
+   effectiveness of our work.  Our internet mailing address is
+      condor_admin@cs.wisc.edu
+   Please consider whether you can send mail to this address from your
+   site, or whether you can make some modification of the address to
+   get the mail delivered.  You will be asked for this information
+   later.
+
+8. Some of the scripts and configuration files in the binary distribution
+   will need to be customized for your installation.  A program called
+   "condor_customize" is provided for this (in the "bin" directory).
+   The program will ask you 5 things, providing defaults for each.  You
+   should be prepared to answer these questions before you run the
+   customization program.
+
+   a. Which host you want to act as the central manager for your Condor
+      pool.  See step 2 above.
+   b. The local email address where you want Condor to notify you
+      regarding problems.  See step 6 above.
+   c. The email address where condor can send a weekly status report
+      back to its authors.  See step 7 above.
+   d. The pathname of the directory which contains the "bin" and
+      "lib" directories.  See step 1 above.
+   e. The pathname of the directory which contains the machine specific
+      data.  See step 5 above.
+      Note: two macros are available to simplify the specification
+      of this directory.  The $(TILDE) macro translates to the name
+      of Condor's home directory on whatever machine it is evaluated
+      on, and the $(HOSTNAME) macro evaluates to the hostname of
+      whatever machine it is evaluated on.  Thus if you have separate
+      home directories for all the condor accounts in your pool you
+      could specify
+         $(TILDE)
+      for this value, and if you have a shared home directory for
+      condor you could specify
+         $(TILDE)/$(HOSTNAME)
+
+9. Build subdirectories to hold the machine specific data by running
+   "condor_init" on each machine you want in your pool.  N.B. you must
+   be "root" when you run "condor_init" (except on Solaris).
+
+10. Start the condor daemons by running "condor_on" on each machine you
+    want in your pool.  N.B. you must be "root" when you run this
+    program (except on Solaris).
+
+11. Ensure that condor is running.  You can run
+       ps -e | egrep condor_
+    On your central manager machine you should have processes for
+
+       condor_master
+       condor_collector
+       condor_negotiator
+       condor_kbdd
+       condor_startd
+       condor_schedd
+
+    On all other machines in your pool you should have processes for
+
+       condor_master
+       condor_kbdd
+       condor_startd
+       condor_schedd
+
+    (Suns and HP's don't run "condor_kbdd" as they don't need it.)
+
+12. Ensure that the condor daemons are communicating.  You can
+    run "condor_status" to get a one-line summary of the status
+    of each machine in your pool.
+
+13. Try building and running some test jobs.  A separate document
+    describes how to link and run these jobs.
diff --git a/preview-fall2024-info/doc/video_pipeline-nossdav2004.pdf b/preview-fall2024-info/doc/video_pipeline-nossdav2004.pdf
new file mode 100644
index 000000000..239494905
Binary files /dev/null and b/preview-fall2024-info/doc/video_pipeline-nossdav2004.pdf differ
diff --git a/preview-fall2024-info/doc/wodi_users_guide.html b/preview-fall2024-info/doc/wodi_users_guide.html
new file mode 100644
index 000000000..77b7bd12e
--- /dev/null
+++ b/preview-fall2024-info/doc/wodi_users_guide.html
@@ -0,0 +1,228 @@
+
+
+WoDi User Guide
+
+
+
+

+WoDi User Guide +

+
+WoDi (short for Work Distributor) is intended to assist in writing
+"Master-Worker" style parallel applications.  In particular, WoDi is able
+to make decisions about what work tasks should be assigned to which
+worker processes.  WoDi is also able to monitor resources and ensure that
+the results of all work steps are reported exactly once to the master,
+even in a dynamic resource environment in which machines may be lost at
+any time.
+

+ +WoDi is implemented as a library of routines which are called by the master +and worker processes. This library provides functions for starting WoDi, +delimiting "cycles", and sending and receiving work steps. A WoDi cycle is +a collection of work steps which must all be completed before the next +cycle can be started. When cycles are used, WoDi will maintain a history +of the CPU utilization of work steps within a cycle, and use this history +to schedule future work steps. A variety of log files are also produced, +and some of these can be used by the DeVise visualization tool to visualize +the execution of the program. +

+ +

Overview of WoDi functions

+ +WoDi functions can be broken into two groups, those used by the master, and +those used by the workers. The vast majority of the functions are for use +by the master. The workers' only functions are for receiving work steps, +and sending results. A description of each library function follows. +

+ +

+int
+wodi_init(int init_bufs, int buflist[], int taglist[], int class_count, 
+         int class_needs[], char **slave_argv, int work_tag, int resp_tag,
+         int do_ordering);
+
+
+wodi_init should be the first WoDi function invoked by the master process;
+it starts WoDi and supplies the information WoDi needs to begin running.
+Because WoDi is responsible for starting new workers as machines become
+available, it needs enough information to successfully start these
+workers.
+

+ +The first three parameters provide to WoDi a collection of messages which +should be sent to worker processes when they are first started. init_bufs +is the number of messages to be sent at start-up, buflist is an integer +array of PVM buffer identifiers which will be sent with the corresponding +tags in the taglist array. If no initialization messages are needed +init_bufs should be set to 0, and the values for buflist and taglist are +not used. +

+
+The next two parameters, class_count and class_needs, specify the number
+of machines the application would like to use.  Because the application
+must compete for resources with other users, it may not be able to
+allocate this many machines; the value given here is an upper bound.
+class_count is the number of machine classes or types which are to be
+used by the application.  For a homogeneous run, this is always 1.  The
+class_needs array must be of size class_count, and provides the number of
+desired resources in each class.  If a negative value is given for the
+count, WoDi will use a heuristic based on the past history of work steps
+to estimate a good value for the number of machines to be used.
+

+ +The slave_argv parameter gives an "argv" style name of the worker +executable to run, and the command line arguments to be given. This is +very much like the argv given to the pvm_spawn() function. +

+ +work_tag and resp_tag give PVM message tags to be used for work and result +messages. If a non-zero value is given for do_ordering, WoDi will attempt +to schedule the work steps of a cycle based on their past behavior. +
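+
+For illustration, a master for a homogeneous pool that needs no
+initialization messages might call wodi_init roughly as follows.  This
+is a sketch only; the tag values, class size, and worker command line
+below are invented for the example:
+
+#define WORK_TAG 1
+#define RESP_TAG 2
+
+char *slave_argv[] = { "wodi_worker", "foo", "bar", NULL };
+int class_needs[] = { 10 };    /* ask for up to 10 machines */
+
+wodi_init(0, NULL, NULL,       /* no start-up messages */
+          1, class_needs,      /* one machine class (homogeneous) */
+          slave_argv, WORK_TAG, RESP_TAG,
+          1 /* let WoDi order steps by their past behavior */);
+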

+ +

+int wodi_begin_cycle(int cycle_num, int cycle_bufs, int buflist[], int
+			taglist[], int step_count);
+
+
+wodi_begin_cycle is used to specify the beginning of a new cycle of work
+steps.  The first parameter is simply a cycle number, usually incremented
+starting from 0 on each call to this function.  cycle_bufs is the number
+of messages which will be sent to each worker at the beginning of the
+cycle.  These are usually used to update the state of each worker before
+entering the cycle.  buflist and taglist are the same as in wodi_init.
+step_count gives the number of steps in this cycle.  The current
+implementation requires that the number of steps be the same for every
+cycle.
+

+ +

+int wodi_end_cycle(int cycle_num, int cycle_bufs, int buflist[], 
+			int taglist[])
+
+ + +This function specifies the end of the cycle. It is called when the master +has received results from every step in the cycle. The first parameter is +the cycle number, and should be the same as in the most recent call to +wodi_begin_cycle. cycle_bufs, buflist, and taglist have the same meaning as +in wodi_begin_cycle, and these messages will be sent to all workers +immediately. +

+ +

+int wodi_sendwork(int step_num)
+
+
+
+wodi_sendwork is used by the master to send a task to WoDi to be
+forwarded to a worker process.  A message specifying the task is assumed
+to have been packed into the current PVM send buffer.  In this way,
+wodi_sendwork is a replacement for a normal pvm_send().  The argument
+step_num assigns an identifier to this task.  It is usually a number
+between 0 and step_count-1 as given to wodi_begin_cycle() when cycles
+are being used.
+

+ +

+int wodi_recvresponse(int tag)
+
+ + +wodi_recvresponse is used by the master to receive a result from a worker. +The tag provided should be the same as the resp_tag given to wodi_init(). +The return value of wodi_recvresponse() is an integer specifying what task +this result is for. This is the same integer given to wodi_sendwork() when +the task was sent. After calling wodi_recvresponse() the master can unpack +the results just as after a call to pvm_recv(). +

+ +

+int wodi_complete()
+
+ + +wodi_complete() is simply called by the master to terminate the WoDi program. +
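+
+Putting the master-side calls together, the main loop of a typical
+master might look like the following fragment.  It is a sketch only:
+error handling and the pvm_pack/unpack calls for the application's own
+data are omitted, and NCYCLES, NSTEPS, and RESP_TAG are placeholders.
+
+int cycle, step, done;
+
+for (cycle = 0; cycle < NCYCLES; cycle++) {
+    wodi_begin_cycle(cycle, 0, NULL, NULL, NSTEPS);
+    for (step = 0; step < NSTEPS; step++) {
+        /* pack this step's data into the current PVM send buffer */
+        wodi_sendwork(step);
+    }
+    for (done = 0; done < NSTEPS; done++) {
+        int which = wodi_recvresponse(RESP_TAG);
+        /* unpack and record the result for step 'which' */
+    }
+    wodi_end_cycle(cycle, 0, NULL, NULL);
+}
+wodi_complete();
+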

+ +The remaining two functions are called by the worker processes, and not by +the master. +

+ +

+int wodi_recvwork(int from_tid, int tag)
+
+ + +wodi_recvwork() is used to receive a work task. The from_tid value should +be -1, and the tag should be the same as the work_tag given by the master +in the call to wodi_init(). Following a call to wodi_recvwork, a message +corresponding to a work step can be unpacked just as in a call to +pvm_recv(). +

+ +

+int wodi_sendresponse(int to_tid, int tag)
+
+ + +wodi_sendresponse() is used to send a result message which has already been +packed into the current PVM send buffer back to the master. The to_tid +value is not important, and the tag should be the same as the resp_tag +given by the master in its call to wodi_init(). +
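+
+A worker, correspondingly, is little more than a receive/compute/send
+loop.  Again a sketch only; the packing of application data is elided,
+and how a worker is told to exit is not shown:
+
+for (;;) {
+    wodi_recvwork(-1, WORK_TAG);     /* get the next work step */
+    /* unpack the step, compute, and pack the result, then: */
+    wodi_sendresponse(0, RESP_TAG);  /* to_tid is not important */
+}
+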

+ + +

Compiling WoDi programs

+ +When compiling both the master and worker processes for use with WoDi, they +must be linked with the WoDi library (wodi_lib.a). The WoDi library should +be placed before the PVM library (libpvm3.a) in the link line because it +uses PVM functions. +
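+
+For example, a link line might look like the following (the paths are
+hypothetical; adjust to wherever wodi_lib.a and libpvm3.a live at your
+site):
+
+    cc -o master master.c wodi_lib.a $PVM_ROOT/lib/libpvm3.a
+    cc -o worker worker.c wodi_lib.a $PVM_ROOT/lib/libpvm3.a
+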

+ + +

Running WoDi programs under Condor

+ +To run a WoDi, or any other, program under Condor, you must first write a +submission file and submit it to Condor. A sample submission file is +provided below. +

+ +

+universe = PVM
+executable = wodi_test
+arguments = foo bar
+output = wodi_out
+error = wodi_err
+machine_count = 1..1
+requirements = OpSys == "SunOS4.1.3" && Arch == "sun4m"
+queue
+
+ +The first line specifies that this job will be using PVM. The executable +line specifies the name of the master executable program (presumably in the +same directory as this submission file), and it should be started with the +command line arguments "foo bar." All output generated by the master will +be written to the file wodi_out, and the standard error output will be on +wodi_err. Note that WoDi itself writes much output to the error file also. +The machine_count line specifies that we would like somewhere between 1 and +1 machines at startup. That is, in this example, the program will start +with exactly one machine ready. The requirements expression specifies, +essentially, that we want a Sun workstation. More information on +submitting jobs, and monitoring them while they run can be found in the +condor_submit and other Condor man pages. +

+ +When the job starts running, the executable file "wodi" must also be in the +directory where the submission was done. Additionally, the executable for +the worker processes must also be in this directory. +

+ +


+Last modified: Sun Nov 24 16:14:01 1996 by Jim Basney +
+condor@cs.wisc.edu +
+ + diff --git a/preview-fall2024-info/doc/workflow_condor_2007.pdf b/preview-fall2024-info/doc/workflow_condor_2007.pdf new file mode 100644 index 000000000..9155d4b21 Binary files /dev/null and b/preview-fall2024-info/doc/workflow_condor_2007.pdf differ diff --git a/preview-fall2024-info/edit.sh b/preview-fall2024-info/edit.sh new file mode 100755 index 000000000..024b7ffa5 --- /dev/null +++ b/preview-fall2024-info/edit.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +set -e + +mkdir -p "$PWD/vendor/bundle" + +docker run \ + -it --rm \ + --mount type=bind,source="$PWD",target=/srv/jekyll \ + --mount type=bind,source="$PWD/vendor/bundle",target=/usr/local/bundle \ + --publish 8080:8080 \ + jekyll/jekyll \ + jekyll serve -P 8080 $* diff --git a/preview-fall2024-info/eht-story.html b/preview-fall2024-info/eht-story.html new file mode 100644 index 000000000..bc487005c --- /dev/null +++ b/preview-fall2024-info/eht-story.html @@ -0,0 +1,403 @@ + + + + + + +Junior researchers advance black hole research with OSPool open capacity + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Junior researchers advance black hole research with OSPool open capacity +

+

Some of the greatest scientists and researchers the world has ever seen made their greatest contributions before the age of thirty. +From Albert Einstein to Sir Isaac Newton, history shows that young scientists can greatly advance their field of research provided +they have the opportunity and resources to do so. The Event Horizon Telescope (EHT) Collaboration +— which is propelled forward by the innovative work of its junior researchers — is an example of young scientists at work making +important contributions to science.

+ +

Postdoctoral fellow Angelo Ricarte
+and graduate students
+Abhishek Joshi and Leon Chan
+are three EHT junior researchers currently making waves in black hole research. The EHT Collaboration that these researchers contribute
+to has been making advances in black hole research for over a decade. The collaboration’s goal is to capture detailed black hole images
+by creating a virtual earth-sized telescope. These images are then analyzed by running a multitude of simulations that replicate the flow of matter and light in the warped space-time of a black hole.

+ +

Ricarte, Joshi, and other team members have recently focused on making images of the black holes through polarized light, which can help
+create distinctions between simulations. With this new element, they must now consider another parameter for every step of the process.
+What originally took 5 million computational tasks to perform the necessary simulations now takes around 20 million once polarized light is included.

+ +
+ Postdoctoral fellow Angelo Ricarte +
Postdoctoral fellow Angelo Ricarte. +
+ Graduate student Abhishek Joshi +
Graduate Student Abhishek Joshi. +
+
+ +

That’s why the EHT collaboration looked toward the Open Science Pool (OSPool) in 2022.
+The OSPool provides open, freely available computing capacity to any researcher affiliated with a U.S. institution, including junior
+researchers, who typically have limited funding for computing. The open capacity of the OSPool can be a significant asset for young
+researchers seeking to participate in cutting-edge research. “In the past, creating a whole new simulation library for even one additional
+parameter would have been too time consuming,” Joshi explained. “Now, it’s actually possible to do it.”

+ +

This capacity enabled Ricarte, one of the leads of two recent collaborative papers
+dealing with the telescope results, to make new discoveries. The papers unveil a new
+black hole image that shows “strong and organized” magnetic fields spiraling from the edge of Sgr A*. This is the very first polarized image of Sgr A*,
+and the magnetic field structure hints that strong magnetic fields may occur around all black holes. In a recent press release,
+Ricarte stresses the important role polarized light has played,
+noting that “Polarized light teaches us a lot more about the astrophysics, the properties of the gas, and mechanisms that take place as a black hole feeds.”

+ +
+ EHT's first black hole image of Sgr A* utilizing polarized light.
+
EHT's first black hole image of Sgr A* utilizing polarized light.
+
+
+ +

As the EHT provides more accurate observations, it also starts to unveil limitations of theory and simulations. Leon Chan, a graduate student in astrophysics
+at the University of Colorado and a Croucher Scholar, has been working on addressing such a
+limitation identified by the EHT Collaboration. His research focuses on understanding why current black hole simulations twinkle much more than what is seen
+from the EHT. Suspecting electron temperature, he and his collaborators decided to take pictures of the simulations with differing electron temperatures. This
+required a large number of calculations, and the OSPool was able to help with this problem as well. “It was out of the capability of traditional HPC,”
+Chan said. In their recent publication, they discovered that the light ring of the black
+hole was the major source of twinkling, and by making electrons cooler and the gas more opaque, he and his collaborators were able to block the light ring from twinkling.

+ +
+ Graduate student Leon Chan +
Graduate Student Leon Chan. +
+
+ +

With capacity contributed by more than 50 institutions, many of which were granted awards by the NSF CC* program, +the EHT Collaboration has generated over 10 million black hole images and 5 million black hole spectra, taking an estimated 50 million core hours. This availability of open capacity +has contributed to the ability of Ricarte, Joshi, Chan and others like them to make new discoveries early in their careers, +equipping them to tackle some of the most challenging questions in astrophysics.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/european-htcondor-week-registration.html b/preview-fall2024-info/european-htcondor-week-registration.html new file mode 100644 index 000000000..89eeec59c --- /dev/null +++ b/preview-fall2024-info/european-htcondor-week-registration.html @@ -0,0 +1,353 @@ + + + + + + +Registration is open for the European HTCondor Workshop, September 24-27 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Registration is open for the European HTCondor Workshop, September 24-27 +

+


+ +

This year’s European HTCondor Workshop will be held from September 24 to 27th hosted by NIKHEF-Amsterdam, the Dutch National Institute for Subatomic Physics, in the beautiful Dutch capital city of Amsterdam.

+ +

The workshop will be an excellent occasion for learning from the sources (the developers!) about HTCondor, exchanging experiences and plans with your colleagues, and providing your feedback to the experts. The HTCondor Compute Entrypoint (CE) will be covered as well. Participation is open to all organizations (including companies) and persons interested in HTCondor (and is by no means restricted to particle physics and/or academia!). If you know potentially interested persons, don’t hesitate to make them aware of this opportunity.

+ +

The workshop will cover both using and administering HTCondor; topics will be chosen to best match participants’ interests. We would very much like to hear about your use of HTCondor in your project, your experience, and your plans. You are warmly encouraged to propose a short presentation.

+ +

There will also be time and space for short, perhaps spontaneous, interactive participation (“show us your toolbox” sessions), which proved very popular in previous meetings.

+ +

Registration is now open! Find more information on the event page.

+ +

To ease travel, the workshop will begin Tuesday morning and end around Friday lunchtime.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/events.html b/preview-fall2024-info/events.html new file mode 100644 index 000000000..0c9b9c8d0 --- /dev/null +++ b/preview-fall2024-info/events.html @@ -0,0 +1,768 @@ + + + + + + +Events + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+

+ Events +

+ +
+
+
+ +
+
+
+
+ + + + + + + + + + +
+ + +
+
+ + + + + +
+
+

+ CHTC Information Session - September +

+ +

+ + SEP 18, 2024 + +

+ +

+ UW-Madison Campus, Room 1240 of the Computer Sciences Building +

+
+
+ +
+ +
+ + +
+
+ + + + + +
+
+

+ Save The Date: 2024 European HTCondor Workshop +

+ +

+ + + SEP 24, 2024-27 + +

+ +

+ NIKHEF-Amsterdam. Amsterdam, Netherlands +

+
+
+ +
+ +
+ + + + + + + + + +

Past Events

+
+ +
+ + +
+
+ + + + + +
+
+

+ OSG School 2024 +

+ +

+ + + AUG 5, 2024- 9 + +

+ +

+ University of Wisconsin-Madison +

+
+
+ +
+ +
+ + +
+
+ + + + + +
+
+

+ You are Invited to Attend Throughput Computing 2024 +

+ +

+ + + JUL 8, 2024-12 + +

+ +

+ University of Wisconsin–Madison’s Fluno Center and Online via Zoom +

+
+
+ +
+ +
+ + +
+
+ + + + + +
+
+

+ Workshop: Hands-on Introduction to Using CHTC Systems +

+ +

+ + NOV 8, 2023 + +

+ +

+ UW-Madison Campus, Room 1240 of the Computer Sciences Building +

+
+
+ +
+ +
+ + +
+
+ + + + + +
+
+

+ CHTC Information Session +

+ +

+ + OCT 4, 2023 + +

+ +

+ UW-Madison Campus, Room 1240 of the Computer Sciences Building +

+
+
+ +
+ +
+ + +
+
+ + + + + +
+
+

+ OSG User School 2023 +

+ +

+ + + AUG 7, 2023-11 + +

+ +

+ University of Wisconsin-Madison +

+
+
+ +
+ +
+ + +
+
+ + + + + +
+
+

+ Throughput Computing 2023 +

+ +

+ + + JUL 10, 2023-14 + +

+ +

+ University of Wisconsin–Madison’s Fluno Center and Online via Zoom +

+
+
+ +
+ +
+ + +
+
+ + + + + +
+
+

+ Research Bazaar - Join the CHTC session 'Scaling Up Your Research Computing' +

+ +

+ + + FEB 22, 2023-23 + +

+ +

+ Discovery Building on the UW-Madison campus. +

+
+
+ +
+ +
+ + +
+
+ + + + + +
+
+

+ Server Room Tours +

+ +

+ + + FEB 9, 2023-14 + +

+ +

+ Meet by the Elevators on the 1st Floor Discovery Building, 330 N. Orchard St. Madison, WI 53715 +

+
+
+ +
+ +
+ + +
+
+ + + + + +
+
+

+ HPC Cluster FAQ +

+ +

+ + + JAN 19, 2023-24 + +

+ +

+ Researchers' Link in Morgridge +

+
+
+ +
+ +
+ + +
+
+ + + + + +
+
+

+ GPU/Machine Learning Demo +

+ +

+ + NOV 16, 2022 + +

+ +

+ UW-Madison Campus, Room 1240 of the Computer Sciences Building +

+
+
+ +
+ +
+ + +
+
+ + + + + +
+
+

+ Extended (In-Person) Office Hours +

+ +

+ + NOV 1, 2022 + +

+ +

+ Discovery Building, Room 1260 +

+
+
+ +
+ +
+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/events/2022/11/extended-office-hours.html b/preview-fall2024-info/events/2022/11/extended-office-hours.html new file mode 100644 index 000000000..71fd5fc6c --- /dev/null +++ b/preview-fall2024-info/events/2022/11/extended-office-hours.html @@ -0,0 +1,415 @@ + + + + + + +CHTC Extended (In-Person) Office Hours + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+
+
+
+
+

+ + + + + + + + Past Event | November 1 + +

+
+
+
+
+ +
+
+
+ +
+
+
+
+
+

+ CHTC Extended (In-Person) Office Hours +

+
+
+
+ + +
+
+ +

Sign Up for our November 1st extended, in-person office hours!

+ +

If you’ve missed talking to the Facilitation team in-person, we will be having a special, extended, in-person office hour session on Tuesday, November 1, from 9:00am - 12:00pm CST. The in-person office hours will be held at the Discovery Building, 330 N. Orchard St., Room 1260.

+ +

Sign up here so we can plan ahead.

+ +

This is a chance both to get specific help (as with usual office hours) and to work in the same space as other CHTC users and the facilitation team. Your sign-up helps us plan for space and food.

+ +
+
+
+ +

Who

+ +

CHTC Users

+ +

When

+ +

Tuesday, November 1st anytime from 9:00am - 12:00pm CST

+ +

Where

+ +

Discovery Building, Room 1260

+ +

Questions?

+ +

Please email chtc@cs.wisc.edu with any questions.

+ +
+
+
+
+
+ +
+
+ + +
+ +
+
+
+ + +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/events/2022/11/gpu-ml-demo.html b/preview-fall2024-info/events/2022/11/gpu-ml-demo.html new file mode 100644 index 000000000..30a15409a --- /dev/null +++ b/preview-fall2024-info/events/2022/11/gpu-ml-demo.html @@ -0,0 +1,423 @@ + + + + + + +GPU/Machine Learning Demo + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+
+
+
+
+

+ + + + + + + + Past Event | November 16 + +

+
+
+
+
+ +
+
+
+ +
+
+
+
+
+

+ GPU/Machine Learning Demo +

+
+
+
+ + +
+
+ +

GPU/Machine Learning demo and Q+A on Wednesday, November 16th!

+ +

Come to our GPU/Machine Learning demo and Q+A on Wednesday, November 16th.

+ +

Are you curious about how to run machine learning jobs on CHTC’s GPU resources? Come to a demo and Q+A on Wednesday, November 16, from 1:30 pm - 2:30 pm in Room 1240 of the Computer Sciences Building. No preparation is needed, and questions are welcome. (Cookies are also included!)

+ +

No registration is required.

+ +

Links and Materials

+ +

Slides (including demo commands)

+ +

GPU Job Templates

+ +

GPUs in CHTC Guide

+ +
+
+
+ +

Who

+ +

CHTC Interested Users

+ +

When

+ +

Wednesday, November 16th from 1:30 pm - 2:30 pm CST

+ +

Where

+ +

UW-Madison Campus, Room 1240, Computer Sciences Building

+ +

Questions?

+ +

Please email chtc@cs.wisc.edu with any questions.

+ +
+
+
+
+
+ +
+
+ + +
+ +
+
+
+ + +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/events/2023/01/data-center-tours.html b/preview-fall2024-info/events/2023/01/data-center-tours.html new file mode 100644 index 000000000..0fbde093f --- /dev/null +++ b/preview-fall2024-info/events/2023/01/data-center-tours.html @@ -0,0 +1,415 @@ + + + + + + +Server Room Tours + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+
+
+
+
+

+ + + + + + + + Past Event | February 9-14 + +

+
+
+
+
+ +
+
+
+ +
+
+
+
+
+

+ Server Room Tours +

+
+
+
+ + +
+
+ +

Server Room Tours on February 9th and February 14th!

+ +

Go behind the scenes in a CHTC server room and learn more about the computational infrastructure that powers CHTC’s large-scale computing systems.

+ +

The two tours will be on Thursday, February 9th, at 9:30 am and Tuesday, February 14th, at noon. Space is limited! Register Here.

+ +

We will meet by the Elevators on the 1st Floor Discovery Building, 330 N. Orchard St. Madison, WI 53715.

+ +
+
+
+ +

Who

+ +

CHTC Interested Users

+ +

When

+ +

Thursday, February 9 at 9:30am and Tuesday, February 14 at 12:00pm

+ +

Where

+ +

CHTC server room

+ +

Questions?

+ +

Please email chtc@cs.wisc.edu with any questions.

+ +
+
+
+
+
+ +
+
+ + +
+ +
+
+
+ + +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/events/2023/01/hpc-cluster-faq.html b/preview-fall2024-info/events/2023/01/hpc-cluster-faq.html new file mode 100644 index 000000000..2da6f1f01 --- /dev/null +++ b/preview-fall2024-info/events/2023/01/hpc-cluster-faq.html @@ -0,0 +1,422 @@ + + + + + + +HPC cluster FAQ January 19th and 24th + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+
+
+
+
+

+ + + + + + + + Past Event | January 19-24 + +

+
+
+
+
+ +
+
+
+ +
+
+
+
+
+

+ HPC cluster FAQ January 19th and 24th +

+
+
+
+ + +
+
+ +

This January, we will be going live with a completely new HPC cluster – new execute nodes, new file system, new network, and new operating +system/software modules! This is a continuation of the campus investment in a technology refresh at CHTC, started this summer with our HTC system.

+ +

We will send an email to the chtc-users mailing list when the new cluster is live.

+ +

We are planning two FAQ sessions to go through needed transition steps and answer questions. These sessions will be offered:

+ +
    +
  • Thursday, January 19, 9:30 - 10:30am: This session will be in-person at the Researchers’ Link in the Discovery Building. Meet CHTC staff at the Discovery Building frontdesk to be let upstairs.
  • +
  • Tuesday, January 24, 12 - 1pm: This session will be online, please email chtc@cs.wisc.edu for the Zoom link.
  • +
+ +

No registration is required.

+ +
+
+
+ +

Who

+ +

CHTC Users of the HPC Cluster

+ +

When

+ +

Thursday, January 19 from 9:30am - 10:30am +Tuesday, January 24 from 12:00pm - 1:00pm

+ +

Where

+ +

Researchers’ Link in the Discovery Building and Online on Zoom

+ +

Questions?

+ +

Please email chtc@cs.wisc.edu with any questions.

+ +
+
+
+
+
+ +
+
+ + +
+ +
+
+
+ + +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/events/2023/02/research-bazaar.html b/preview-fall2024-info/events/2023/02/research-bazaar.html new file mode 100644 index 000000000..a91b01680 --- /dev/null +++ b/preview-fall2024-info/events/2023/02/research-bazaar.html @@ -0,0 +1,413 @@ + + + + + + +Research Bazaar + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+
+
+
+
+

+ + + + + + + + Past Event | February 22-23 + +

+
+
+
+
+ +
+
+
+ +
+
+
+
+
+

+ Research Bazaar +

+
+
+
+ + +
+
+ +

Scaling up your computing!

+ +

UW-Madison’s Data Science Hub is hosting the fourth annual Research Bazaar, focused on the theme of Information Insights: Shaping Futures with Data and Computing.

+ +

Don’t miss the CHTC session ‘Scaling Up your Research Computing’.

+ +
+
+
+ +

Who

+ +

Anyone Interested in Research Computing

+ +

When

+ +

February 22-23

+ +

Where

+ +

In-person at the Discovery Building on the UW-Madison campus.

+ +

Questions?

+ +

Please email chtc@cs.wisc.edu with any questions about the CHTC session.

+ +
+
+
+
+
+ +
+
+ + +
+ +
+
+
+ + +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/events/2023/07/throughput-computing-2023.html b/preview-fall2024-info/events/2023/07/throughput-computing-2023.html new file mode 100644 index 000000000..fac44b666 --- /dev/null +++ b/preview-fall2024-info/events/2023/07/throughput-computing-2023.html @@ -0,0 +1,473 @@ + + + + + + +Throughput Computing 2023 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+
+
+
+
+

+ + + + + + + + Past Event | July 10-14 + +

+
+
+
+
+ +
+
+
+ +
+
+
+
+
+

+ Throughput Computing 2023 +

+
+
+
+ + +
+
+ +

Register for virtual attendance (will remain open throughout the event): Register here.

+ +

For the first time, HTCondor Week and the OSG All-Hands Meeting will join together as a single, integrated event from July 10–14 to be held at the University of Wisconsin–Madison’s Fluno Center. Throughput Computing 2023 is sponsored by the OSG Consortium, the HTCondor team, and the UW-Madison Center for High Throughput Computing.

+ +

This will primarily be an in-person event, but remote participation (via Zoom) for the many plenary events will also be offered.

+ +

If you register for the in-person event at the University of Wisconsin–Madison, you can attend plenary and non-plenary sessions, mingle with colleagues, and have planned or ad hoc meetings. Evening events are also planned throughout the week.

+ +

If this is your first time registering for an event on the registration site, you will have to create an account first and then register.

+ +

Schedule

+ +

The schedule can be found under the General Schedule section on the Throughput Computing 2023 event website. The session block topics will not change; however, there will likely be timing adjustments in the schedule as speakers are finalized.

+ +

All the topics typically covered by HTCondor Week and the OSG All-Hands Meeting will be included:

+ +
    +
  • Science Enabled by the OSPool and the HTCondor Software Suite (HTCSS)
  • +
  • OSG Technology
  • +
  • HTCondor Technology
  • +
  • HTCondor and OSG Tutorials
  • +
  • State of the OSG
  • +
  • Campus Services and Perspectives
  • +
+ +

We also have an exciting group of speakers for this year’s Throughput Computing 2023, including:

+ +
    +
  • Laura Cadonati, Professor, School of Physics and Center for Relativistic Astrophysics, Associate Dean for Research, College of Science, Georgia Institute of Technology
  • +
  • Kevin L. Thompson, NSF Program Director
  • +
  • Daniel Andresen, Director, Institute for Computational Research in Engineering and Science, Professor, Dept. of Computer Science; Michelle Munson-Serban Simu Keystone Research Scholar, Kansas State University
  • +
+ +

The U.S. ATLAS and U.S. CMS high-energy physics projects are also planning parallel OSG-related sessions during the event on Wednesday, July 12.

+ +

Workshop Hotel Accommodations

+ +

We have arranged room blocks at a reduced rate at the Fluno Center and at two nearby hotels. We recommend you make your room reservation ASAP, as the number of rooms available at the reduced rate is limited. (Madison is also a popular place to visit in the summer!) Reserved room blocks at these rates begin expiring as soon as June 9th. Please visit the Local Arrangements page to find information about how to book your hotel room at the reduced rate for each hotel.

+ +

Please note: DoubleTree Madison is the only hotel with a free shuttle service to and from the airport.

+ +

Call for Abstracts: HTCondor Sessions

+ +

The call for abstracts for the HTCondor sessions of Throughput Computing 23 is now open. Please visit the Call for Abstracts page to learn how to sign up to give a talk, talk content and length, and how to submit your presentation.

+ +

The submission deadline has been extended to June 19, 2023.

+ +

Questions and Resources

+ +

For questions about attending, speaking, accommodations, and other concerns please contact us at htc23@path-cc.io.

+ +

To learn about this event in more detail, view last year’s schedules for:

+ + + +
+
+
+ +

Dates

+ +

Monday, July 10 through Friday, July 14, 2023.

+ +

Who

+ +

Organizations, researchers, campuses, facilitators and administrators interested in the HTCondor Software Suite and high throughput computing or the OSG Consortium resources or services (including the OSPool, the Open Science Data Federation or the PATh Facility.)

+ +

Where

+ +

Fluno Center on the University of Wisconsin-Madison campus and Online via Zoom.

+ +

Registration

+ +

Registration for Throughput Computing 2023 is now open! Please visit the links below to register:

+ +

Registration for virtual attendance: Register here.

+ +

There is no fee for registration for virtual attendance.

+ +

Questions?

+ +

Please email htc23@path-cc.io with any questions.

+ +
+
+
+
+
+ +
+
+ + +
+ +
+
+
+ + +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/events/2023/10/information-session.html b/preview-fall2024-info/events/2023/10/information-session.html new file mode 100644 index 000000000..8c540b741 --- /dev/null +++ b/preview-fall2024-info/events/2023/10/information-session.html @@ -0,0 +1,425 @@ + + + + + + +CHTC Information Session + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+
+
+
+
+

+ + + + + + + + Past Event | October 4 + +

+
+
+
+
+ +
+
+
+ +
+
+
+
+
+

+ CHTC Information Session +

+
+
+
+ + +
+
+ +

Learn about CHTC's computing +and data services for UW-Madison affiliates at this lunchtime information session!

+ +

The Center for High Throughput Computing (CHTC) is hosting an Information Session to welcome the new academic year! Pizza will be provided and CHTC staff will present an introduction to getting started using CHTC resources to help you accomplish your computational research goals.

+ +

Please register to help us estimate the number of +attendees: Register Here

+ +

Materials

+ +

Slides

+ +

Feedback

+ +
+
+
+ +

Who

+ +

UW-Madison faculty, students, and staff interested in large-scale computing.

+ +

When

+ +

Wednesday, October 4, 2023, 12:00 - 1:00 pm CST

+ +

Where

+ +

Computer Sciences Building, Room 1240, UW-Madison Campus

+ +

Registration

+ +

Free Registration

+ +

Questions?

+ +

Please email chtc@cs.wisc.edu with any questions.

+ +
+
+
+
+
+ +
+
+ + +
+ +
+
+
+ + +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/events/2023/11/intro-workshop.html b/preview-fall2024-info/events/2023/11/intro-workshop.html new file mode 100644 index 000000000..6724e44ee --- /dev/null +++ b/preview-fall2024-info/events/2023/11/intro-workshop.html @@ -0,0 +1,429 @@ + + + + + + +Workshop: Hands-on Introduction to Using CHTC Systems + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+
+
+
+
+

+ + + + + + + + Past Event | November 8 + +

+
+
+
+
+ +
+
+
+ +
+
+
+
+
+

+ Workshop: Hands-on Introduction to Using CHTC Systems +

+
+
+
+ + +
+
+ +

Join us for a half-day workshop +on getting started in CHTC, featuring hands-on exercises and discussion.

+ +

Have you gotten a CHTC account (or wanted to) and not quite been able to get +started? This is the workshop for you! We’ll cover the basics of logging in +and submitting jobs on CHTC systems, and include time for discussions and +questions about your own work.

+ +

Registration is required and you can register here: Workshop Registration

+ +

This workshop is also included in the series of fall workshops offered by +the Data Science Hub. See other workshops in the series +here: Data Science Hub Fall 2023 Mini Workshops

+ +

Workshop Materials

+ +
+
+
+ +

Who

+ +

UW-Madison faculty, students, and staff interested in using CHTC resources.

+ +

When

+ +

Wednesday, November 8, 2023, 9:00 am - 12:30 pm CST

+ +

Where

+ +

Computer Sciences Building, Room 1240, UW-Madison Campus

+ +

Registration

+ +

Registration is required.

+ +

Register Here

+ +

Questions?

+ +

Please email chtc@cs.wisc.edu with any questions.

+ +
+
+
+
+
+ +
+
+ + +
+ +
+
+
+ + +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/events/2024/01/throughput-computing-2024.html b/preview-fall2024-info/events/2024/01/throughput-computing-2024.html new file mode 100644 index 000000000..bdf57d5b0 --- /dev/null +++ b/preview-fall2024-info/events/2024/01/throughput-computing-2024.html @@ -0,0 +1,454 @@ + + + + + + +Join Us at Throughput Computing 2024, July 8 - 12 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+
+
+
+
+

+ + + + + + + + Past Event | July 8-12 + +

+
+
+
+
+ +
+
+
+ +
+
+
+
+
+

+ Join Us at Throughput Computing 2024, July 8 - 12 +

+
+
+
+ + +
+
+ +

Don’t miss this opportunity to connect with the High Throughput Computing community.

+ +

You are invited to the second annual Throughput Computing event (HTC 24) from July 8-12 to be held in beautiful Madison, Wisconsin. HTC 24 brings together researchers, campuses, science collaborations, facilitators, administrators, government representatives, and professionals interested in high throughput computing to:

+ +
    +
  • Engage with the throughput computing community, including the OSG Consortium and the PATh and Pelican teams and many others contributing to HTC
  • +
  • Be inspired by presentations and conversations with community leaders and contributors sharing common interests
  • +
  • Learn about HTC and new developments to advance your science, your collaboration or your campus
  • +
+ +

Registration is Open!

+ +

Connect with CC* Campuses and OSG Staff

+ +

CC* campuses (current and potential) will have the opportunity to build connections and to advance their technical know-how at the dedicated CC* track held Wednesday, July 10th. These sessions will bring together campus staff, including staff involved directly with HTC technology, with the OSG Consortium staff. The goal is to engage with and to learn from each other to improve the experience of providing or utilizing capacity and to advance scientific research on your own campus and across the nation.

+ +

Speaking Opportunities

+ +

We are introducing Lightning Showcases from the community on Tuesday, July 9. Come and give a lightning talk about your project, tool, or activities around HTC. To keep the session relaxed and informal, you can sign up for a slot on the first day of the workshop.

+ +

We also encourage you to consider a more formal talk. Technical presentations at HTC 24 are short, typically 20 minutes in length. Applying requires only a brief abstract submission.

+ +

Visiting Madison

+ +

Madison, Wisconsin is both a beautiful and a popular place to visit in the summer. We do have a limited number of room blocks reserved for HTC 24 and encourage you to register and book your hotel room as early as possible. Visit the Event Site Local Arrangements for accommodation details.

+ +

Questions and Resources

+ +

HTC 24 is sponsored by the OSG Consortium, the HTCondor team and the UW-Madison Center for High Throughput Computing.

+ +

For questions about attending, speaking, accommodations, and other concerns please contact us at htc@path-cc.io.

+ +

To learn about HTC 24 in more detail, view the event website:

+ + + +
+
+
+ +

Dates

+ +

Monday, July 8 through Friday, July 12, 2024.

+ +

Who

+ +

Researchers, campuses, scientific collaborations, facilitators, administrators, and professionals interested in the HTCondor Software Suite and high throughput computing or the OSG Consortium resources or services (including the OSPool, the Open Science Data Federation, the Pelican Platform, or the PATh Facility).

+ +

Where

+ +

Fluno Center on the University of Wisconsin-Madison campus and Online via Zoom.

+ +

Registration

+ +

Registration is open but closes soon! In-person registration closes June 30. Remote registration will remain open throughout the event. Visit the Event Site for registration information. Registration is required for all attendees, even if you plan to attend only remotely. Registration for in-person attendance costs $125 per day; there is no fee for virtual attendance. There are two places to register, depending upon whether you will be attending in person or remotely:

+ + + +

Questions?

+ +

Please email htc@path-cc.io with any questions.

+ +
+
+
+
+
+ +
+
+ + +
+ +
+
+
+ + +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/events/2024/03/osg-school-2024.html b/preview-fall2024-info/events/2024/03/osg-school-2024.html new file mode 100644 index 000000000..49403cea8 --- /dev/null +++ b/preview-fall2024-info/events/2024/03/osg-school-2024.html @@ -0,0 +1,438 @@ + + + + + + +OSG School 2024 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+
+
+
+
+

+ + + + + + + + Past Event | August 5- 9 + +

+
+
+
+
+ +
+
+
+ +
+
+
+
+
+

+ OSG School 2024 +

+
+
+
+ + +
+
+ +

Is limited computing capacity holding back your science?

+ +

Applications for the OSG School are now open!

+ +

The OSG School provides researchers the opportunity to learn how to use high-throughput computing (HTC) systems, at their campus or using the national-scale Open Science Pool, to run the large-scale computing applications that are at the heart of today’s cutting-edge science.

+ +

The school is ideal for:

+ +
    +
  • +

    Researchers (especially graduate students and post-docs) in any research area for which large-scale computing is a vital part of the research process;

    +
  • +
  • +

    Anyone (especially students and staff) who supports researchers who are current or potential users of high-throughput computing;

    +
  • +
  • +

    Instructors (at the post-secondary level) who teach future researchers and see value in integrating high-throughput computing into their curriculum.

    +
  • +
+ +

People accepted to this program will receive financial support for basic travel and local costs associated with the School.

+ +

To learn more about the event, check out this article that features 2023 School students and their motivations for attending.

+ +

View complete details and access the School application

+ +
+
+
+ +

Dates

+ +

August 5–9, 2024

+ +

Who

+ +

Researchers (especially graduate students and post-docs), students and staff who support current or potential users of HTC, and instructors at the post-secondary level who want to integrate HTC into their curriculum.

+ +

Where

+ +

The University of Wisconsin-Madison.

+ +

Application and Deadlines

+

Details about the application process can be found on the OSG School 2024 site.

+ +

The deadline for applications is Monday, April 1, 2024.

+ +

Contact Us

+ +

If you have any questions about the event, email us at school@osg-htc.org

+ +
+
+
+
+
+ +
+
+ + +
+ +
+
+
+ + +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/events/2024/04/euro-htc.html b/preview-fall2024-info/events/2024/04/euro-htc.html new file mode 100644 index 000000000..01070e98f --- /dev/null +++ b/preview-fall2024-info/events/2024/04/euro-htc.html @@ -0,0 +1,427 @@ + + + + + + +Save The Date for the European HTCondor Workshop, September 24-27 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+
+
+
+
+

+ + + + + + + + Upcoming Event | September 24-27 + +

+
+
+
+
+ +
+
+
+ +
+
+
+
+
+

+ Save The Date for the European HTCondor Workshop, September 24-27 +

+
+
+
+ + +
+
+ +

This year’s European HTCondor Workshop will be held from September 24 to 27, hosted by NIKHEF-Amsterdam, the Dutch National Institute for Subatomic Physics, in the beautiful Dutch capital city of Amsterdam.

+ +

The workshop will be an excellent occasion to learn about HTCondor from the source (the developers!), to exchange experiences and plans with your colleagues, and to provide your feedback to the experts. The HTCondor Compute Entrypoint (CE) will be covered as well. Participation is open to all organizations (including companies) and persons interested in HTCondor, and is by no means restricted to particle physics and/or academia! If you know potentially interested persons, don’t hesitate to make them aware of this opportunity.

+ +

The workshop will cover both using and administering HTCondor; topics will be chosen to best match participants’ interests. We would very much like to hear about your use of HTCondor in your project, your experience, and your plans. You are warmly encouraged to propose a short presentation.

+ +

There will also be time and space for short, perhaps spontaneous, interactive contributions (“show us your toolbox” sessions), which proved very popular in previous meetings.

+ +

Registration and abstract submission will open in due course.

+ +

To ease travel, the workshop will begin Tuesday morning and end around Friday lunchtime.

+ +
+
+
+ +

Dates

+ +

September 24-27, 2024

+ +

Who

+ +

Anyone interested in learning about HTCondor.

+ +

Where

+ +

NIKHEF-Amsterdam. Amsterdam, Netherlands

+ +

Registration

+ +

Registration is not yet open for this event, but will open in due course.

+ +
+
+
+
+
+ +
+
+ + +
+ +
+
+
+ + +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/events/2024/09/OSG-User-School-2023.html b/preview-fall2024-info/events/2024/09/OSG-User-School-2023.html new file mode 100644 index 000000000..913b0c23b --- /dev/null +++ b/preview-fall2024-info/events/2024/09/OSG-User-School-2023.html @@ -0,0 +1,425 @@ + + + + + + +OSG User School 2023, Aug. 7–11 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+
+
+
+
+

+ + + + + + + + Past Event | August 7-11 + +

+
+
+
+
+ +
+
+
+ +
+
+
+
+
+

+ OSG User School 2023, Aug. 7–11 +

+
+
+
+ + +
+
+ +

Has your research computing outgrown your available capacity? How could access to much more computing transform your research, or that of others? If you have research workloads that can be broken into many independent, parallel computing tasks, we can help!

+ +

The OSG User School provides researchers the opportunity to learn how to use high-throughput computing (HTC) systems to run large-scale computing applications at their campus or using the national-scale OSG Consortium.

+ +

To learn more about the event, check out these articles written about the OSG User School 2022:

+ + +

Apply by April 17 on the OSG School website!

+ +
+
+
+ +

Dates

+ +

August 7 - 11, 2023

+ +

Who

+ +

Researchers (especially graduate students and post-docs), students and staff who support current or potential users of HTC, and instructors at the post-secondary level who want to integrate HTC into their curriculum.

+ +

Where

+ +

The University of Wisconsin-Madison.

+ +

Application and Deadlines

+

Details about the application process can be found on the OSG User School 2023 site.

+ +

The deadline for applications is Monday, April 17, 2023.

+ +

Contact Us

+ +

If you have any questions about the event, email us at user-school@osg-htc.org

+ +
+
+
+
+
+ +
+
+ + +
+ +
+
+
+ + +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/events/2024/09/information-session.html b/preview-fall2024-info/events/2024/09/information-session.html new file mode 100644 index 000000000..b33e54f5a --- /dev/null +++ b/preview-fall2024-info/events/2024/09/information-session.html @@ -0,0 +1,429 @@ + + + + + + +CHTC Information Session - September + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+
+
+
+
+

+ + + + + + + + Upcoming Event | September 18 + +

+
+
+
+
+ +
+
+
+ +
+
+
+
+
+

+ CHTC Information Session - September +

+
+
+
+ + +
+
+ +

Learn about CHTC's computing +and data services for UW-Madison affiliates at this information session!

+ +

CHTC staff will present an overview of CHTC services and how these services can help researchers accomplish their research and computational goals. CHTC staff will also help attendees identify the next steps for getting started - whether that is getting an account, logging in, or starting to run work. After the presentation, there will be time for Q+A and hands-on help from the CHTC Facilitation Team.

+ +

This is the perfect opportunity for any UW-Madison researcher to learn more about CHTC - whether you are just curious or have already gotten an account.

+ +

Register Here

+ +
+
+
+ +

Who

+ +
    +
  • Any UW-Madison researcher who wants to learn more about CHTC and tackling computational problems
  • +
  • Any UW-Madison researcher who has recently gotten a CHTC account
  • +
+ +

When

+ +

Wednesday, September 18, 2024, 10:30 am - 12:00 pm CDT

+ +

Where

+ +

Computer Sciences Building, Room 1240, UW-Madison Campus

+ +

Registration

+ +

Register Here

+ +

Questions?

+ +

Please email chtc@cs.wisc.edu with any questions.

+ +
+
+
+
+
+ +
+
+ + +
+ +
+
+
+ + +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/events/:year/:month.jpg b/preview-fall2024-info/events/:year/:month.jpg new file mode 100644 index 000000000..5653ded09 Binary files /dev/null and b/preview-fall2024-info/events/:year/:month.jpg differ diff --git a/preview-fall2024-info/events/demo.html b/preview-fall2024-info/events/demo.html new file mode 100644 index 000000000..6f962862f --- /dev/null +++ b/preview-fall2024-info/events/demo.html @@ -0,0 +1,14 @@ + + +
+ +
diff --git a/preview-fall2024-info/events/demo/index.html b/preview-fall2024-info/events/demo/index.html new file mode 100644 index 000000000..6f962862f --- /dev/null +++ b/preview-fall2024-info/events/demo/index.html @@ -0,0 +1,14 @@ + + +
+ +
diff --git a/preview-fall2024-info/events/extended-office-hours.html b/preview-fall2024-info/events/extended-office-hours.html new file mode 100644 index 000000000..d91b9ab16 --- /dev/null +++ b/preview-fall2024-info/events/extended-office-hours.html @@ -0,0 +1,14 @@ + + +
+ +
diff --git a/preview-fall2024-info/favicon.ico b/preview-fall2024-info/favicon.ico new file mode 100644 index 000000000..9f6ecc6c3 Binary files /dev/null and b/preview-fall2024-info/favicon.ico differ diff --git a/preview-fall2024-info/fellowships/fellows.html b/preview-fall2024-info/fellowships/fellows.html new file mode 100644 index 000000000..65cae98d3 --- /dev/null +++ b/preview-fall2024-info/fellowships/fellows.html @@ -0,0 +1,689 @@ + + + + + + +Open projects for CHTC Fellows + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + +
+
+

CHTC Fellows

+
+ + +
+
+
+
+
+
+ + + Headshot for Ben Staehle + +
+
+
+
+
+

+ + Ben Staehle + +

+
Mentor(s):
Joe Bartowiak
+
+
+
+
+

Tracking server inventory and elevation

+
+

The CHTC maintains over 1,000 servers on the UW–Madison campus and across the country. Keeping track of server elevation (datacenter and rack location), serial numbers, and asset tags is a challenge that is always in need of improvement. This project will focus on taking existing data from the CHTC hardware monitoring system and automatically exporting it to other systems such as Google spreadsheets or ITAdvisor. After a successful summer, the student fellow will gain skills in Python, monitoring, and the Google Docs APIs.

+ +
+
+
+
+
+
+ +
+
+
+
+
+
+ + + Headshot for Kristina Zhao + +
+
+
+
+
+

+ + Kristina Zhao + +

+
Mentor(s):
Emma Turetsky and Ian Ross
+
+
+
+
+

Integrating PyTorch and Pelican

+
+

PyTorch is one of the most popular machine learning frameworks. +An important aspect of using it is the data engineering: how +is input data fed into the model during training? Going from +“tutorial scale” problems to cutting-edge research requires +drastically different techniques around data handling.

+ +

For this project, we aim to better integrate Pelican +into the PyTorch community, providing both technical +mechanisms (implementing the fsspec interface for Pelican) +and documentation by providing tutorials and recipes for +scaling PyTorch-based training using a combination of HTCondor +and Pelican.

+ +
+
+
+
+
+
+ +
+
+
+
+
+
+ + + Headshot for Neha Talluri + +
+
+
+
+
+

+ + Neha Talluri + +

+
Mentor(s):
Jason Patton
+
+
+
+
+

Where in the world am I

+
+

In PATh, an important part of the infrastructure is the “glidein”, a client that starts at a remote location and provides computational cycles for research. In the past, glideins have relied on configuration at remote locations to determine their location, but this often results in missing or incorrect information. This project will focus on enhancing glideins so that they can detect and report where they are running in the world, possibly including data like geolocation and institutional owner. After a successful summer, the student fellow will gain skills in Python, bash, and layer 3 networking.

+ +
+
+
+
+
+
+ +
+
+
+
+
+
+ + + Headshot for Patrick Brophy + +
+
+
+
+
+

+ + Patrick Brophy + +

+
Mentor(s):
Haoming Meng
+
+
+
+
+

Expanded Pelican Origin Monitoring

+
+

The Pelican origin service is responsible for exporting objects in the backend +storage to the data federation. As it is the “entry point” for the data, understanding +the load on the origin and its activities is key to keeping the federation healthy.
+Pelican takes monitoring data from the web server component and feeds it into the popular +Prometheus software to store time series about the activity. This project would focus on:

+
    +
  • Implementing new monitoring probes to complement the existing information.
  • +
  • Forwarding the raw, unsummarized data to an ElasticSearch database for further analysis.
  • +
  • Designing visualizations to provide administrators with an overview of the origin’s activities.
  • +
  • Implementing alerts when there are health issues with the origin.
  • +
+ +

After a successful summer, the student fellow will gain skills in using the Go +language, the Prometheus monitoring system (and other Cloud Native technologies), and web design.

+ +
+
+
+
+
+
+ +
+
+
+
+
+
+ + + Headshot for Pratham Patel + +
+
+
+
+
+

+ + Pratham Patel + +

+
Mentor(s):
Brian Lin
+
+
+
+
+

Enhancing container image build system

+
+

Container images are a widely used technology to package and distribute +software and services for use in systems such as Docker or Kubernetes. +The PATh project builds hundreds of these images on a weekly basis but +the build system needs improvement to support more images and additional +use cases. This project will focus on taking the existing system and +adding configurable, per-image build options. After a successful summer, +the student fellow will gain skills in Docker containers, GitHub actions, and Bash.

+ +
+
+
+
+
+
+ +
+
+
+
+
+
+ + + Headshot for Ryan Boone + +
+
+
+
+
+

+ + Ryan Boone + +

+
Mentor(s):
Cole Bollig and Rachel Lombardi
+
+
+
+
+

Grid Exerciser

+
+

The OSPool is a very large, very dynamic, heterogeneous high throughput system composed of execute points from dozens of campuses all over the United States. Sometimes, something will go wrong at one of these many sites, or one network, or one storage point, and it is difficult to determine where the problem is. This project proposes the design and construction of a “Grid Exerciser”, which consists of intentionally sending sample jobs to targeted locations on the OSPool to verify correct operation and sufficient performance. The project will also have a reporting and visualization component so that the voluminous results can be understood by a human in a concise manner.

+ +
+
+
+
+
+
+ +
+
+
+
+
+
+ + + Headshot for Thinh Nguyen + +
+
+
+
+
+

+ + Thinh Nguyen + +

+
Mentor(s):
Justin Hiemstra
+
+
+
+
+

ML for failure classification in the OSPool

+
+

The OSPool runs hundreds of thousands of jobs every day on dozens of different sites, each unique in its own way. Naturally, there are many hundreds of failures, most of which the system works around, but with added latency to workflow completion. This project would attempt to automatically classify failures from job logs to detect common patterns and highlight places for humans to look to fix common failures with the most payoff. Students working on this project will gain experience applying ML techniques to real world problems.

+ +
+
+
+
+
+
+ +
+
+
+
+
+
+ + + Headshot for Wil Cram + +
+
+
+
+
+

+ + Wil Cram + +

+
Mentor(s):
Greg Thain
+
+
+
+
+

Schedd performance analysis for humans

+
+

The condor_schedd is a single threaded program, and when it is overloaded, +it is difficult for administrators to understand why. There are some +statistics about what it is doing, but there is no clear way to present +this information in a useful way to an administrator. Students working +on this project would build visualizations of complex data, and work +with end users and facilitators to tune output for real world human +consumption.

+ +
+
+
+
+
+
+ +
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/fellowships/index.html b/preview-fall2024-info/fellowships/index.html new file mode 100644 index 000000000..1cb4c809a --- /dev/null +++ b/preview-fall2024-info/fellowships/index.html @@ -0,0 +1,481 @@ + + + + + + +The Center for High Throughput Computing Fellows Program + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + CHTC Logo + +
+ +
+
+
+

+The Center for High Throughput Computing Fellows Program +

+ +

The CHTC Fellows Program trains students in the development and use of cyberinfrastructure through a 12-week summer program where participants will work with mentors on delivering a project that will make an impact on the nation’s science.

+ +

The Program aims to provide opportunities for undergraduate and graduate students to connect with mentors within the community. Project opportunities for students include collaboratively developing software for high throughput computing and cyberinfrastructure, operating complex service environments, and facilitating the use of large-scale computational services. It provides students with insight into how scientists use research computing as a tool to advance their research.

+ + + +
+
+
+
+
+ + + Headshot for Wil Cram + +
+
+
+
+
+

+ + Wil Cram + +

+
Mentor(s):
Greg Thain
+
+
+
+
+

Schedd performance analysis for humans

+
+

The condor_schedd is a single threaded program, and when it is overloaded, +it is difficult for administrators to understand why. There are some +statistics about what it is doing, but there is no clear way to present +this information in a useful way to an administrator. Students working +on this project would build visualizations of complex data, and work +with end users and facilitators to tune output for real world human +consumption.

+ +
+
+
+
+
+ +

View all fellows

+ +

How do you find a project?

+ +

As part of the application process, students will be paired with potential mentors to develop the project ideas from the suggested projects list; project submissions are due prior to the start of the summer. Projects can be large- or medium-sized, taking about 480 or 240 hours to complete, respectively.

+ +

The CHTC Fellows Program aims to be inclusive of students at all levels of experience and skill sets; a willingness to learn and interest in science is prioritized over past accomplishments.

+ +

Projects have been available in the following areas in the past:

+ +
+

Research Facilitation

+ +

Are you interested in and comfortable with collaborative problem-solving, documentation and training, and community building? Are you looking for opportunities to learn about technologies, collaborate with teammates, and develop skills to communicate about technical concepts to a general audience?

+
+ +
+

Infrastructure Services

+ +

Are you interested in state of the art hardware, complex systems and leading technologies? Are you interested in expanding your skills by learning about these diverse technologies, including Linux servers, networking, Kubernetes, distributed file systems, batch systems, and databases?

+
+ +
+

Software Development

+ +

Are you comfortable with programming skills acquired from coursework or dedicated training activities? Are you interested in building on those skills to gain experience in a project in support of scientific research?

+
+ +

How it Works

+ +

Fellows work with a mentor to develop a project relevant to one of the areas listed. Fellows will receive a monthly stipend for the duration of their fellowship. In-person participation at CHTC is preferred to maximize interaction with mentors and others in the cohort, but remote involvement is an option for some projects for those based outside of the Madison, Wisconsin area. However, you must be located in the United States during the Fellowship Program to participate.

+ +

Eligibility:

+ +
    +
  • You must be enrolled in an undergraduate or graduate program at an accredited University or College within the US.
  • +
  • You must have completed at least 1 academic year by the start of the Fellowship.
  • +
  • U.S. citizenship is not required to participate.
  • +
  • In order to ensure the safety of the workplace, proof of COVID-19 vaccination is required unless an exemption is granted for medical, disability or religious reasons.
  • +
+ +

Duration of fellowship and total stipend:

+ +
    +
  • The Fellowship Program lasts for 12 weeks during Summer 2024, running from June 3 to August 23.
  • +
  • The Program provides a $650/week stipend for large projects and a $325/week stipend for medium projects. The stipends will be disbursed monthly.
  • +
  • It is expected that a Fellow working on a large project is available full time during the fellowship period and will not have another significant activity (such as classes, another trainee position or a job) in the same time period.
  • +
+ +

Developing a project

+ +
    +
  • After you apply, we will make a first selection of candidates and reach out to you for a “matchmaking discussion” with a mentor to discuss possible projects.
  • +
  • It is not necessary to prepare a project proposal before application, however including information on your potential interests as part of the application will help us match you with a mentor.
  • +
  • With the help of the mentor, the students will develop and submit a short 2-page project proposal and timeline. Based on that, we will make a final fellows selection.
  • +
  • During the Fellowship, you will work with your mentor and other collaborators. You will also make a short presentation about your project to other Fellows and Mentors as you start your Fellowship, another midway through the project to show your progress and a final presentation about your results at the very end.
  • +
+ +
+

Applying

+ +

Applications for the summer of 2024 are closed. The information below is provided to inform those interested in future opportunities.

+ +

To apply, send an email to chtc-jobs@g-groups.wisc.edu with the following information:

+ +
    +
  • A resume/CV (in PDF format) with contact information. Be sure to include your full name, email address, the name of your university or college and your current or planned major and/or area of study.
  • +
  • A cover letter that describes your interest in the internship program. For example, you may wish to expand on 3 or 4 topics from the following list: your background, your skills, and strengths; what software, computing or scientific topics appeal to you; previous research experience, if any; what you may want to pursue as a future career; and what benefits you would like to gain from this program. If you already have a potential project which interests you from the project lists above, you can also mention them here. It is however not required to have a mentor/project finalized to submit an application. Successful applicants will be connected to mentors to select and define their projects in a 2nd step following this application.
  • +
+
+ +

Summer 2024 Timeline:

+ +
    +
  • Friday 8 March, 2024 - Final deadline for applications (applications will be reviewed on a rolling basis as they arrive.)
  • +
  • Friday 15 March, 2024 - End of selection period for applications. Those selected will be matched to work with mentors on developing a proposal - this may involve a short interview and other follow-up. (Interviews for selected applicants will occur on a rolling basis.)
  • +
  • Monday 1 April, 2024 - Deadline for submission of proposals. (Proposals will be reviewed on a rolling basis.)
  • +
  • By 15 April, 2024 - Final selection of Fellows for summer 2024
  • +
  • June-August - Fellows work on projects.
  • +
+ +

Funding

+ +

External funding support for the CHTC Fellows Program is provided by the National Science Foundation through Cooperative Agreement OAC-2030508 and Grant OAC-2331480. Support for this program is also provided by UW-Madison and the Morgridge Institute for Research.

+ +

Other Scientific Fellowships

+ + + +
+
+
+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/fellowships/list.html b/preview-fall2024-info/fellowships/list.html new file mode 100644 index 000000000..5186d285e --- /dev/null +++ b/preview-fall2024-info/fellowships/list.html @@ -0,0 +1,392 @@ + + + + + + +Open projects for CHTC Fellows + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ Collage photos of current and previous CHTC interns. + Collage photos of current and previous CHTC interns. +
Photo: Morgridge Institute for Research
+
+ +
+
+
+

+ Open projects for CHTC Fellows +

+ +

+ This page lists software development, infrastructure services, and research facilitation + projects for CHTC Fellow applicants to consider. Please check back in January 2025 for the next round of + fellowship opportunities. +

+

+ Updated June 3, 2024 +

+ + + + + + + +

Software Development

+ +

No software development projects are currently available.

+ + + +

Infrastructure Services

+ +

No infrastructure services projects are currently available.

+ + + +

Research Facilitation

+ +

No research facilitation projects are currently available.

+ + + +
+
+
+
+
+
+

Fellowships

+
    + +
+
+
+
+
+
+

Questions: chtc-jobs@g-groups.wisc.edu

+
+
+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/fire-up-the-gpus.html b/preview-fall2024-info/fire-up-the-gpus.html new file mode 100644 index 000000000..2bfb1ced4 --- /dev/null +++ b/preview-fall2024-info/fire-up-the-gpus.html @@ -0,0 +1,359 @@ + + + + + + +Fire up the GPUs: UW-Madison, Morgridge project sparks next-level computing + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Fire up the GPUs: UW-Madison, Morgridge project sparks next-level computing +

+
+ Emile with RAM in Data Center +
+ +

A form of computing machinery that was once the province of hardcore video gamers — the graphics processing unit, or GPU — has recently taken the world of scientific research by storm.

+ +

Originally designed in the late 1990s with the capability of rendering 3D graphics, GPUs have been essential over the years to creating increasingly sophisticated and realistic visual effects.

+ +

While most of the research world has thought in terms of CPUs — or central processing units — as the lingua franca of computing power, GPUs are now emerging at the top of the rack for fields such as machine learning and scientific computing.

+ +

Morgridge Investigator Anthony Gitter, a UW-Madison associate professor of biostatistics and medical informatics, recognized the need early on in his machine learning projects related to protein engineering and drug discovery — projects that generate millions of data points. There were GPU-related tools available that could complete his team’s modeling experiments in days that would have taken months or years — if accomplished at all — with standard CPU-based computing.

+ +

But he also noticed, around 2018, a groundswell of DIY efforts across the UW-Madison campus related to GPUs.

+ +

“I saw a lot of my peers were trying to set up their own systems,” he recalls. “People were buying workstations that would have one GPU and sticking it under a desk for a grad student to run, then trying to figure out what hardware to buy, how to keep it maintained and what software to install.”

+ +

Gitter spotted an opportunity. Why not create a centralized resource and user community that could help support hundreds of varied GPU experiments, much like his Morgridge and UW-Madison colleagues have accomplished through the Center for High-Throughput Computing (CHTC)? That center successfully manages more than 300 unique projects a year, generating hundreds of millions of hours of computing time.

+ +

Read more about the CHTC managed GPU Lab in the full article on the Morgridge Website.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/free-supercomputing.html b/preview-fall2024-info/free-supercomputing.html new file mode 100644 index 000000000..b037ecf75 --- /dev/null +++ b/preview-fall2024-info/free-supercomputing.html @@ -0,0 +1,350 @@ + + + + + + +Free Supercomputing for Research - Scott Cole introduces you to OSG + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Free Supercomputing for Research - Scott Cole introduces you to OSG +

+

Scott Cole, a neuroscience PhD student at the University of California San Diego, wrote an article, which appeared in PythonWeekly, that details how to get up and running on the Open Science Pool. “I was starting to run into computational limitations in my neuroscience research, but I didn’t have any experience speeding up my work with something like high throughput computing,” said Cole. When Cole saw that the OSG User School offered an opportunity to learn how to use OSG and the free access to resources it provides, he jumped on it.

+ +

While at the OSG User School, Cole was able to use the tutorials in the curriculum to work his way through using the Open Science Pool. Despite being jet-lagged due to a flight from Hong Kong, Scott, with the help of the instructors, was able to get a handle on distributed high-throughput computing. “Since the learning process was so streamlined, it made it much easier to learn the necessary tools to utilize the Open Science Pool,” he said.

+ +

Of his research, Cole says, “My lab studies neural oscillations, or brain rhythms. When we record electrical activity from almost any brain region, we see a diverse set of rhythms, which reflect the brain’s computation in that region. We are interested in the biological mechanisms that generate these rhythms, and how they influence the brain’s information processing.”

+ +

See Cole’s article here: https://srcole.github.io/2017/01/03/osg_python/

+ +

For further information on Cole’s research, please visit his main webpage: https://srcole.github.io/

+ + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/get-to-know-todd.html b/preview-fall2024-info/get-to-know-todd.html new file mode 100644 index 000000000..566e21d57 --- /dev/null +++ b/preview-fall2024-info/get-to-know-todd.html @@ -0,0 +1,487 @@ + + + + + + +Get To Know Todd Tannenbaum + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Get To Know Todd Tannenbaum +

+
+ Image of Todd T taking a selfie with a tropical beach in the background. +
+ +

As the technical lead of the CHTC, how did you get started?

+ +

Long ago, I came to UW-Madison to major in computer sciences and, upon graduation, accepted a job as the Unix systems administrator in the Computer-Aided Engineering Center in the College of Engineering.

+ +
+ Retro photograph of Todd sitting in front of a computer with a mullet hairstyle. +
+ +

While there, I was introduced to HTCondor –which went by the name of Condor at that time– and created and managed an HTCondor installation consisting of about 200 Unix workstations deployed across the College. As the years went by, I became the director of (what used to be called) “The Model Advanced Facility”, which served as a high-performance computing and visualization resource in Engineering. We had both HPC supercomputer systems and an HTCondor cluster. I found that the majority of engineers I worked with had problems that fit very well with the high throughput computing paradigm, so our HTCondor installation was more popular than our expensive HPC supercomputers. However, HTCondor didn’t quite do what I needed it to do, so I walked over to the computer sciences building and met with Miron Livny. He suggested I attend the HTCondor developers meeting, which I started doing. Ultimately I made the decision that working in high throughput computing research was more personally rewarding for me than being a director. So in 1997, I switched from engineering to computer sciences to work on HTCondor full time.

+ +

What is the HTCondor software suite and why is it important to researchers?

+ +

Today, scientific research is oftentimes predicated on access to lots of computing cycles for simulations and analysis. +Imagine your work requires running a computer simulation that takes an hour to complete on your nice new laptop; now +imagine you have 10,000 such simulations you need to run. With just your laptop, this would take over a year to +complete, but if you could effectively use 10,000 computers in an organized manner, you could be done in an hour. The +HTCondor Software Suite (HTCSS) enables a researcher or engineer to easily harness the computing capacity of a large +number of computers that may be geographically distributed or owned and managed by different organizations, allowing +these people to submit and track very large numbers of computing jobs.

+ +

HTCSS also provides services for the owners of the servers. It makes sure the capacity is equitably shared amongst +groups of researchers and minimizes the chances that one researcher’s computing negatively impacts the computing of +another researcher.

+ +

HTCSS has enjoyed wide-spread adoption; it has been instrumental in providing the enormous amount of computing required +for two recent Nobel Prizes (and hopefully counting!), and is used not only at universities and government labs +worldwide, but also in industry including companies like SpaceX, Dreamworks, and Boeing.

+ +

How is HTCSS connected to CHTC?

+ +

The HTCondor Software Suite (HTCSS) is the product of three decades of continuous research and development on high-throughput computing within the Center for High Throughput Computing (CHTC) and the UW-Madison Computer Sciences Department. Although HTCSS is open source, all members of the core development team responsible for the support, enhancement, and evolution of the HTCSS work at the CHTC. UW-Madison alone uses HTCSS as its cornerstone technology to complete nearly 250,000 compute jobs each day for the benefit of research groups across the Madison campus as they work on challenges in every field, delivering faculty and graduate students at UW-Madison the computing equivalent of approximately 30,000 computers (CPU cores) running 24 hrs every day.

+ +

In addition, the computing infrastructure at UW-Madison managed by the CHTC is a great experimental laboratory for the +development of the HTCondor Software Suite itself. We heavily utilize the CHTC facilitators to provide feedback to the +HTCSS developers about places where the software is working well and where improvements are needed, what researchers are +finding helpful or confusing, and which new features we should add.

+ +

How has HTCSS evolved over the years?

+ +

When HTCondor was first conceived, it was used primarily just at UW-Madison to deliver a few dozen compute hours per day +to a handful of users. Today HTCSS is in use at universities, government labs, and commercial organizations worldwide; +the software is downloaded more than 100,000 times each month from our website and has grown to +over a million lines of code. We’ve made a lot of changes to deal with ever increasing amounts of scientific data and +sets of jobs/machines. Also, as the technology of computing keeps evolving, HTCondor is evolving with it. For instance, +HTCondor manages GPU resources and containers. Back when I started, there were no GPUs or containers (software that +emulates another computer).

+ +

What does your day at work look like?

+ +

I split my time between management and technological duties. I talk with the other developers that work with the HTCSS +about any support emergencies in the user community. I also work on the design of new features or the best ways to fix +bugs. I still find some time for my favorite part, which is hands-on work of writing code and doing direct support for +the community – such as answering support emails. This is something unique that we do here. At a lot of software +development organizations, the people that handle support questions are different from those who write code and the two +rarely meet. But here, all the developers, including myself, take turns with first level support– answering user support +questions directly. We feel this is important to not lose touch with end users who use the software daily.

+ +

What has been your favorite memory so far?

+ +

Many years from now when I look back at my career, I think I will look back fondly on how our work here surpasses simply +making shareholders more wealthy. It really is (and has been) about enabling scientific discovery via computing for the +benefit of humankind. It is nice to work in academia and still have your work be relevant in “the real world”, outside +of just academic papers. Another thing I will look fondly upon is the long list of colleagues I’ve had the privilege of +working with all over the years. A lot of fun, motivated and extremely intelligent people.

+ +

Where do you see HTCSS in the next 5 years?

+ +

I’d like to see HTCondor being more accessible to an ever wider range of researchers and engineers. I’d like HTCondor to +have even more impact on the individual researcher at smaller institutions and schools, including community colleges. +These are things we are already doing, now but I imagine an even bigger impact in five years.

+ +

What would you say has been the greatest impact of your job?

+ +

My greatest impact is in having the HTCSS enable High Throughput Computing to maintain relevance and keep delivering computing capacity to researchers for scientific discovery. The idea that I’m helping humankind, as opposed to just a group of shareholders, is what I derive the most satisfaction from.

+ +

What has been the greatest challenge so far?

+ +

Drinking from the fire hose! There’s so much we could be working on, so much we should be working on to balance the +needs of supporting existing communities versus building new mechanisms to attract more people to the community, all +while trying to balance my own competing technology -vs- management duties.

+ +

A lot of times ‘what to do’ is an easier problem than answering the ‘who’ and the ‘when’. There’s so much you want to do +but only so many hours in a day and only so much staff effort available. Figuring out where to apply the effort to have +the largest impact is probably the biggest challenge.

+ +
+ Image of Todd T cycling through a cinematic countryside road. +
+ +

How do you like to spend your free time?

+ +

I like cycling (road cycling, I am not coordinated enough for hard-core mountain biking!) and sailing. Both of these +aren’t very conducive to winter which is very unfortunate, so I am generally a happier person in spring, summer and fall +than in winter. Although in winter I get to watch the Green Bay Packers, which is usually a lot of fun, albeit not as +much this year perhaps!
+I enjoy playing and listening to all kinds of music. I’ve played bass guitar since high school in several bands over the +years and I’m also a novice guitar player. My favorite band is The Clash.

+ +

What are some of your favorite books? What books have influenced your work?

+ +
+ Image of todd holding up a pint of beer and smiling. +
+ +

I’m actually one of the founding members of Jordan’s Big 10 Pub Book Club – the Big Ten Pub is the closest pub to the +computer science building, just down on Regent Street. We started the book club about fifty books ago to bring together +people who like both books and beer. We most recently read ‘Rendezvous with Rama’ by Arthur C. Clarke. We’ve even had a +few authors of the books we’ve read join our club discussions.

+ +

Books that have directly influenced my work are probably ones reserved for Mountain-Dew drinking software nerds, such as +‘Effective C++’ by Scott Meyers and ‘Transaction Processing’ by Jim Gray. We have applied a lot of concepts from the +database community into the distributed computing world over the years.

+ +

If you could travel anywhere outside of the country, where would you go?

+ +

Probably the U.S. Virgin Islands because of the amazing sailing opportunities.

+ +

What is one of your hidden talents?

+ +

I like to cook Indian food. My family really likes my Rajma Dal recipe, a vegetarian red kidney bean curry. This past weekend I made Sambar, which is actually in a tupperware in my fridge for lunch. My older son is vegetarian. He decided at the age of four to be vegetarian after asking me where meat comes from. I told him meat comes from the meat aisle in the grocery store, but as an inquisitive four-year-old, he didn’t like my answer and went to ask his mom instead, who then gave him a more detailed answer. Ever since then, he’s refused to eat meat, and that really helped jumpstart my Indian cooking interest - there are so many tasty vegetarian dishes in Indian cuisine.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/gis-story.html b/preview-fall2024-info/gis-story.html new file mode 100644 index 000000000..5884570a8 --- /dev/null +++ b/preview-fall2024-info/gis-story.html @@ -0,0 +1,401 @@ + + + + + + +Preserving historic Wisconsin aerial photos with a little help from CHTC + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Preserving historic Wisconsin aerial photos with a little help from CHTC +

+

Associate State Cartographer Jim Lacy works with CHTC to digitize and preserve historical aerial photography for the public.

+ +
+ Two aerial photos of Madison, Wisconsin in 1937 available on WHAIFinder. +
Two aerial photos of Madison, Wisconsin in 1937 available on WHAIFinder.
+
+ +

Right now, hundreds of thousands of historic aerial photos from around Wisconsin are gradually aging in file cabinets on the University of +Wisconsin-Madison campus, with some of the photos approaching 100 years old. Although historical photography is a captivating and well-trodden +method to study the past, without intervention, this opportunity will be lost as the photos get older and begin to decay.

+ +

Addressing this challenge is the State Cartographer’s Office (SCO) and the +Arthur H. Robinson Map Library (RML), units within +the Department of Geography at the University of Wisconsin-Madison, who are working to digitally preserve +Wisconsin aerial photography from the twentieth century. The SCO and RML team created a free digital tool in 2011 called +the Wisconsin Historic Aerial Image Finder (WHAIFinder), where the public can view and download digital +versions of the air-photos at any time. The platform currently provides almost 40,000 Wisconsin aerial images, ranging from 1937-1941.

+ +

SCO’s Associate State Cartographer Jim Lacy continues the effort of digitizing Wisconsin +air-photos from other decades alongside Map & Geospatial Data Librarian Jaime Martindale from +the RML. “We really want to work hard to digitally preserve all of that photography. That way it’s available forever,” Lacy said.

+ +
+ Associate State Cartographer Jim Lacy +
Associate State Cartographer Jim Lacy
+
+ +

One of the steps necessary when digitizing the photography is to convert the images to Cloud Optimized GeoTIFF (COG) format and generate JPEGs. This caused a computing bottleneck for Lacy, who experimented with his local PC and found that the roughly 100,000 images in need of conversion would take over a month to process. “What we’re doing with the COG conversion frankly is not that complicated,” Lacy said. “It’s basically reformatting data, but it’s still fairly compute intensive.”

+ +

Asking himself if there was a better way, Lacy went in search of a solution +and looked to the Center for High Throughput Computing (CHTC) last November for a more efficient computing option. +Specializing in high throughput computing (HTC), CHTC allows for users like Lacy to split their work up into a large amount of smaller-scale jobs +that can be processed in parallel. He attended a CHTC workshop and worked in close collaboration with the CHTC facilitation team to find the right +computing techniques.

+ +

“The facilitators were extremely helpful in giving me pushes in the right direction,” Lacy remarked. He found that using the +HTCondor Software Suite (HTCSS) was a “learning curve,” despite his previous experience with necessary user elements +like shell scripting and UNIX. “It took some learning, patience, and a lot of trial and error.”

+ +

The impact of using CHTC services and capacity was noteworthy. Running his own case study using input files from +the National Agriculture Imagery Program (NAIP) for Dane County, Lacy found that what took his local +PC 93 minutes to run took five minutes when done through CHTC. “It’s been a huge time saver,” Lacy stated. He also found that utilizing CHTC allowed +room for mistakes and experimentation. “If we were to use a desktop PC it would take a week each time. For us, the option of repeatability is really +important.”

+ +
+ Glimpse of Lacy's presentation on CHTC +
Glimpse of Lacy's presentation on CHTC
+
+ +

One issue Lacy had while using CHTC pertained to the data transfer time, even though the entire process took less time than on his local PC. In his case study, the total data transfer overhead came out to around 21 hours. That was 14 times longer than the amount of time it took to process the data. Lacy recently met with CHTC, including members of the facilitation team and the Pelican project, to discuss possible improvements to data transferring, as well as making the entire process less hands-on.

+ +

Utilizing open capacity from a research computing center, Lacy views his work with the SCO to be atypical to the research world. +“We do some research, but our main focus is outreach and connecting people to mapping related resources. We’re all about the Wisconsin Idea,” +Lacy remarked. “The goal of the Wisconsin Idea is to share our knowledge and help other folks solve problems. ”

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/google76f04192afc2088e.html b/preview-fall2024-info/google76f04192afc2088e.html new file mode 100644 index 000000000..a064ee28c --- /dev/null +++ b/preview-fall2024-info/google76f04192afc2088e.html @@ -0,0 +1 @@ +google-site-verification: google76f04192afc2088e.html \ No newline at end of file diff --git a/preview-fall2024-info/gpargo-cc-star.html b/preview-fall2024-info/gpargo-cc-star.html new file mode 100644 index 000000000..8215085ca --- /dev/null +++ b/preview-fall2024-info/gpargo-cc-star.html @@ -0,0 +1,416 @@ + + + + + + +Great Plains Regional CyberTeam Expanding Capacity for Computing from Great Plains Campuses + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Great Plains Regional CyberTeam Expanding Capacity for Computing from Great Plains Campuses +

+

As a multidisciplinary and multi-institutional collaboration, the Great Plains Augmented Regional Gateway to the +OSG (GP-ARGO) has made significant strides in democratizing computing. Continued support by the CC* award (NSF 23-526) +from the National Science Foundation (NSF) is a testament to its dedication to advancing the field.

+ +

The task of effectively supporting computational and data-intensive research at an under-resourced and +understaffed university in a rural area without the benefit of in-person support is a formidable challenge. +Yet, the Great Plains Augmented Regional Gateway to the OSG (GP-ARGO) +undertook this daunting responsibility across eighteen universities with exceptional success. Not only did +it accomplish this feat, but it also established a new standard of excellence in the field, +supplying cyberinfrastructure and support.

+ +

GP-ARGO is a product of a regionally distributed OSG Gateway led by the Great Plains Network (GPN), but it started as a gigabit Point of Presence (gigaPOP) of institutions across the Great Plains region. “It was just a whole bunch of institutions saying, let’s buy a bunch of networks together because it’s easier on us,” Co-principal investigator (PI) and Cyber Infrastructure Program Committee lead Dan Andresen explained, “which is still what GPN is today, but we’ve moved into more facilitating research and connectivity at a social and scientific level as well.”

+ +

The social networking part of this project came later, starting with GPN, but then developing into the CyberTeam. +“As part of CyberTeam, we noticed that smaller institutions lacked intrinsic capabilities compared to larger ones,” +Andresen noted. This gap in research computing sparked the idea of GP-ARGO.

+ +

The “O” in GP-ARGO stands for “OSG,” indicating the team’s intention to leverage OSG resources. “We knew we wanted +to connect these 18 institutions, and OSG was the way to do it,” Andresen explained. Derek Weitzel, a Research +Professor in distributed computing at the University of Nebraska-Lincoln, played a vital role in connecting OSG +with GP-ARGO. Weitzel had worked with OSG before the project began, playing an integral part in interfacing between +the OSG and GP-ARGO. After establishing OSG’s role in this new project, “it became just a simple matter of obtaining +the 18 machines and then figuring out which institutions wanted to be a part of this first beta testing phase,” +Andresen reminisced.

+ +

Handling 18 machines across six states came with challenges, particularly in communicating and managing 18 administrative +domains, security protocols, and rule differences. “None of these sites were the same,” Weitzel explained. “Some sites +were very restrictive, others were very relaxed, and we had to make all of them work.” Kyle Hutson, one of the +former mentors for the Cyber Infrastructure side of the CyberTeam, played a crucial role in resolving these technical nuances.

+ +

With GP-ARGO consistently ranking among the top five OSG entry points for a good part of the last year, the team has +successfully linked the machines together and ensured smooth operation, even without dedicated system administrators +on-site. Through a large dashboard that compiles information from each institution on which projects are actually +running on the nodes, IT leaders and CIOs can monitor and visualize each of the nodes. The dashboard also comes +with a data visualization of usage by university, including the PIs on each project, adding a personal component to +the monitoring.

+ +

Acknowledging the great success of this regional network organization, the National Science Foundation (NSF) +supports it. First, CyberTeam received a CC* award, and later, the entire GP-ARGO network received one — something that +no one has done before. “Applying as a network rather than a single institution made sense,” Andresen explained, “this +emphasizes this is a regional effort rather than an individual, institutional effort.”

+ +

GP-ARGO has truly set the curve in taking on a project of this scale and magnitude and doing it successfully. Reflecting +on what went well, Andresen gleamed, “I mean, we did it! We’ve got it working; we’re among the top five OSG entry points, +we’ve contributed 13 million CPU hours of science, and we have people who are excited and involved, which has been incredibly +fun and exciting.”

+ +

Furthermore, the team has ensured the sustainability of this operation. “Most of the institutions we’re working with don’t +have the expertise or the full-time employees to spare,” Andresen explained. Central administration by OSG has been instrumental +in this regard, especially recently, regarding restructuring administration roles with the leaving of Kyle Hutson. “If +something happens to whoever is the administrator, like leaving for another institution,” Hutson jokingly remarked, “we +have four people across four different institutions that all have administrative rights. I was a primary person doing that, +but I was not the only person who could do this, so somebody else can take over.”

+ +

Part of GP-ARGO’s appeal lies in their determination and dedication to helping other consortiums and networks aiming to achieve +similar goals. They provide a Git repository with all their code and emphasize the importance of both social and technical networks. +“Building trust and familiarity is crucial,” Andresen advised. “Get involved with the OSG and get to know people; having Derek +[Weitzel] available as the interface has been invaluable. Knowing the context and the people is much easier than starting from scratch.”

+ +

Despite the immense undertaking, Andresen commented on how fun and exciting the project has been, with the OSG playing a pivotal +role. “This program only builds stronger connections within the region between all these different professionals,” Weitzel +reflected. “It’s allowed us to reach out to different types of people, creating new opportunities that build on each other.”

+ +

Echoing this sentiment, Hutson highlighted the project’s impact in involving previously less-engaged institutions within GPN, noting the network’s recent expansion from 18 to 19 campuses. “Cameron University heard about some of the things we’re doing through their state network, had a spare box, and asked if they could get involved!” Hutson explained.

+ +

Building these regional connections was one of the most important steps in creating this network. The Midwest doesn’t +have any major supercomputing centers or institutions with enough people to drive a network of this magnitude forward. +However, Andresen noted that the key to their triumph in this large-scale and long-term endeavor lay in the region’s heritage: +“We knew we couldn’t do this alone, but here in the Midwest, our spiritual successor has always been that we look out +for and help each other out. That’s who we are, and it’s what has helped us reach remarkable feats.”

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/gpu-cloudburst.html b/preview-fall2024-info/gpu-cloudburst.html new file mode 100644 index 000000000..5bb96b743 --- /dev/null +++ b/preview-fall2024-info/gpu-cloudburst.html @@ -0,0 +1,539 @@ + + + + + + +SDSC and IceCube Center Conduct GPU Cloudburst Experiment + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ SDSC and IceCube Center Conduct GPU Cloudburst Experiment +

+ + +

The San Diego Supercomputer Center (SDSC) and the Wisconsin IceCube Particle Astrophysics Center (WIPAC) at the University of Wisconsin–Madison successfully completed a computational experiment as part of a multi-institution collaboration that marshalled all GPUs (graphics processing units) available for sale globally across Amazon Web Services, Microsoft Azure, and the Google Cloud Platform.

+ +

In all, some 51,500 GPUs were used during the approximately 2-hour experiment conducted on November 16 and funded under a National Science Foundation EAGER grant. The experiment used simulations from the IceCube Neutrino Observatory, an array of some 5,160 optical sensors deep within a cubic kilometer of ice at the South Pole. In 2017, researchers at the NSF-funded observatory found the first evidence of a source of high-energy cosmic neutrinos – subatomic particles that can emerge from their sources and pass through the universe unscathed, traveling for billions of light years to Earth from some of the most extreme environments in the universe.

+ +
+
+
+
+ +
+
+ +
+
+
+
Number of GPUs and PFLOP32s provided to IceCube computing
+
+ +

The experiment – completed just prior to the opening of +the International Conference for High Performance Computing, Networking, +Storage, and Analysis (SC19) in Denver, CO – was coordinated by Frank +Würthwein, SDSC Lead for High-Throughput Computing, and Benedikt Riedel, +Computing Manager for the IceCube Neutrino Observatory and Global +Computing Coordinator at WIPAC.

+ +

Igor Sfiligoi, SDSC’s lead scientific software developer +for high-throughput computing, and David Schultz, a production software +manager with IceCube, conducted the actual run.

+ +

“We focused this GPU cloud burst in the area of multi-messenger astrophysics, which is based on the +observation and analysis of what we call ‘messenger’ signals, in this +case neutrinos,” said Würthwein, also a physics professor at the +University of California San Diego and Executive Director of the OSG, a multi-disciplinary +research partnership specializing in high-throughput computational +services funded by the NSF.

+ +

“The NSF chose multi-messenger astronomy as one of its 10 Big Ideas to focus on during the next few years,” said Würthwein. “We now have instruments that can measure gravitational waves, neutrinos, and various forms of light to see the most violent events in the universe. We’re only starting to understand the physics behind such energetic celestial phenomena that can reach Earth from deepest space.”

+ +

The net result was a peak of about 51k GPUs of various kinds, with an aggregate peak of about 380 PFLOP32s based on NVIDIA specifications, according to Sfiligoi.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
GPU Specs   V100    P100    P40    P4     T4     M60     K80     K520
Num GPUs    9.2k    7.2k    2.1k   0.5k   4.6k   10.1k   12.5k   5.4k
PFLOP32s    132.2   68.1    25.2   2.5    38.6   48.8    51.6    12.4
+
Absolute number of resources provided to IceCube
+
+ +

“For comparison, the Number 1 TOP500 HPC system, Summit, (based at Oak Ridge National Laboratory) has a nominal performance of about 400 PFLOP32s. So, at peak, our cloud-based cluster provided almost 95% of the performance of Summit, at least for the purpose of IceCube simulations.”

+ +

The relatively short time span of the experiment showed +the ability to conduct a massive amount of data processing within a very +short period – an advantage for research projects that must meet a tight +deadline. Francis Halzen, principal investigator for IceCube, a +Distinguished Professor at the University of Wisconsin–Madison, and +director of the university’s Institute for Elementary Particle Physics, +foresaw this several years ago.

+ +

“We have initiated an effort to improve the calibration +of the instrument that will result in sensitivity improved by an +estimated factor of four,” wrote Halzen. “We can apply this improvement +to 10 years of archived data, thus obtaining the equivalent of 40 years +of current IceCube data.”

+ +

“We conducted this experiment with three goals in mind,” +said IceCube’s Riedel. “One obvious goal was to produce simulations that +will be used to do science with IceCube for multi-messenger +astrophysics. But we also wanted to understand the readiness of our +cyberinfrastructure for bursting into future Exascale-class facilities +such as Argonne’s Aurora or Oak Ridge’s Frontier, when they become +available. And more generally, we sought to determine how much GPU +capacity can be bought today for an hour or so GPU burst in the +commercial cloud.”

+ +

“This was a social experiment as well,” added Würthwein. “We scavenged up all available GPUs on demand across 28 cloud regions across three continents – North America, Europe, and Asia. The results of this experiment tell us that we can elastically burst to very large scales of GPUs using the cloud, given that exascale computers don’t exist now but may soon be used in the coming years. The demo also shows that such bursting of massive data is suitable for a wide range of challenges across astronomy and other sciences. To the extent that the elasticity is there, we believe that this can be applied across all of scientific research to get results quickly.”

+ +
+ +
Regions used in the GPU experiment across AWS, GCP, and Azure
+
+ +

HTCondor was used to integrate all purchased GPUs into a +single resource pool to which IceCube submitted their workflows from +their home base in Wisconsin. This was accomplished by aggregating +resources in each cloud region, and then aggregating those aggregators +into a single global pool at SDSC.
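As an illustration of what feeding work to such an aggregated pool can look like, here is a minimal sketch using HTCondor’s Python bindings; the executable name, resource requests, and job count are invented for this example and are not IceCube’s production workflow.

import htcondor

# Minimal sketch (hypothetical names): describe one GPU simulation task and
# hand 1,000 independent copies to the local schedd, which routes them into
# the aggregated pool described above.
job = htcondor.Submit({
    "executable": "simulate_photons.sh",   # hypothetical per-task script
    "arguments": "$(Process)",             # each job gets a distinct index
    "request_gpus": "1",
    "request_cpus": "1",
    "request_memory": "4GB",
    "output": "sim_$(Process).out",
    "error": "sim_$(Process).err",
    "log": "sim.log",
})

schedd = htcondor.Schedd()                 # the submit-side job queue
result = schedd.submit(job, count=1000)    # 1,000 independent simulations
print("Submitted cluster", result.cluster())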

+ +

“This is very similar to the production infrastructure +that OSG operates for IceCube to aggregate dozens of ‘on-prem’ clusters +into a single global resource pool across the U.S., Canada, and Europe,” +said Sfiligoi.

+ +

An additional experiment to reach even higher scales is +likely to be made sometime around the Christmas and New Year holidays, +when commercial GPU use is traditionally lower, and therefore +availability of such GPUs for scientific research is greater.

+ +

Acknowledgment: Thanks to the NSF for their support of this endeavor under awards OAC-1941481, MPS-1148698, OAC-1841530 and OAC-1826967. Special thanks also to all the support personnel from AWS, Azure, Google Cloud and Strategic Blue, who helped raise all the necessary quotas and limits. And all of this would of course not be possible without the hard work of Igor Sfiligoi, David Schultz, Frank Würthwein and Benedikt Riedel.

+ + + + + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/hanna-lab.html b/preview-fall2024-info/hanna-lab.html new file mode 100644 index 000000000..3ea8445bf --- /dev/null +++ b/preview-fall2024-info/hanna-lab.html @@ -0,0 +1,413 @@ + + + + + + +Training a dog and training a robot aren’t so different + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Training a dog and training a robot aren’t so different +

+

For AI and robotics researcher Josiah Hanna and his lab, high throughput computing is a critical tool in reinforcement learning.

+ +

+ +

Artificial intelligence (AI) robotics expert Josiah Hanna’s research has a lot in common with training dogs: Both robotics training and dog +training use a type of reinforcement learning to encourage the desired behavior. With computers or robots, however, this type of reinforcement learning is a branch of machine learning (ML) that models an intelligent agent interacting with a task environment.

+ +

Comparing robotic reinforcement learning to training a dog how to sit, Hanna explains that “you don’t explicitly tell the dog how to sit, but you coax the dog into sitting, and when it +shows that behavior, you reward that. Over time, the robot dog learns these are the actions that lead to getting the reward, and it learns to avoid actions that don’t lead to the reward. +We want to give computers and robots the ability to learn through experience, by seeing what works and what leads to them achieving the goals we set for them. Then, when they see the +actions that lead to reaching their goals, they know that they should do that again in the future.”

+ +

In other words, Hanna’s research specifically seeks to develop algorithms that enable computers to learn goal-oriented behavior in order to better accomplish their goals. Unlike a dog, +robots aren’t necessarily rewarded but instead learn from past mistakes and take that information to determine what a successful action is. Through trial and error, the agent learns +which actions it needs to take to achieve its goals. “It’s critical that they’re [computers] able to learn through their experience. That’s what my research and the whole field of +reinforcement learning studies — the kinds of algorithms which will enable this to happen,” Hanna elaborates.

+ +

Another way that UW–Madison Computer Sciences Ph.D. student Nicholas Corrado describes it is like teaching a robot how to walk. Initially, the +robot moves its legs randomly and likely falls over. Through trial and error, however, the robot eventually discovers that it can make forward progress by moving its legs to take +only a single step forward. Wanting to maximize its forward progress, the robot then increases the probability of executing this stepping behavior and eventually learns how to walk. +“It requires a lot of computing to do this because starting from random movements, and getting to walking behavior is not super straightforward,” Corrado elaborates.
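As a toy illustration of this trial-and-error loop (our sketch, not code from the Hanna lab), a few lines of tabular Q-learning can teach an agent to “walk” down a five-state chain by rewarding forward progress:

import random

# Toy "learn to walk" chain: states 0..4, action 0 steps forward, action 1
# stumbles back to the start. Reaching state 4 earns a reward of 1.
N_STATES = 5
ACTIONS = [0, 1]
Q = {(s, a): 0.0 for s in range(N_STATES) for a in ACTIONS}
alpha, gamma, epsilon = 0.5, 0.9, 0.1   # learning rate, discount, exploration

for episode in range(500):
    s = 0
    while s < N_STATES - 1:
        # Mostly exploit the best-known action, sometimes explore at random.
        if random.random() < epsilon:
            a = random.choice(ACTIONS)
        else:
            a = max(ACTIONS, key=lambda act: Q[(s, act)])
        s_next = s + 1 if a == 0 else 0
        reward = 1.0 if s_next == N_STATES - 1 else 0.0
        # Nudge this (state, action) value toward reward + discounted future.
        best_next = max(Q[(s_next, act)] for act in ACTIONS)
        Q[(s, a)] += alpha * (reward + gamma * best_next - Q[(s, a)])
        s = s_next

# After training, "step forward" (action 0) is preferred in every state.
print([max(ACTIONS, key=lambda act: Q[(s, act)]) for s in range(N_STATES - 1)])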

+ +

Unlike other types of ML that are classification-based, a lot of reinforcement learning relies on simulations because it’s based on modeling agents performing some task. The difference +between other areas of ML and reinforcement learning, Corrado explains, is that with reinforcement learning, “You have this multi-step decision-making process that you must learn how +to solve optimally. It’s so much harder because the agent needs to learn how its action right now affects its performance way down the road, so reinforcement learning feels like a much +harder problem to focus on than what we call supervised learning methods.”

+ +

Since learning on physical robots is difficult, Hanna’s lab will sometimes use simulations as a “surrogate” for physical robots. This is where high throughput computing (HTC) becomes a valuable tool. Hanna shares that “it’s really useful to have high throughput computing so you can run your simulation or learning algorithm for many different processes. You can see how different learning algorithms or different parameters for learning algorithms affect the ability of an algorithm to produce robust behavior or high-performing behavior.” In this sense, the Center for High Throughput Computing (CHTC) is a “huge resource” for Hanna’s students, who evaluate a wide variety of different algorithms they think might work better than previous ones. It greatly increases the lab’s experimentation bandwidth, or how many experiments they can run. In fact, the Hanna Lab’s CHTC usage totals nearly 5.7 million hours.

+ +

One project the Hanna lab is working on is enabling robots to learn to play soccer, Corrado says. With reinforcement learning, researchers programmed robots to play soccer and then +entered an annual international competition where they placed third despite it being their first time participating, “greatly exceeding our expectations,” +Corrado highlights. The end goal isn’t necessarily to train robots how to play soccer but rather “develop reinforcement learning techniques that enable us to train agents to work +cooperatively” and “develop techniques that improve the data efficiency of reinforcement learning. If we can reduce the data requirement, reinforcement learning is going to be much, +much more practical for industrial applications.”

+ +
From the annual RoboCup Standard Platform League (SPL) competition, a research competition that aims to advance the capabilities of robotics in challenging, real-time domains.
+
+ +


Even before Hanna came to UW–Madison, he had experience with HTCondor Software Suite (HTCSS) from graduate school. It was a “critical resource” for Hanna then +and remains as such today in his role as a researcher and professor at UW–Madison. “One of the first things I did when I got here [UW–Madison] was tap into HTC resources,” Hanna recalls. +As a new principal investigator (PI), Hanna also had a meeting with a CHTC facilitator to learn how to obtain access and what resources it provides.

+ +

Since he found the tool so valuable while he was a graduate student, Hanna also tries to set up his students with the CHTC early on instead of having them run experiments locally on their computers. Hanna shares, “It’s a great resource we have to leverage that helps speed things up.” For the research group, running a high volume of simulations and experiments is a key enabler of progress. This means Hanna encourages his students to run experiments whenever they reach uncertainties, which can help provide clarity. “Oftentimes it’s just easier to run the experiment. Something I try to guide the students on is knowing when some experiments just need to be run to understand some aspect of designing reinforcement learning algorithms.” His students are developing their own pipelines with CHTC, learning how to work more efficiently with it, and writing scripts to launch experiments with it.

+ +

To put into context exactly how many experiments reinforcement learning requires, Corrado says, “Benchmarks contain anywhere from 5–10 tasks, and maybe you need to compare four different algorithms and run 20 independent runs of each algorithm on each task. At that point, you’re running hundreds of experiments. I’ve even had to run thousands of experiments.” In fact, for a paper currently under review, through performing a hyperparameter sweep of an algorithm — which determines the hyperparameter combination that performs best out of many combinations — Corrado submitted enough jobs to hit CHTC’s default limit of 10,000 jobs per submission. This was something he definitely could not have accomplished on his personal laptop or with a lab-specific server.
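A sweep at this scale is typically expressed as a single submission with per-job item data. The sketch below, using HTCondor’s Python bindings with made-up hyperparameter values and a hypothetical train.py, shows how a modest grid multiplies into many queued jobs:

import itertools
import htcondor

# Hypothetical sweep: every combination of these settings becomes one job.
learning_rates = [1e-4, 3e-4, 1e-3]
batch_sizes = [32, 64, 128]
seeds = range(20)                      # 20 independent runs per setting

itemdata = [
    {"lr": str(lr), "bs": str(bs), "seed": str(seed)}
    for lr, bs, seed in itertools.product(learning_rates, batch_sizes, seeds)
]                                      # 3 * 3 * 20 = 180 jobs here; larger
                                       # grids quickly reach thousands

job = htcondor.Submit({
    "executable": "train.py",          # hypothetical training script
    "arguments": "--lr $(lr) --batch-size $(bs) --seed $(seed)",
    "request_cpus": "1",
    "request_memory": "2GB",
    "output": "runs/lr$(lr)_bs$(bs)_s$(seed).out",
    "error": "runs/lr$(lr)_bs$(bs)_s$(seed).err",
    "log": "sweep.log",
})

result = htcondor.Schedd().submit(job, itemdata=iter(itemdata))
print("Submitted", len(itemdata), "jobs in cluster", result.cluster())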

+ +

Hanna says he is also seeing a shift toward more high-performance computing with GPUs in his lab, which CHTC has helped enable. “Up until recently, reinforcement learning was +separate from other forms of deep learning that were going on, and you really couldn’t benefit that much from a GPU unless you had a lot of CPUs as well, which is what high +throughput computing is really good for,” Hanna explains.

+ +

When asked about the future use of CHTC in his lab, Hanna imagines spending more time with multi-processing and networking several CPUs together, both of which reinforcement +learning experiments could benefit from. As CHTC continues increasing its GPU capacity, Hanna says he plans to use that more in their work as well.

+ +

Without the CHTC, the type of large-scale experimentation the Hanna Lab uses would be impractical, Corrado says. For this type of work, HTC is almost always necessary and continues +to expand the horizons of the lab.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/hannah.html b/preview-fall2024-info/hannah.html new file mode 100644 index 000000000..a110a460b --- /dev/null +++ b/preview-fall2024-info/hannah.html @@ -0,0 +1,484 @@ + + + + + + +Get To Know Student Communications Specialist Hannah Cheren + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Get To Know Student Communications Specialist Hannah Cheren +

+

During her two-year tenure with the Morgridge Institute for Research - Research Computing lab, Hannah Cheren made significant science writing contributions and along the way changed the direction of her life.

+ +
+ Hannah Cheren, Student Writer +
Hannah Cheren, Student Writer.
+
+ +

Hannah is a senior undergraduate student in Life Sciences Communications and Statistics, simultaneously working towards a certificate in Data Science. She is a contributing writer for the Center for High Throughput Computing (CHTC) and the National Science Foundation funded PATh project, publishing 19 science and research computing articles describing high-throughput research computing and highlighting the researchers who utilize these organizations’ services. After her graduation this May, Hannah will be joining a public relations and communications consulting group for the life sciences as an Account Coordinator.

+ +

Hannah takes her well-earned turn at center stage to share a bit about herself, her experiences and professional trajectory so far, as well as her plans after graduation.

+ +

What piqued your interest in life sciences communication?

+ +

I came to college intending to be a computer science major, but I immediately realized it wasn’t for me. I had a bit of a freak-out moment, but eventually made my way to the career advising office, where I was given a list of all the majors offered by the university so I could see all my options at a glance.

+ +

Life Sciences Communication (LSC) stood out to me as an interesting route because I have always had an interest in writing and communications. I still felt like I didn’t know much about LSC, so I reached out to Tera Wagner, the former Life Sciences Communication advisor, who really sold it to me.

+ +
+ Hannah Cheren and former LSC advisor, Tera Wagner. +

Hannah Cheren and former LSC advisor, Tera Wagner.
+
+ +

What drew me in was how different it is from journalism and other communications-based majors in the sense that you’re taught to take complex scientific information and translate it to a more easily digestible version that just about anybody can understand!

+ +

How did you hear about / get started as a writer with the OSG/PATh communications team at Morgridge?

+ +

I learned about the job position from the advisor I just spoke about, Tera Wagner. She thought it might be a good fit for me, and it turns out it was!

+ +

Why this position in particular?

+ +

The job description captured my attention, and the interview process reinforced my interest, for sure. I remember being asked how well I could handle criticism, and while I was a bit stunned by the question, I knew I would be challenged and learn a lot in this role. As a writer, half the job is having people critique and edit your work. I knew this was the field I’d eventually like to go into, so learning to handle criticism this early in my career was a skill that I wanted to learn sooner rather than later.

+ +

How would you describe your experience so far working with the rest of the team?

+ +

This job, in general, has been life-changing; it’s set me up for success in more ways than I expected. I remember the first couple of months were really challenging for me - this was my first “real” job, and even starting out, I felt like I had been thrown to the wolves. The summer of 2022 was a big turning point; I had more time to fully immerse myself and learn all I could, and started feeling a lot more confident. We had recently wrapped up HTCondor Week 2022, and within a couple of months, I had written and published seven articles about researchers from the event. It was a lot, but I became accustomed to how fast-paced this job could get, and it helped improve my efficiency, which I would say has really helped set me up for the real world.

+ +

In terms of ‘lows,’ I’m not sure what I would classify as a low. Honestly, it has all been a great learning experience. Even when things go wrong, I take it all in good stride.

+ +

Favorite story you’ve written to date and why?

+ +

The Lightning Talks article was the one that I (not to be dramatic) felt like I put in my blood, sweat, and tears into. It was pretty intense because it involved interviewing and writing about work from 11 different researchers. The article ended up being really cool, and I’m very proud of it!

+ +

What kind of writer did you hope you’d become prior to starting and how has that changed in the time you’ve been here?

+ +

When I was younger, I was really into writing and reading. My dream job at the time was to be a novelist. I used to write all the time, from elementary school all the way to high school, so it has always been in the picture.

+ +

As I got older, I began to skew away from writing because I wasn’t sure how I could make a career out of it and it didn’t seem to be a highly sought-after professional path, or so I thought.

+ +

But this experience has felt really full circle. I feel like this job has allowed me to find my “writing voice” again - while still maintaining the scientific theme - which has been exhilarating and inspiring for me. I feel I have been able to come into my own as a science writer for PATh and I learned what was expected of me in this position. Writing, coupled with video editing and scheduling Tweets, helped me feel more comfortable with the organization and further hone technical and soft skills.

+ +

How would you say this position has helped you learn about High Throughput Computing (HTC)?

+ +

It has helped a ton! I went from having no knowledge about HTC to enrolling in a class that teaches HTC because I have grown so much in my confidence.

+ +

Why do you think communication is important for the PATh project?

+ +

The research that occurs within the PATh project is not only interesting, but so incredibly important within each field. Not only that, I think it’s important to communicate about this work in a way that people who aren’t in the field can understand it. By doing this, I hope to show researchers in all stages of their career or students who are interested in this type of work that it’s not all scary and complicated. Communicating about the PATh project, hopefully, motivates people who are already using HTC to stick with it and can encourage those who think it might be a good fit for their research to try it out.

+ +

What would you miss about your job when you leave?

+ +

Oh my gosh, everything! I’ll, of course, miss the people I work with; I will miss my little cubicle where I can see everyone passing by and be near the people I work closest with. I will also miss the work - it’s true what they say; if you do what you love, you’ll never work a day in your life. I honestly get so excited to go to work because I just think what we do is so incredible. I’ll also miss the researchers - it’s been so great to be able to interview and interact with so many different kinds of people and learn about topics and research they’re passionate about. I’m so grateful for my time here and I’m excited about what else I get to do in between now and when I graduate!

+ +

What would be your advice to upcoming writers who also aspire to work in life science communications?

+ +

This field is often fast-paced and can sometimes feel overwhelming. My advice is not to get discouraged by it; eventually, you’ll get used to it, and it’ll be part of your routine. Also, I think something that a lot of science writers experience in the beginning of their careers is “losing their voice.” Science writing can be very technical, and as a writer, it can sometimes be disheartening to sacrifice writing with your style to writing with more jargon to a specific audience. After a while, you’ll find your “science writing voice;” practice truly does make perfect, and with a little time (and lots of editing), you’ll begin to produce writing that sounds like you but still delivers on that science aspect. Speaking of editing, your writings may go through many fine-tuning rounds before publication. Try not to take it personally, and be confident in your writing! Take every piece of criticism as a learning opportunity and make the best out of it.

+ +

What is your hope for our industry?

+ +

I hope to keep seeing a wide variety of people with different backgrounds and interests find LSC. I think many people see science communication and think they need a background in science and have to write all day, which couldn’t be farther from the truth. While I write a lot, I do it because I love it! However, people can go so many other avenues; from social media consulting to marketing, videography, lab work, genetics, social science research, and so many more; I can’t even name them all! For example, I’m currently conducting research using TikTok as my data source, which I didn’t even know would be a thing. I hope to continue to see this field continue to branch out and break down boundaries on what can be studied.

+ +

I’m curious about your research on TikTok. Can you talk more about that?

+ +

Yes! I’m currently writing a thesis on how TikTok has become a platform for psychological polarization - political polarization, in particular. We’re seeing an app that was originally intended to be an entertainment platform become a hub for information, including science communication. This new type of content “blew up” during the height of the pandemic in 2020, when scientists and doctors discovered that creating short videos on TikTok was a great way to reach a wide variety of audiences. However, as COVID-19 became politicized in the media, it did the same on TikTok. What’s even crazier than this is these videos about COVID-19 and the vaccine seem to have polarized its users to an extent unlike anything we’ve seen before. I think that’s super interesting and extremely important to study.

+ +

This thesis was inspired by a book I read called Frenemies by Jaime E. Settle. She essentially studied the same thing I described but on Facebook. I thought Settle’s train of thought and reasoning were so interesting, but I remember finishing it and thinking, “too bad this isn’t going to matter in a couple of decades.” While this book really opened the door to this bigger conversation, Facebook is not a platform younger generations use. So, using her line of thinking, I wanted to conduct similar research using TikTok, an app that’s wildly more popular among my generation and younger and has users that regularly communicate about scientific issues. Saying that I do research on TikTok sounds a little silly, but I really do think that my work will be important for studying political polarization in the future!

+ +

What do you think you have accomplished for PATh?

+ +

I would like to think my work has given researchers something tangible to share with their families, friends, and peers about the details of their research. Everyone I’ve interviewed so far is doing such fascinating work, and my goal when I’m writing about it is to shine as big as a light on them and all their hard work as much as possible. With each article, I hope these researchers can read through my eyes how amazing all their accomplishments are and have a space where they can brag about it because they deserve to!

+ +

On the flip side, I hope that I show researchers who may think that HTC can advance their work that it’s possible to get started. You don’t need to be a rocket scientist or even a computer scientist to use these resources; anyone who can benefit from using HTC to make their lives just a little easier should absolutely try it.

+ +

How has your work here impacted how you think about your future and your goals?

+ +

First and foremost, it has impacted how I think about science writing as not only an interest, but a possible career. I have learned so much and gained so much valuable experience and people seem genuinely curious about what it is I do.

+ +

The jobs I have applied to post-graduation are more science writing and market research-type jobs at life sciences companies – which even a couple of years ago isn’t the trajectory I thought I would follow. That being said, I couldn’t be happier in discovering my passion for this type of work - I love my job so much, and I definitely see myself doing something like this for a very long time!

+ +

Hannah outside of work

+ +
+ Hannah Cheren’s dog. +

Hannah Cheren’s dog.
+
+ +

When do you feel most like yourself?

+ +

I love Madison, but I’m an east coast girl at heart; I’m from New Jersey, and spending time with my family there is so important to me. We have a very active seven-year-old dog and I love taking her on walks with my two younger sisters, who have always been my best friends! They’re both at school as well, and I love spending as much time as I can with them and my parents!

+ +

If you could have dinner with, interview, and write about one person, alive or dead, who would it be and why?

+ +

Katherine Johnson. She was a mathematician at NASA and calculated trajectories that led Apollo 11 to the moon. She was also one of the first African American women to work at NASA.

+ +

I was in high school when the movie Hidden Figures came out. This movie tells the story of three young African American women working at NASA, including Katherine Johnson. I was in complete awe of Taraji P. Henson’s portrayal of Johnson, and I instantly became fascinated by her and her story. This movie was so inspiring to me as a young girl interested in pursuing a STEM-related field, and Katherine Johnson, in particular, was a character who really stuck out to me. She passed away a couple of years ago, but I would’ve loved nothing more than to speak with her and express how much of an impact she had on me as a girl in STEM!

+ +

If you had to describe your personality in a song, what would be the title?

+ +

Bubbly! I’m a big optimist.

+ +
+ Hannah and her sisters at an event. +

Hannah and her sisters at an event.
+
+ +

What animal intrigues you the most and why?

+ +

Cows. We don’t see a lot of cows in New Jersey…so coming to Wisconsin and seeing them in fields every five minutes was so funny to me. I’ve had a running joke ever since that they’re my favorite animal, but now I think I tricked myself into actually believing it, so they intrigue me the most for sure!

+ +

Quick-fire questions

+ +

Vacation or staycation?

+ +

Vacation. I love to travel! I’m going to Italy to visit my sister abroad and Israel during the summer with my sisters and cousin for birthright, and I couldn’t be more excited.

+ +

TikTok or Instagram?

+ +

TikTok.

+ +

Rom-com, action, supernatural or horror movies?

+ +

Action; my friends from home got me on a Marvel binge recently!

+ +

Fine dining or casual?

+ +

Casual.

+ +

Favorite decade for music?

+ +

This is going to be so boring, but I don’t think I have a favorite decade of music. Most of what I listen to is from this decade, though. My favorite artist currently is Quinn XCII.

+ +

Thrifting or high street?

+ +

Thrifting, for sure!

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/high-throughput-computing-fostering-data-science-without-limits.html b/preview-fall2024-info/high-throughput-computing-fostering-data-science-without-limits.html new file mode 100644 index 000000000..98b822e76 --- /dev/null +++ b/preview-fall2024-info/high-throughput-computing-fostering-data-science-without-limits.html @@ -0,0 +1,361 @@ + + + + + + +High-throughput computing: Fostering data science without limits + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ High-throughput computing: Fostering data science without limits +

+
+ Conference Room +
+ +

Biology and big data are now completely inseparable.

+ +

Most modern biology produces data sets too massive to manage by conventional standards, and the challenge will increase exponentially as the sophistication of the science grows.

+ +

The Center for High-Throughput Computing (CHTC), a joint partnership of UW-Madison School of Computer, Data & Information Sciences and the Morgridge Institute, sees this onslaught of data and says: Bring it on.

+ +

“We have established a goal of never letting the amount of data limit the experimental approach of the scientists,” says Miron Livny, the founder of high-throughput computing (HTC). Livny has been championing HTC for more than three decades as a UW-Madison computer scientist, and more recently as the Morgridge Institute’s lead investigator of research computing.

+ +

HTCondor is a task-scheduling software approach that essentially breaks a larger computational task into smaller pieces, allowing researchers to analyze more data (hence the term “high throughput”). The team now handles 250-300 projects a year, double that of five years ago, and uses hundreds of millions of hours of computing time.
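As a rough illustration of that splitting idea (not the Morgridge team’s actual pipeline), here is a minimal sketch using HTCondor’s Python bindings, with hypothetical script and file names:

import htcondor
from pathlib import Path

# Sketch of the core HTC idea: one large analysis becomes many small,
# independent jobs, here one job per input file. Names are hypothetical.
inputs = sorted(Path("data").glob("*.csv"))

job = htcondor.Submit({
    "executable": "analyze_one.sh",            # hypothetical per-file script
    "arguments": "$(infile)",
    "transfer_input_files": "$(infile)",
    "request_cpus": "1",
    "request_memory": "1GB",
    "output": "out/$(Cluster)_$(Process).out",
    "error": "out/$(Cluster)_$(Process).err",
    "log": "analysis.log",
})

itemdata = [{"infile": str(p)} for p in inputs]  # one dict per job
result = htcondor.Schedd().submit(job, itemdata=iter(itemdata))
print("Queued", len(itemdata), "independent jobs")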

+ +

And that’s just at UW-Madison. The global Open Science Grid provides HTC resources to the world, where it is the backbone system for Nobel Prize-winning projects such as detecting gravitational waves and discovering new subatomic particles. Just this year, it made a splash for its contribution to the discovery of a massive black hole in the center of our galaxy.

+ +

This service is gaining adherents on campus because scientists are learning that it is more than someone asking, “What technology do you need?” Research computing is a collaboration, and the people HTC brings to the equation are more important than the technology.

+ +

Livny says the HTC Facilitation Team is a great example. The emphasis on facilitators was way ahead of its time, almost unheard of in computer science circles. These are the translators who can work their magic between the technology and the bench experiments — finding the best way to maximize the data for the scientists.

+ +

Read the Full Story on the Morgridge Website.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/htc-24-event.html b/preview-fall2024-info/htc-24-event.html new file mode 100644 index 000000000..7362c0787 --- /dev/null +++ b/preview-fall2024-info/htc-24-event.html @@ -0,0 +1,420 @@ + + + + + + +High Throughput Community Builds Stronger Ties at HTC24 Week + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ High Throughput Community Builds Stronger Ties at HTC24 Week +

+
+Photos from HTC24 +
Photos from HTC24
+
+ +

CHTC and the OSG Consortium hosted their second annual Throughput Computing Week in Madison, Wisconsin, joined in person and remotely by 388 participants. This year’s themes included dedicated sessions on campus cyberinfrastructure, talks on AI and machine learning enabled by high throughput computing, and tutorials and presentations on the new Pelican Platform project. You can find a detailed overview of HTC24 here.

+ +

July 8th-12th marked the Center for High Throughput Computing’s (CHTC) and the OSG Consortium’s second annual Throughput Computing Week, HTC24. A total of 156 in-person attendees and 250 remote participants, representing 122 institutions, came together to share their contributions and insights for all things high throughput computing (HTC). Campuses from across the country intent on supporting research joined the event, as did researchers from a wide range of science domains. Campuses varied significantly in size and experience with throughput computing but shared the common goal of advancing research opportunities on their campuses. Similarly, researchers in attendance ranged broadly in their areas of research, from physics to biology to oceanography, but shared the drive to advance their work through HTC.

+ +
+ Miron Livny speaking to the crowd +
Miron Livny speaking to the crowd
+
+ +

This week-long event included notable moments bringing together the high throughput community. The presentation by HTC24’s keynote speaker Anthony Gitter, Associate Professor and Principal Investigator at the Morgridge Institute for Research, inspired the audience. His talk, “Unleashing the power of protein engineering with artificial intelligence,” explored the intersection of AI and protein engineering in synthetic biology. Gitter discussed AI-guided approaches like Mutational Effect Transfer Learning (METL) for predicting the effects of sequence modifications on protein function. The talk also discussed advancements in supervised learning models and deep mutational scanning techniques, showcasing AI’s transformative potential in optimizing protein functionalities for medicine and industry.

+ +
+ Anthony Gitter, HTC24 Keynote Speaker +
Anthony Gitter, HTC24 Keynote Speaker
+
+ +

Another highlight of the week was the David Swanson Award presentation. Christina Koch, OSG lead facilitator, was joined by Ken Bloom, of the Holland Computing Center of the University of Nebraska-Lincoln, to present the annual David Swanson Award. David Swanson “was very committed to the goals for the OSG,” Bloom noted. Ronda Swanson joined the session and spoke on David’s passion for science and the HTC community. Ronda proudly presented Cort Posnansky, researcher for the LIGO-VIRGO Collaboration and former OSG School student, with the 2024 David Swanson Award. Posnansky shared his research with the community, highlighting the significance of using high throughput computing in the search for gravitational waves from astrophysical collisions.

+ +

Miron Livny, Director of the CHTC and Technical Director of the OSG, opened HTC24 Week with a session centered around data, addressing the challenges of making large data sets accessible for research and available to the public or research communities. Along with Livny, OSG Executive Director Frank Wurthwein and Pelican Principal Investigator Brian Bockelman spoke on the impacts of the Open Science Data Federation (OSDF) and how to further use these resources.

+ +
+ Kevin Thompson, NSF Program Director +
Kevin Thompson, NSF Program Director
+
+ +

Other sessions concentrated on integrating campuses into the OSPool and the OSDF, featuring talks by National Science Foundation (NSF) Program Director Kevin Thompson, Frank Wuerthwein, OSG Campus Coordinator Tim Cartwright, and Minority Serving – Cyberinfrastructure (CI) Consortium Facilitator Russell Hofmann. Thompson addressed the pivotal role of the Campus Cyberinfrastructure (CC*) Program in advancing networking capabilities essential for scientific research and education (R&E), focusing on upgrading campus networks and fostering partnerships to optimize cyberinfrastructure for scientific discovery. Wuerthwein transitioned to the challenge encountered by institutions of higher learning that lack the resources or expertise to maintain batch and storage clusters. He proposed solutions aimed at reducing the total cost of ownership (TCO) for compute and data infrastructure.

+ +

Additionally, Tim Cartwright, OSG Campus Coordinator, provided an overview of OSG Campus Services and its tailored support for campuses at various engagement stages with the OSG. Following this, Todd Tannenbaum, HTCondor Software Lead, expanded on Cartwright’s themes and introduced the new HTCondor-CE dashboard which will be rolled out to campuses contributing resources to the OSPool this month.

+ +

This year there were also dedicated sessions for campuses covering topics including the CC* solicitation process and campus contributions to the OSPool. These sessions gave campus representatives, whether already involved or considering proposals, the chance to dive deeper into the CC* process. Cartwright touched on the OSG’s campus outreach services and how to connect, then opened the panel up to the institutions, allowing for a Q&A and ‘stump the experts’ session.

+ +
+ Brian Bockelman holding the new HTC24 award +
Brian Bockelman holding the new HTC24 award
+
+ +

Other sessions highlighted the Pelican Platform, featuring insights from Brian Bockelman and other Pelican team members. They addressed operational aspects of the OSDF and integration methods with Pelican. The session also playfully introduced the new HTC award, a 3-D printed pelican tentatively named “The Beakelman” or “The Brian,” recognizing lightning talk presenters.

+ +

To foster engagement and feedback, Tuesday sessions concluded with “Lightning Talks” from the community, offering ideas and suggestions. Seven CHTC Fellows also showcased their work and faced challenging questions from the engaged audience.

+ +
+
+Pratham Patel +
+
+Neha Talluri +
+
+

Fellows Pratham Patel and Neha Talluri presenting their projects.

+
+
+ +

Adding some lighter notes, CHTC’s Todd Tannenbaum and Greg Thain introduced the Early Late Night Show, a Late Night Show parody with a CHTC twist. Host of the show, Tannenbaum, lightheartedly interviewed Miron Livny, Frank Wurthwein, and Brian Bockelman.

+ +
+ Todd Tannenbaum interviewing Miron Livny. +
Todd Tannenbaum interviewing Miron Livny.
+
+ +

As well as the educational and comedic segments of HTC24 Week, participants also took part in activities outside of the meeting rooms, allowing them to strengthen their relationships with each other and to develop robust ties between collaborating working groups. Following Tuesday’s session, CHTC’s Todd Tannenbaum led a group on a bike ride around the UW Arboretum. Participants also had opportunities throughout the week to kick back at the Memorial Union Terrace, kayak on Lake Wingra, and sing karaoke. The week wasn’t without its unexpected moments either—like the memorable evening where 12 conference-goers and staff found themselves unexpectedly stuck in an elevator. They were rescued by firefighters who opened the elevator hatch and provided a ladder for them to escape. This unplanned event prompted shared jokes and camaraderie, leading to suggestions that for next year’s HTC week, an elevator bonding session or escape room should be added to the official social schedule.

+ +
+
+kayak +
+
+Neha +
+
+

Participants kayaking (on the left), and an image from the elevator (on the right).

+
+
+ +

Beyond the presentations, the week provided valuable opportunities for meaningful connections between the CHTC and OSG teams and HTC24 attendees. Discussions over lunch, during coffee breaks and throughout the event provided a chance for participants to learn more about each other and their work, fostering stronger connections and friendships that are sure to endure.

+ + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/htc.html b/preview-fall2024-info/htc.html new file mode 100644 index 000000000..04c2dd9a7 --- /dev/null +++ b/preview-fall2024-info/htc.html @@ -0,0 +1,378 @@ + + + + + + +What is High Throughput Computing? + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ What is High Throughput Computing? +

+

For many experimental scientists, scientific progress and quality of +research are strongly linked to computing throughput. In other words, +most scientists are concerned with how many floating point operations +per month or per year they can extract from their computing +environment rather than the number of such operations the +environment can provide them per second or minute. Floating point +operations per second (FLOPS) has been the yardstick used by most +High Performance Computing (HPC) efforts to evaluate their systems. +Little attention has been devoted by the computing community to +environments that can deliver large amounts of processing capacity +over long periods of time. We refer to such environments as High +Throughput Computing (HTC) environments.
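A quick back-of-the-envelope sketch in Python (with invented machine sizes, purely for illustration) makes the distinction concrete:

# Illustration with made-up numbers: a modest pool available all month can
# deliver more total floating point operations than a much faster machine
# that a scientist can only use in short bursts.
SECONDS_PER_MONTH = 30 * 24 * 3600           # ~2.6 million seconds

burst_flops = 100e12                          # 100 TFLOPS machine...
burst_seconds = 10 * 3600                     # ...but only 10 hours of access
burst_total = burst_flops * burst_seconds     # ~3.6e18 operations

pool_flops = 2e12                             # 2 TFLOPS of commodity capacity...
pool_total = pool_flops * SECONDS_PER_MONTH   # ...sustained: ~5.2e18 operations

print(f"burst machine: {burst_total:.1e} FLOP this month")
print(f"steady pool:   {pool_total:.1e} FLOP this month")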

+ +

For more than a decade, the HTCondor team at the Computer Sciences +Department at the University of Wisconsin-Madison has been developing and +evaluating mechanisms and policies that support HTC on large collections +of distributively owned heterogeneous computing resources. We first introduced the +distinction between High Performance Computing (HPC) and High Throughput Computing +(HTC) in a +seminar at the NASA Goddard Flight Center +in July of 1996 and a month later +at the European Laboratory for Particle Physics (CERN). In June of 1997 HPCWire +published an interview on High Throughput Computing.

+ +

The key to HTC is effective management and exploitation of all available computing resources. Since the computing needs of most scientists can be satisfied these days by commodity CPUs and memory, high efficiency is not playing a major role in an HTC environment. The main challenge a typical HTC environment faces is how to maximize the amount of resources accessible to its customers. Distributed ownership of computing resources is the major obstacle such an environment has to overcome in order to expand the pool of resources it can draw from. Recent trends in the cost/performance ratio of computer hardware have placed the control (ownership) over powerful computing resources in the hands of individuals and small groups. These distributed owners will be willing to include their resources in an HTC environment only after they are convinced that their needs will be addressed and their rights protected.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/htcondor-european-workshop.html b/preview-fall2024-info/htcondor-european-workshop.html new file mode 100644 index 000000000..9eee59c89 --- /dev/null +++ b/preview-fall2024-info/htcondor-european-workshop.html @@ -0,0 +1,409 @@ + + + + + + +HTCondor European Workshop returns for ninth year in Orsay, France + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ HTCondor European Workshop returns for ninth year in Orsay, France +

+

The workshop highlights research organizations’ success with the HTCondor Software Suite (HTCSS), challenges, and possible +solutions. Planning for 2024’s workshop is already underway.

+ +
+ Group photo of those involved with the 2023 HTCondor European Workshop +
Group photo of those involved with the 2023 HTCondor European Workshop
+
+ +

The ninth HTCondor European Workshop took place September 19–22, 2023, at IJCLab in Orsay, France, bringing communities of high-throughput computing (HTC) users together. Attendees and HTCondor users have the opportunity to learn from developers and vice versa, HTCondor Core Developer Greg Thain says. During the workshops, “[I]nformation [is] going in all directions — developers to users, users to users, and users back up to developers,” Thain elaborates. Attendees discuss aspects of HTCondor Software Suite (HTCSS) and HTC that they like and areas that could undergo potential development.

+ +

This year, one featured talk was from the European Weather Cloud (EWC), +part of the meteorological community, which just started using HTCondor, Thain mentions. In their presentation, +Francesco Murdaca and Mike Grant discussed their challenges and current uses of HTC. Other HTCondor users like +DESY and CERN also provided updates, +challenges, and the scope of their current HTC uses.

+ +

Another highlight was this year’s “Lightning Talks” sessions, which gave individual attention to users as a way +for them to highlight what works, what doesn’t, and what they’re trying to accomplish, HTCondor Technical Lead +Todd Tannenbaum says. These lightning talks spurred spontaneous discussion. Also included in this year’s programming +was a discussion of Pelican, a new system for sharing data, Thain reveals.

+ +

HTCSS provides distributed high-throughput computing (dHTC) resources to users in academic, government, and commercial organizations across the globe. High energy physics is a leading area of dHTC use, and CERN in Geneva, Switzerland, is a major player. For high-energy physics, Thain explains that more computation needs to be done than can be accomplished in one physical area, so physics communities and member nations affiliated with CERN share resources with each other. However, HTCondor’s resources are not restricted to just these organizations — a broad range of scientific and research disciplines tap into its resources. “About 50% of the participants were regular participants — we’ve seen their faces at a lot of these workshops in Europe — but happily about 50% of the faces were new. So that was an encouraging sign, and we are making plans to have another one,” Tannenbaum says. “The audience has widened a bit from just the system administrators at these national labs that are doing the LHC computing to include a couple of commercial companies and other universities.”

+ +

The topics of discussion vary by year, Thain explains, depending on new developments or changes in the computing landscape, +but are mainly driven by the Europeans. “One of the things we do in the year before is try and take the pulse of what’s new, +what’s concerning, or what’s difficult and try to make sure that we have a workshop that addresses that,” Thain explains. +“We’ve talked about the new tokens the last couple of years, and there’s been a lot of concern about electrical power, especially +in terms of global events.” With the war in Ukraine and energy embargoes from Russia, electric prices have been less stable, +Tannenbaum says, which is a big concern of European data centers. Security, energy management, and power-saving were big +themes of this year’s workshops.

+ +

One of the popular workshops — and one that Tannenbaum looks forward to — is the “Show Us Your Toolbox” session. During this +session, “…folks from all the different national labs [show] how they solve problems like monitoring their cluster, managing +data, and interactive work. Just talking about what challenges they have at their site and their solutions to date inspires +good discussion amongst the participants,” Tannenbaum shares. Other topics up for discussion included how German sites were +deploying HTCSS, ways to improve upon the current HTCSS, and the money and resources users saved with HTCSS.

+ +

Another opportunity for users was participating in office hours, where they could take their computing issues to developers +who work on HTCondor. For Tannenbaum, this is instructive because it helps him determine where people run into problems that +he might not experience and understand which parts of HTCSS may need refining.

+ +

Planning for the 2024 HTCondor European Workshop is already underway, with the venue set for Nikhef, +the Dutch National Institute for Subatomic Physics, in Amsterdam, Tannenbaum reveals. Feedback from the attendees of this year’s +workshop provided insightful information planners will take into account when they meet in a few months to discuss next year’s +workshop. “Looking at the feedback from the user surveys, we felt that this was the proof of the importance of in-person workshops,” +Tannenbaum says. Restricting the workshops to Zoom or other online formats, like what occurred in 2020, causes the workshops to +become less participatory and more reliant on passive listening. “It was much more a series of lectures. [The format was like] slide +show, next slide show, next slide show, which is useful to people, but it doesn’t have that give and take and that everybody is +sharing and thinking together,” Tannenbaum says of the online workshops.

+ +

Across the globe, similar workshops have popped up or are in the beginnings of development in places like India and Southeast Asia, +which the European workshops have had a part in spearheading, Tannenbaum says. “[T]here’s a lot of opportunities to +network with people and share good ideas. If people are in Europe, we’d love to see them next year in Amsterdam. It’s a great +opportunity to have high-level conversations with other users. These last 10 years of meetings have come out of the work that +was done almost 30 years ago,” Thain states.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/htcondor-helps-enable-mars-research.html b/preview-fall2024-info/htcondor-helps-enable-mars-research.html new file mode 100644 index 000000000..3f86de370 --- /dev/null +++ b/preview-fall2024-info/htcondor-helps-enable-mars-research.html @@ -0,0 +1,351 @@ + + + + + + +USGS uses HTCondor to advance Mars research + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ USGS uses HTCondor to advance Mars research +

+

The U.S. Geological Survey goes off-planet in its recent release of 3D models and images of the Mars surface. Gathered through a joint effort of the Mars Reconnaissance Orbiter and the High Resolution Imaging Science Experiment, the images collected represent years of work and hundreds of terabytes of data.

+ +

With more than 155,000 images collected and ready to be released to the public, the USGS relied on HTCondor to do the necessary pre-processing to produce “scientifically useful images and associated metadata”. Streaming from the NASA Planetary Data System cloud holdings, HTCondor processed the 114 TB of data in 4 hours, with more than 4,000 jobs running simultaneously throughout.
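For a sense of scale, those figures imply an aggregate rate of roughly 8 gigabytes per second. A quick sanity check (our arithmetic, not USGS’s):

# Back-of-the-envelope check on the quoted figures:
# 114 TB processed in 4 hours by up to ~4,000 concurrent jobs.
data_bytes = 114e12
seconds = 4 * 3600

aggregate_gb_per_s = data_bytes / seconds / 1e9
print(f"aggregate throughput: {aggregate_gb_per_s:.1f} GB/s")       # ~7.9 GB/s

avg_gb_per_slot = data_bytes / 4000 / 1e9
print(f"average data per concurrent job slot: {avg_gb_per_slot:.1f} GB")  # ~28.5 GB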

+ +

To read more about the released data refer to the full article on the USGS website:

+ +

It is easier than ever to view Mars landscapes in high resolution

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/icecube-receives-hpc-award.html b/preview-fall2024-info/icecube-receives-hpc-award.html new file mode 100644 index 000000000..3d23ed232 --- /dev/null +++ b/preview-fall2024-info/icecube-receives-hpc-award.html @@ -0,0 +1,352 @@ + + + + + + +UW–Madison's Icecube Neutrino Observatory Wins HPCwire Award + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ UW–Madison's Icecube Neutrino Observatory Wins HPCwire Award +

+

The UW-Madison Center for High Throughput Computing’s (CHTC) collaboration with the San Diego Supercomputer Center on +the IceCube Neutrino Observatory received recognition with the HPCwire 2022 Readers’ Choice Award for Best Use of +High Performance Computing (HPC) in the Cloud (Use Case).

+ +

“We have used CHTC and the Open Science Pool (OSPool) for over a decade to perform all large-scale data analysis tasks +and generate Monte Carlo simulations of the instrument’s performance,” notes Francis Halzen, principal investigator of +IceCube and the Hilldale and Gregory Breit Distinguished Professor of Physics. “Without CHTC and OSPool resources we +would simply be unable to make any of IceCube’s groundbreaking discoveries.”

+ +

Read the full story here:

+ +

UW–MADISON’S ICECUBE NEUTRINO OBSERVATORY WINS HPCWIRE AWARD

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/icecube/.well-known/issuer.jwks b/preview-fall2024-info/icecube/.well-known/issuer.jwks new file mode 100644 index 000000000..eba1cef84 --- /dev/null +++ b/preview-fall2024-info/icecube/.well-known/issuer.jwks @@ -0,0 +1,22 @@ +{ + "keys": [ + { + "alg": "ES256", + "crv": "P-256", + "kid": "dad0", + "kty": "EC", + "use": "sig", + "x": "VT3b6ftX9JgdB_9rXn4QOYjyh3K3y6bN2ANjlDJfcWA=", + "y": "Hgyz6Ao2xn0xr6CSAltGw1jGVvfUXNQI5R6FherIGIg=" + }, + { + "alg": "ES256", + "crv": "P-256", + "kid": "7672", + "kty": "EC", + "use": "sig", + "x": "M7o02LMoCExHWtEG6Da302wVYWRn22wNdj4dLH7IbS8=", + "y": "YH4Fc0gkqUjs8mF-oIPeO7AAGDcCykL1B1CmSWIARBs=" + } + ] +} diff --git a/preview-fall2024-info/icecube/.well-known/openid-configuration b/preview-fall2024-info/icecube/.well-known/openid-configuration new file mode 100644 index 000000000..9e83ce8a5 --- /dev/null +++ b/preview-fall2024-info/icecube/.well-known/openid-configuration @@ -0,0 +1,4 @@ +{ + "issuer":"https://chtc.cs.wisc.edu/icecube", + "jwks_uri":"https://chtc.cs.wisc.edu/icecube/.well-known/issuer.jwks" +} diff --git a/preview-fall2024-info/icon_credits.html b/preview-fall2024-info/icon_credits.html new file mode 100644 index 000000000..eac20ec74 --- /dev/null +++ b/preview-fall2024-info/icon_credits.html @@ -0,0 +1,360 @@ + + + + + + +Icon Credits + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+

+ Icon Credits +

+ +

All icons are from the Noun Project.

+ +

On our user guide page:

+ +
    +
  • Check Box by iconomania from the Noun Project
  • Laptop by joe pictos from the Noun Project
  • computer programmer by Θ R I M Λ T from the Noun Project
  • development by Jasfart from the Noun Project
  • Data by amante de icono from the Noun Project
  • processor by Delta from the Noun Project
  • Warning by arjuazka from the Noun Project
  • servers by b farias from the Noun Project
+ +

On our user guide page:

+ +
    +
  • people by zidney from the Noun Project
  • open book by sobinsergey from the Noun Project
  • gpu by Mask Icon from the Noun Project
+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/images/2022TeamPhoto.jpg b/preview-fall2024-info/images/2022TeamPhoto.jpg new file mode 100644 index 000000000..bb8777521 Binary files /dev/null and b/preview-fall2024-info/images/2022TeamPhoto.jpg differ diff --git a/preview-fall2024-info/images/Anirvan-Showcase-1.png b/preview-fall2024-info/images/Anirvan-Showcase-1.png new file mode 100644 index 000000000..6c5d44c6a Binary files /dev/null and b/preview-fall2024-info/images/Anirvan-Showcase-1.png differ diff --git a/preview-fall2024-info/images/Anirvan-Showcase-2.png b/preview-fall2024-info/images/Anirvan-Showcase-2.png new file mode 100644 index 000000000..1bd6ba5b6 Binary files /dev/null and b/preview-fall2024-info/images/Anirvan-Showcase-2.png differ diff --git a/preview-fall2024-info/images/CDIS_Career_Fair22_3942.jpg b/preview-fall2024-info/images/CDIS_Career_Fair22_3942.jpg new file mode 100644 index 000000000..fb63653af Binary files /dev/null and b/preview-fall2024-info/images/CDIS_Career_Fair22_3942.jpg differ diff --git a/preview-fall2024-info/images/CHTC-Quick-Facts-2012.png b/preview-fall2024-info/images/CHTC-Quick-Facts-2012.png new file mode 100644 index 000000000..97afa72eb Binary files /dev/null and b/preview-fall2024-info/images/CHTC-Quick-Facts-2012.png differ diff --git a/preview-fall2024-info/images/CHTC-logo-header.png b/preview-fall2024-info/images/CHTC-logo-header.png new file mode 100644 index 000000000..b34556e10 Binary files /dev/null and b/preview-fall2024-info/images/CHTC-logo-header.png differ diff --git a/preview-fall2024-info/images/CHTC-logo.png b/preview-fall2024-info/images/CHTC-logo.png new file mode 100644 index 000000000..7ea14bdd3 Binary files /dev/null and b/preview-fall2024-info/images/CHTC-logo.png differ diff --git a/preview-fall2024-info/images/CHTC_Fellows.png b/preview-fall2024-info/images/CHTC_Fellows.png new file mode 100644 index 000000000..5af649b22 Binary files /dev/null and b/preview-fall2024-info/images/CHTC_Fellows.png differ diff --git a/preview-fall2024-info/images/CHTC_Fellows.webp b/preview-fall2024-info/images/CHTC_Fellows.webp new file mode 100644 index 000000000..702634318 Binary files /dev/null and b/preview-fall2024-info/images/CHTC_Fellows.webp differ diff --git a/preview-fall2024-info/images/CHTC_Logo_White.png b/preview-fall2024-info/images/CHTC_Logo_White.png new file mode 100644 index 000000000..3ed8daeb1 Binary files /dev/null and b/preview-fall2024-info/images/CHTC_Logo_White.png differ diff --git a/preview-fall2024-info/images/Data-Server.jpg b/preview-fall2024-info/images/Data-Server.jpg new file mode 100644 index 000000000..0da8a32bc Binary files /dev/null and b/preview-fall2024-info/images/Data-Server.jpg differ diff --git a/preview-fall2024-info/images/Emile_Working.jpg b/preview-fall2024-info/images/Emile_Working.jpg new file mode 100644 index 000000000..d649a0640 Binary files /dev/null and b/preview-fall2024-info/images/Emile_Working.jpg differ diff --git a/preview-fall2024-info/images/FirstDayClass_AD21_0269.jpg b/preview-fall2024-info/images/FirstDayClass_AD21_0269.jpg new file mode 100644 index 000000000..95c1d3a58 Binary files /dev/null and b/preview-fall2024-info/images/FirstDayClass_AD21_0269.jpg differ diff --git a/preview-fall2024-info/images/Gaylen-Fronk-square.jpg b/preview-fall2024-info/images/Gaylen-Fronk-square.jpg new file mode 100644 index 000000000..4290f44f7 Binary files /dev/null and b/preview-fall2024-info/images/Gaylen-Fronk-square.jpg differ diff --git 
a/preview-fall2024-info/images/GitHub_Logo_White.png b/preview-fall2024-info/images/GitHub_Logo_White.png new file mode 100644 index 000000000..c61ab9d05 Binary files /dev/null and b/preview-fall2024-info/images/GitHub_Logo_White.png differ diff --git a/preview-fall2024-info/images/HPC-cluster.jpg b/preview-fall2024-info/images/HPC-cluster.jpg new file mode 100644 index 000000000..6b0d2505f Binary files /dev/null and b/preview-fall2024-info/images/HPC-cluster.jpg differ diff --git a/preview-fall2024-info/images/HTC23-osg-services.jpg b/preview-fall2024-info/images/HTC23-osg-services.jpg new file mode 100644 index 000000000..5c7a62ea9 Binary files /dev/null and b/preview-fall2024-info/images/HTC23-osg-services.jpg differ diff --git a/preview-fall2024-info/images/HTC_Domain_Interaction_V2.svg b/preview-fall2024-info/images/HTC_Domain_Interaction_V2.svg new file mode 100644 index 000000000..a15a3ee08 --- /dev/null +++ b/preview-fall2024-info/images/HTC_Domain_Interaction_V2.svg @@ -0,0 +1 @@ +Center ForHigh ThroughputComputingHTCBringing the power of High Throughput Computing to all fields of Research \ No newline at end of file diff --git a/preview-fall2024-info/images/HTC_Graphic.png b/preview-fall2024-info/images/HTC_Graphic.png new file mode 100644 index 000000000..fda33e7e3 Binary files /dev/null and b/preview-fall2024-info/images/HTC_Graphic.png differ diff --git a/preview-fall2024-info/images/HTC_Graphic.svg b/preview-fall2024-info/images/HTC_Graphic.svg new file mode 100644 index 000000000..55f4fd08c --- /dev/null +++ b/preview-fall2024-info/images/HTC_Graphic.svg @@ -0,0 +1,1887 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +HTC + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/preview-fall2024-info/images/HTC_Graphic_Outline.png b/preview-fall2024-info/images/HTC_Graphic_Outline.png new file mode 100644 index 000000000..c0daef883 Binary files /dev/null and b/preview-fall2024-info/images/HTC_Graphic_Outline.png differ diff --git a/preview-fall2024-info/images/HTC_Graphic_Outline.svg b/preview-fall2024-info/images/HTC_Graphic_Outline.svg new file mode 100644 index 000000000..cc7f675ee --- /dev/null +++ b/preview-fall2024-info/images/HTC_Graphic_Outline.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git 
a/preview-fall2024-info/images/HTC_Graphic_Outline.webp b/preview-fall2024-info/images/HTC_Graphic_Outline.webp new file mode 100644 index 000000000..1dab57493 Binary files /dev/null and b/preview-fall2024-info/images/HTC_Graphic_Outline.webp differ diff --git a/preview-fall2024-info/images/HTCondorLayersFlat.jpg b/preview-fall2024-info/images/HTCondorLayersFlat.jpg new file mode 100644 index 000000000..744aed0a1 Binary files /dev/null and b/preview-fall2024-info/images/HTCondorLayersFlat.jpg differ diff --git a/preview-fall2024-info/images/HTCondorLayersFlat.webp b/preview-fall2024-info/images/HTCondorLayersFlat.webp new file mode 100644 index 000000000..1fef6cc1f Binary files /dev/null and b/preview-fall2024-info/images/HTCondorLayersFlat.webp differ diff --git a/preview-fall2024-info/images/HTCondor_red_blk_notag.png b/preview-fall2024-info/images/HTCondor_red_blk_notag.png new file mode 100644 index 000000000..710d2dbe2 Binary files /dev/null and b/preview-fall2024-info/images/HTCondor_red_blk_notag.png differ diff --git a/preview-fall2024-info/images/Hannah-Showcase.jpg b/preview-fall2024-info/images/Hannah-Showcase.jpg new file mode 100644 index 000000000..f999b4c5b Binary files /dev/null and b/preview-fall2024-info/images/Hannah-Showcase.jpg differ diff --git a/preview-fall2024-info/images/IceCube.jpeg b/preview-fall2024-info/images/IceCube.jpeg new file mode 100644 index 000000000..4bd5e07a6 Binary files /dev/null and b/preview-fall2024-info/images/IceCube.jpeg differ diff --git a/preview-fall2024-info/images/LHC.jpg b/preview-fall2024-info/images/LHC.jpg new file mode 100644 index 000000000..7bd63b52e Binary files /dev/null and b/preview-fall2024-info/images/LHC.jpg differ diff --git a/preview-fall2024-info/images/Matlab_Logo.png b/preview-fall2024-info/images/Matlab_Logo.png new file mode 100644 index 000000000..bc3144d1f Binary files /dev/null and b/preview-fall2024-info/images/Matlab_Logo.png differ diff --git a/preview-fall2024-info/images/Morgridge-Small-Logo.png b/preview-fall2024-info/images/Morgridge-Small-Logo.png new file mode 100644 index 000000000..b869659fa Binary files /dev/null and b/preview-fall2024-info/images/Morgridge-Small-Logo.png differ diff --git a/preview-fall2024-info/images/OSGUS23-andrew-help.jpg b/preview-fall2024-info/images/OSGUS23-andrew-help.jpg new file mode 100644 index 000000000..6f6f8d84f Binary files /dev/null and b/preview-fall2024-info/images/OSGUS23-andrew-help.jpg differ diff --git a/preview-fall2024-info/images/OSGVS21-Logo.png b/preview-fall2024-info/images/OSGVS21-Logo.png new file mode 100644 index 000000000..888101cc5 Binary files /dev/null and b/preview-fall2024-info/images/OSGVS21-Logo.png differ diff --git a/preview-fall2024-info/images/Open_Science_Grid_Consortium(Logo).jpg b/preview-fall2024-info/images/Open_Science_Grid_Consortium(Logo).jpg new file mode 100644 index 000000000..50d224ea3 Binary files /dev/null and b/preview-fall2024-info/images/Open_Science_Grid_Consortium(Logo).jpg differ diff --git a/preview-fall2024-info/images/Python_Logo.png b/preview-fall2024-info/images/Python_Logo.png new file mode 100644 index 000000000..32a31fbd8 Binary files /dev/null and b/preview-fall2024-info/images/Python_Logo.png differ diff --git a/preview-fall2024-info/images/R_Logo.png b/preview-fall2024-info/images/R_Logo.png new file mode 100644 index 000000000..743c9bbf4 Binary files /dev/null and b/preview-fall2024-info/images/R_Logo.png differ diff --git a/preview-fall2024-info/images/Research_Computing_Usage_Graph.png 
b/preview-fall2024-info/images/Research_Computing_Usage_Graph.png new file mode 100644 index 000000000..05d96cb7a Binary files /dev/null and b/preview-fall2024-info/images/Research_Computing_Usage_Graph.png differ diff --git a/preview-fall2024-info/images/Spencer-Showcase.jpg b/preview-fall2024-info/images/Spencer-Showcase.jpg new file mode 100644 index 000000000..d75e9bc41 Binary files /dev/null and b/preview-fall2024-info/images/Spencer-Showcase.jpg differ diff --git a/preview-fall2024-info/images/Thumbs.db b/preview-fall2024-info/images/Thumbs.db new file mode 100644 index 000000000..b35502ba1 Binary files /dev/null and b/preview-fall2024-info/images/Thumbs.db differ diff --git a/preview-fall2024-info/images/USGS-collage.jpg b/preview-fall2024-info/images/USGS-collage.jpg new file mode 100644 index 000000000..33bc74f42 Binary files /dev/null and b/preview-fall2024-info/images/USGS-collage.jpg differ diff --git a/preview-fall2024-info/images/WinSCPPortable.png b/preview-fall2024-info/images/WinSCPPortable.png new file mode 100644 index 000000000..1d66f155f Binary files /dev/null and b/preview-fall2024-info/images/WinSCPPortable.png differ diff --git a/preview-fall2024-info/images/atlas.jpg b/preview-fall2024-info/images/atlas.jpg new file mode 100644 index 000000000..4791e46f1 Binary files /dev/null and b/preview-fall2024-info/images/atlas.jpg differ diff --git a/preview-fall2024-info/images/banq-mw.jpg b/preview-fall2024-info/images/banq-mw.jpg new file mode 100644 index 000000000..77dbbadea Binary files /dev/null and b/preview-fall2024-info/images/banq-mw.jpg differ diff --git a/preview-fall2024-info/images/banq-mw.png b/preview-fall2024-info/images/banq-mw.png new file mode 100644 index 000000000..b2a97d94b Binary files /dev/null and b/preview-fall2024-info/images/banq-mw.png differ diff --git a/preview-fall2024-info/images/banq-patrie.jpg b/preview-fall2024-info/images/banq-patrie.jpg new file mode 100644 index 000000000..b91b1d348 Binary files /dev/null and b/preview-fall2024-info/images/banq-patrie.jpg differ diff --git a/preview-fall2024-info/images/banq-patrie.png b/preview-fall2024-info/images/banq-patrie.png new file mode 100644 index 000000000..76aae24e7 Binary files /dev/null and b/preview-fall2024-info/images/banq-patrie.png differ diff --git a/preview-fall2024-info/images/batlab.png b/preview-fall2024-info/images/batlab.png new file mode 100644 index 000000000..59a7c4804 Binary files /dev/null and b/preview-fall2024-info/images/batlab.png differ diff --git a/preview-fall2024-info/images/bazaar.jpg b/preview-fall2024-info/images/bazaar.jpg new file mode 100644 index 000000000..7bd863922 Binary files /dev/null and b/preview-fall2024-info/images/bazaar.jpg differ diff --git a/preview-fall2024-info/images/brg_bg.gif b/preview-fall2024-info/images/brg_bg.gif new file mode 100644 index 000000000..e45d2f0c8 Binary files /dev/null and b/preview-fall2024-info/images/brg_bg.gif differ diff --git a/preview-fall2024-info/images/christina-koch-chtc-featured.webp b/preview-fall2024-info/images/christina-koch-chtc-featured.webp new file mode 100644 index 000000000..90914f9d8 Binary files /dev/null and b/preview-fall2024-info/images/christina-koch-chtc-featured.webp differ diff --git a/preview-fall2024-info/images/christina-koch-square.jpg b/preview-fall2024-info/images/christina-koch-square.jpg new file mode 100644 index 000000000..8b62fe376 Binary files /dev/null and b/preview-fall2024-info/images/christina-koch-square.jpg differ diff --git 
a/preview-fall2024-info/images/chtc-file-transfer.png b/preview-fall2024-info/images/chtc-file-transfer.png new file mode 100644 index 000000000..7b8f6f90c Binary files /dev/null and b/preview-fall2024-info/images/chtc-file-transfer.png differ diff --git a/preview-fall2024-info/images/chtc-internship-banner-lg.jpg b/preview-fall2024-info/images/chtc-internship-banner-lg.jpg new file mode 100644 index 000000000..815efd154 Binary files /dev/null and b/preview-fall2024-info/images/chtc-internship-banner-lg.jpg differ diff --git a/preview-fall2024-info/images/chtc-internship-banner.jpg b/preview-fall2024-info/images/chtc-internship-banner.jpg new file mode 100644 index 000000000..cf8148e83 Binary files /dev/null and b/preview-fall2024-info/images/chtc-internship-banner.jpg differ diff --git a/preview-fall2024-info/images/circuit_background.svg b/preview-fall2024-info/images/circuit_background.svg new file mode 100644 index 000000000..0c9a70eac --- /dev/null +++ b/preview-fall2024-info/images/circuit_background.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/preview-fall2024-info/images/circuit_board_light.svg b/preview-fall2024-info/images/circuit_board_light.svg new file mode 100644 index 000000000..91221a4ac --- /dev/null +++ b/preview-fall2024-info/images/circuit_board_light.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/preview-fall2024-info/images/condor.png b/preview-fall2024-info/images/condor.png new file mode 100644 index 000000000..8b161f1ca Binary files /dev/null and b/preview-fall2024-info/images/condor.png differ diff --git a/preview-fall2024-info/images/cui.jpg b/preview-fall2024-info/images/cui.jpg new file mode 100644 index 000000000..2097e0396 Binary files /dev/null and b/preview-fall2024-info/images/cui.jpg differ diff --git a/preview-fall2024-info/images/cui_image001.png b/preview-fall2024-info/images/cui_image001.png new file mode 100644 index 000000000..5edff337d Binary files /dev/null and b/preview-fall2024-info/images/cui_image001.png differ diff --git a/preview-fall2024-info/images/david-oconnor.jpg b/preview-fall2024-info/images/david-oconnor.jpg new file mode 100644 index 000000000..ff0ed513e Binary files /dev/null and b/preview-fall2024-info/images/david-oconnor.jpg differ diff --git a/preview-fall2024-info/images/deLeon.jpg b/preview-fall2024-info/images/deLeon.jpg new file mode 100644 index 000000000..383471a18 Binary files /dev/null and b/preview-fall2024-info/images/deLeon.jpg differ diff --git a/preview-fall2024-info/images/demo_robot.jpeg b/preview-fall2024-info/images/demo_robot.jpeg new file mode 100644 index 000000000..41805e35c Binary files /dev/null and b/preview-fall2024-info/images/demo_robot.jpeg differ diff --git a/preview-fall2024-info/images/depablo.jpg b/preview-fall2024-info/images/depablo.jpg new file mode 100644 index 000000000..174ce666b Binary files /dev/null and b/preview-fall2024-info/images/depablo.jpg differ diff --git a/preview-fall2024-info/images/docs/Export As.jpg b/preview-fall2024-info/images/docs/Export As.jpg new file mode 100644 index 000000000..a046467e7 Binary files /dev/null and b/preview-fall2024-info/images/docs/Export As.jpg differ diff --git a/preview-fall2024-info/images/docs/Export As.png b/preview-fall2024-info/images/docs/Export As.png new file mode 100644 index 000000000..ad8c3e99b Binary files /dev/null and b/preview-fall2024-info/images/docs/Export As.png differ diff --git a/preview-fall2024-info/images/docs/add-article.png b/preview-fall2024-info/images/docs/add-article.png new file mode 
100644 index 000000000..b78fd38bf Binary files /dev/null and b/preview-fall2024-info/images/docs/add-article.png differ diff --git a/preview-fall2024-info/images/docs/add_article.png b/preview-fall2024-info/images/docs/add_article.png new file mode 100644 index 000000000..c76c2434b Binary files /dev/null and b/preview-fall2024-info/images/docs/add_article.png differ diff --git a/preview-fall2024-info/images/docs/add_new_calendar.jpg b/preview-fall2024-info/images/docs/add_new_calendar.jpg new file mode 100644 index 000000000..d941e60ff Binary files /dev/null and b/preview-fall2024-info/images/docs/add_new_calendar.jpg differ diff --git a/preview-fall2024-info/images/docs/add_new_calendar.png b/preview-fall2024-info/images/docs/add_new_calendar.png new file mode 100644 index 000000000..b01233d02 Binary files /dev/null and b/preview-fall2024-info/images/docs/add_new_calendar.png differ diff --git a/preview-fall2024-info/images/docs/create_a_pr.jpg b/preview-fall2024-info/images/docs/create_a_pr.jpg new file mode 100644 index 000000000..fd0437fd7 Binary files /dev/null and b/preview-fall2024-info/images/docs/create_a_pr.jpg differ diff --git a/preview-fall2024-info/images/docs/create_a_pr.png b/preview-fall2024-info/images/docs/create_a_pr.png new file mode 100644 index 000000000..b030507c1 Binary files /dev/null and b/preview-fall2024-info/images/docs/create_a_pr.png differ diff --git a/preview-fall2024-info/images/docs/create_preview_branch.jpg b/preview-fall2024-info/images/docs/create_preview_branch.jpg new file mode 100644 index 000000000..044a7f8b4 Binary files /dev/null and b/preview-fall2024-info/images/docs/create_preview_branch.jpg differ diff --git a/preview-fall2024-info/images/docs/create_preview_branch.png b/preview-fall2024-info/images/docs/create_preview_branch.png new file mode 100644 index 000000000..d89b7bf05 Binary files /dev/null and b/preview-fall2024-info/images/docs/create_preview_branch.png differ diff --git a/preview-fall2024-info/images/docs/demo_image.jpg b/preview-fall2024-info/images/docs/demo_image.jpg new file mode 100644 index 000000000..d204cbb35 Binary files /dev/null and b/preview-fall2024-info/images/docs/demo_image.jpg differ diff --git a/preview-fall2024-info/images/docs/demo_image.png b/preview-fall2024-info/images/docs/demo_image.png new file mode 100644 index 000000000..7c34daba5 Binary files /dev/null and b/preview-fall2024-info/images/docs/demo_image.png differ diff --git a/preview-fall2024-info/images/docs/demo_outage.jpg b/preview-fall2024-info/images/docs/demo_outage.jpg new file mode 100644 index 000000000..aae2557fa Binary files /dev/null and b/preview-fall2024-info/images/docs/demo_outage.jpg differ diff --git a/preview-fall2024-info/images/docs/demo_outage.png b/preview-fall2024-info/images/docs/demo_outage.png new file mode 100644 index 000000000..1731e35ad Binary files /dev/null and b/preview-fall2024-info/images/docs/demo_outage.png differ diff --git a/preview-fall2024-info/images/docs/demo_outage_schedule_app.jpg b/preview-fall2024-info/images/docs/demo_outage_schedule_app.jpg new file mode 100644 index 000000000..91e50a8fd Binary files /dev/null and b/preview-fall2024-info/images/docs/demo_outage_schedule_app.jpg differ diff --git a/preview-fall2024-info/images/docs/demo_outage_schedule_app.png b/preview-fall2024-info/images/docs/demo_outage_schedule_app.png new file mode 100644 index 000000000..c8d9f207e Binary files /dev/null and b/preview-fall2024-info/images/docs/demo_outage_schedule_app.png differ diff --git 
a/preview-fall2024-info/images/docs/demo_size_reduction.jpg b/preview-fall2024-info/images/docs/demo_size_reduction.jpg new file mode 100644 index 000000000..3d51fa723 Binary files /dev/null and b/preview-fall2024-info/images/docs/demo_size_reduction.jpg differ diff --git a/preview-fall2024-info/images/docs/demo_size_reduction.png b/preview-fall2024-info/images/docs/demo_size_reduction.png new file mode 100644 index 000000000..d3ee4eeed Binary files /dev/null and b/preview-fall2024-info/images/docs/demo_size_reduction.png differ diff --git a/preview-fall2024-info/images/docs/get_calendar_url.jpg b/preview-fall2024-info/images/docs/get_calendar_url.jpg new file mode 100644 index 000000000..7ab6a7882 Binary files /dev/null and b/preview-fall2024-info/images/docs/get_calendar_url.jpg differ diff --git a/preview-fall2024-info/images/docs/get_calendar_url.png b/preview-fall2024-info/images/docs/get_calendar_url.png new file mode 100644 index 000000000..3be7a30fa Binary files /dev/null and b/preview-fall2024-info/images/docs/get_calendar_url.png differ diff --git a/preview-fall2024-info/images/docs/go_to_calendar_settings.jpg b/preview-fall2024-info/images/docs/go_to_calendar_settings.jpg new file mode 100644 index 000000000..80f13e14f Binary files /dev/null and b/preview-fall2024-info/images/docs/go_to_calendar_settings.jpg differ diff --git a/preview-fall2024-info/images/docs/go_to_calendar_settings.png b/preview-fall2024-info/images/docs/go_to_calendar_settings.png new file mode 100644 index 000000000..0586c564c Binary files /dev/null and b/preview-fall2024-info/images/docs/go_to_calendar_settings.png differ diff --git a/preview-fall2024-info/images/docs/preview-demo.png b/preview-fall2024-info/images/docs/preview-demo.png new file mode 100644 index 000000000..4b5afed97 Binary files /dev/null and b/preview-fall2024-info/images/docs/preview-demo.png differ diff --git a/preview-fall2024-info/images/events/HTCondor_red_blk_notag.png b/preview-fall2024-info/images/events/HTCondor_red_blk_notag.png new file mode 100644 index 000000000..f7d74b8c4 Binary files /dev/null and b/preview-fall2024-info/images/events/HTCondor_red_blk_notag.png differ diff --git a/preview-fall2024-info/images/events/Throughput-Computing-2023-Banner.jpg b/preview-fall2024-info/images/events/Throughput-Computing-2023-Banner.jpg new file mode 100644 index 000000000..f07f32fad Binary files /dev/null and b/preview-fall2024-info/images/events/Throughput-Computing-2023-Banner.jpg differ diff --git a/preview-fall2024-info/images/events/Throughput-Computing-2024-BannerCHTC.jpg b/preview-fall2024-info/images/events/Throughput-Computing-2024-BannerCHTC.jpg new file mode 100644 index 000000000..5653ded09 Binary files /dev/null and b/preview-fall2024-info/images/events/Throughput-Computing-2024-BannerCHTC.jpg differ diff --git a/preview-fall2024-info/images/events/capital.jpeg b/preview-fall2024-info/images/events/capital.jpeg new file mode 100644 index 000000000..5d741be90 Binary files /dev/null and b/preview-fall2024-info/images/events/capital.jpeg differ diff --git a/preview-fall2024-info/images/events/osg-school-2024-event.jpg b/preview-fall2024-info/images/events/osg-school-2024-event.jpg new file mode 100644 index 000000000..add31819d Binary files /dev/null and b/preview-fall2024-info/images/events/osg-school-2024-event.jpg differ diff --git a/preview-fall2024-info/images/events/osg-user-school-2023-event.jpg b/preview-fall2024-info/images/events/osg-user-school-2023-event.jpg new file mode 100644 index 000000000..b277f64fe 
Binary files /dev/null and b/preview-fall2024-info/images/events/osg-user-school-2023-event.jpg differ diff --git a/preview-fall2024-info/images/events/throughput-2024-banners.png b/preview-fall2024-info/images/events/throughput-2024-banners.png new file mode 100644 index 000000000..c88cd9d71 Binary files /dev/null and b/preview-fall2024-info/images/events/throughput-2024-banners.png differ diff --git a/preview-fall2024-info/images/gandhi.jpg b/preview-fall2024-info/images/gandhi.jpg new file mode 100644 index 000000000..8bd3839f6 Binary files /dev/null and b/preview-fall2024-info/images/gandhi.jpg differ diff --git a/preview-fall2024-info/images/guides-globus-endpoints.png b/preview-fall2024-info/images/guides-globus-endpoints.png new file mode 100644 index 000000000..288ddef93 Binary files /dev/null and b/preview-fall2024-info/images/guides-globus-endpoints.png differ diff --git a/preview-fall2024-info/images/h_shadow1.gif b/preview-fall2024-info/images/h_shadow1.gif new file mode 100644 index 000000000..38894386e Binary files /dev/null and b/preview-fall2024-info/images/h_shadow1.gif differ diff --git a/preview-fall2024-info/images/hagness.jpg b/preview-fall2024-info/images/hagness.jpg new file mode 100644 index 000000000..346dcabe0 Binary files /dev/null and b/preview-fall2024-info/images/hagness.jpg differ diff --git a/preview-fall2024-info/images/icecube.jpg b/preview-fall2024-info/images/icecube.jpg new file mode 100644 index 000000000..6a4900f01 Binary files /dev/null and b/preview-fall2024-info/images/icecube.jpg differ diff --git a/preview-fall2024-info/images/joao-dorea.jpg b/preview-fall2024-info/images/joao-dorea.jpg new file mode 100644 index 000000000..b4f8f1bb8 Binary files /dev/null and b/preview-fall2024-info/images/joao-dorea.jpg differ diff --git a/preview-fall2024-info/images/laptop.svg b/preview-fall2024-info/images/laptop.svg new file mode 100644 index 000000000..0fc463deb --- /dev/null +++ b/preview-fall2024-info/images/laptop.svg @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/preview-fall2024-info/images/logo-osg-large.png b/preview-fall2024-info/images/logo-osg-large.png new file mode 100644 index 000000000..35134bcdf Binary files /dev/null and b/preview-fall2024-info/images/logo-osg-large.png differ diff --git a/preview-fall2024-info/images/logo_bmrb.svg b/preview-fall2024-info/images/logo_bmrb.svg new file mode 100644 index 000000000..092dcfc64 --- /dev/null +++ b/preview-fall2024-info/images/logo_bmrb.svg @@ -0,0 +1,135 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + diff --git a/preview-fall2024-info/images/logos/BMRB_Logo.svg b/preview-fall2024-info/images/logos/BMRB_Logo.svg new file mode 100644 index 000000000..092dcfc64 --- /dev/null +++ b/preview-fall2024-info/images/logos/BMRB_Logo.svg @@ -0,0 +1,135 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + diff --git a/preview-fall2024-info/images/logos/CHTC_Logo.svg b/preview-fall2024-info/images/logos/CHTC_Logo.svg new file mode 100644 index 000000000..5f4967420 --- /dev/null +++ b/preview-fall2024-info/images/logos/CHTC_Logo.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/preview-fall2024-info/images/logos/CHTC_Logo_Full_Color.svg b/preview-fall2024-info/images/logos/CHTC_Logo_Full_Color.svg new file mode 100644 index 000000000..aba43e53d --- /dev/null +++ b/preview-fall2024-info/images/logos/CHTC_Logo_Full_Color.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git 
a/preview-fall2024-info/images/logos/CHTC_Logo_Full_Color_W_Text.svg b/preview-fall2024-info/images/logos/CHTC_Logo_Full_Color_W_Text.svg new file mode 100644 index 000000000..37ead3ec3 --- /dev/null +++ b/preview-fall2024-info/images/logos/CHTC_Logo_Full_Color_W_Text.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/preview-fall2024-info/images/logos/CHTC_Logo_White.svg b/preview-fall2024-info/images/logos/CHTC_Logo_White.svg new file mode 100644 index 000000000..23ee13afb --- /dev/null +++ b/preview-fall2024-info/images/logos/CHTC_Logo_White.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/preview-fall2024-info/images/logos/CHTC_Logo_White_W_Text.svg b/preview-fall2024-info/images/logos/CHTC_Logo_White_W_Text.svg new file mode 100644 index 000000000..63b8118e8 --- /dev/null +++ b/preview-fall2024-info/images/logos/CHTC_Logo_White_W_Text.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/preview-fall2024-info/images/logos/CHTC_Small_Logo.png b/preview-fall2024-info/images/logos/CHTC_Small_Logo.png new file mode 100644 index 000000000..7d49bc075 Binary files /dev/null and b/preview-fall2024-info/images/logos/CHTC_Small_Logo.png differ diff --git a/preview-fall2024-info/images/logos/CHTC_Small_Logo.svg b/preview-fall2024-info/images/logos/CHTC_Small_Logo.svg new file mode 100644 index 000000000..da13451df --- /dev/null +++ b/preview-fall2024-info/images/logos/CHTC_Small_Logo.svg @@ -0,0 +1,13 @@ + + + + + + + + + diff --git a/preview-fall2024-info/images/logos/HTCSS_Logo.png b/preview-fall2024-info/images/logos/HTCSS_Logo.png new file mode 100644 index 000000000..c4cb45017 Binary files /dev/null and b/preview-fall2024-info/images/logos/HTCSS_Logo.png differ diff --git a/preview-fall2024-info/images/logos/IRIS-HEP_Logo.png b/preview-fall2024-info/images/logos/IRIS-HEP_Logo.png new file mode 100644 index 000000000..e30573423 Binary files /dev/null and b/preview-fall2024-info/images/logos/IRIS-HEP_Logo.png differ diff --git a/preview-fall2024-info/images/logos/Morgridge_Horizontal_Logo.png b/preview-fall2024-info/images/logos/Morgridge_Horizontal_Logo.png new file mode 100644 index 000000000..43ac442ac Binary files /dev/null and b/preview-fall2024-info/images/logos/Morgridge_Horizontal_Logo.png differ diff --git a/preview-fall2024-info/images/logos/Morgridge_Logo.png b/preview-fall2024-info/images/logos/Morgridge_Logo.png new file mode 100644 index 000000000..f6bca1efa Binary files /dev/null and b/preview-fall2024-info/images/logos/Morgridge_Logo.png differ diff --git a/preview-fall2024-info/images/logos/Morgridge_Logo.svg b/preview-fall2024-info/images/logos/Morgridge_Logo.svg new file mode 100644 index 000000000..50237633b --- /dev/null +++ b/preview-fall2024-info/images/logos/Morgridge_Logo.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/preview-fall2024-info/images/logos/NEOS_Logo.svg b/preview-fall2024-info/images/logos/NEOS_Logo.svg new file mode 100644 index 000000000..d3b469103 --- /dev/null +++ b/preview-fall2024-info/images/logos/NEOS_Logo.svg @@ -0,0 +1,144 @@ + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/preview-fall2024-info/images/logos/OSG-logo.svg b/preview-fall2024-info/images/logos/OSG-logo.svg new file mode 100644 index 000000000..9d435504e --- /dev/null +++ b/preview-fall2024-info/images/logos/OSG-logo.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/preview-fall2024-info/images/logos/PATh_Logo.png b/preview-fall2024-info/images/logos/PATh_Logo.png new file mode 100644 index 
000000000..905ebf162 Binary files /dev/null and b/preview-fall2024-info/images/logos/PATh_Logo.png differ diff --git a/preview-fall2024-info/images/logos/Pegasus_Logo.png b/preview-fall2024-info/images/logos/Pegasus_Logo.png new file mode 100644 index 000000000..768b58ca0 Binary files /dev/null and b/preview-fall2024-info/images/logos/Pegasus_Logo.png differ diff --git a/preview-fall2024-info/images/logos/UW_Logo.svg b/preview-fall2024-info/images/logos/UW_Logo.svg new file mode 100755 index 000000000..b01f23164 --- /dev/null +++ b/preview-fall2024-info/images/logos/UW_Logo.svg @@ -0,0 +1 @@ +uw-web-logo-flush \ No newline at end of file diff --git a/preview-fall2024-info/images/logos/xDD_Logo.svg b/preview-fall2024-info/images/logos/xDD_Logo.svg new file mode 100644 index 000000000..c78bcb9b2 --- /dev/null +++ b/preview-fall2024-info/images/logos/xDD_Logo.svg @@ -0,0 +1,204 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/preview-fall2024-info/images/markley.jpg b/preview-fall2024-info/images/markley.jpg new file mode 100644 index 000000000..b41440730 Binary files /dev/null and b/preview-fall2024-info/images/markley.jpg differ diff --git a/preview-fall2024-info/images/microscope.svg b/preview-fall2024-info/images/microscope.svg new file mode 100644 index 000000000..d1cf2a865 --- /dev/null +++ b/preview-fall2024-info/images/microscope.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/preview-fall2024-info/images/multi-job-submit-video-thumbnail.png b/preview-fall2024-info/images/multi-job-submit-video-thumbnail.png new file mode 100644 index 000000000..32799a0c7 Binary files /dev/null and b/preview-fall2024-info/images/multi-job-submit-video-thumbnail.png differ diff --git a/preview-fall2024-info/images/neos.png b/preview-fall2024-info/images/neos.png new file mode 100644 index 000000000..27fa257c4 Binary files /dev/null and b/preview-fall2024-info/images/neos.png differ diff --git a/preview-fall2024-info/images/osg.png b/preview-fall2024-info/images/osg.png new file mode 100644 index 000000000..f1843e408 Binary files /dev/null and b/preview-fall2024-info/images/osg.png differ diff --git a/preview-fall2024-info/images/overview_htcondor_job_submission.png b/preview-fall2024-info/images/overview_htcondor_job_submission.png new file mode 100644 index 000000000..f783f71df Binary files /dev/null and b/preview-fall2024-info/images/overview_htcondor_job_submission.png differ diff --git a/preview-fall2024-info/images/paul-wilson.jpg b/preview-fall2024-info/images/paul-wilson.jpg new file mode 100644 index 000000000..b7b1c08d9 Binary files /dev/null and b/preview-fall2024-info/images/paul-wilson.jpg differ diff --git a/preview-fall2024-info/images/perkins.jpg b/preview-fall2024-info/images/perkins.jpg new file mode 100644 index 000000000..ebea84673 Binary files /dev/null and b/preview-fall2024-info/images/perkins.jpg differ diff --git a/preview-fall2024-info/images/peters.jpg b/preview-fall2024-info/images/peters.jpg new file mode 100644 index 000000000..4cca43ed4 Binary files /dev/null and b/preview-fall2024-info/images/peters.jpg differ diff --git a/preview-fall2024-info/images/putty-7.jpeg b/preview-fall2024-info/images/putty-7.jpeg new file mode 100644 index 000000000..69690641c Binary files /dev/null and b/preview-fall2024-info/images/putty-7.jpeg differ diff --git a/preview-fall2024-info/images/resilience-hero-large.jpeg 
b/preview-fall2024-info/images/resilience-hero-large.jpeg new file mode 100644 index 000000000..394ebebf6 Binary files /dev/null and b/preview-fall2024-info/images/resilience-hero-large.jpeg differ diff --git a/preview-fall2024-info/images/schwartz.jpg b/preview-fall2024-info/images/schwartz.jpg new file mode 100644 index 000000000..d402cfd82 Binary files /dev/null and b/preview-fall2024-info/images/schwartz.jpg differ diff --git a/preview-fall2024-info/images/spencer-ericksen.jpg b/preview-fall2024-info/images/spencer-ericksen.jpg new file mode 100644 index 000000000..c112d7151 Binary files /dev/null and b/preview-fall2024-info/images/spencer-ericksen.jpg differ diff --git a/preview-fall2024-info/images/staging-file-transfer.png b/preview-fall2024-info/images/staging-file-transfer.png new file mode 100644 index 000000000..ef6e7b718 Binary files /dev/null and b/preview-fall2024-info/images/staging-file-transfer.png differ diff --git a/preview-fall2024-info/images/team/AnnikaJohnson.png b/preview-fall2024-info/images/team/AnnikaJohnson.png new file mode 100644 index 000000000..df76baaa1 Binary files /dev/null and b/preview-fall2024-info/images/team/AnnikaJohnson.png differ diff --git a/preview-fall2024-info/images/team/Brian_Aydemir.jpeg b/preview-fall2024-info/images/team/Brian_Aydemir.jpeg new file mode 100644 index 000000000..7cd690dcb Binary files /dev/null and b/preview-fall2024-info/images/team/Brian_Aydemir.jpeg differ diff --git a/preview-fall2024-info/images/team/EvanWooldridge.png b/preview-fall2024-info/images/team/EvanWooldridge.png new file mode 100644 index 000000000..b7a1c8b00 Binary files /dev/null and b/preview-fall2024-info/images/team/EvanWooldridge.png differ diff --git a/preview-fall2024-info/images/team/MichaelCollins.png b/preview-fall2024-info/images/team/MichaelCollins.png new file mode 100644 index 000000000..192afd5cf Binary files /dev/null and b/preview-fall2024-info/images/team/MichaelCollins.png differ diff --git a/preview-fall2024-info/images/team/Rob-Gardner.png b/preview-fall2024-info/images/team/Rob-Gardner.png new file mode 100644 index 000000000..d74a2735d Binary files /dev/null and b/preview-fall2024-info/images/team/Rob-Gardner.png differ diff --git a/preview-fall2024-info/images/team/ThengVang.jpg b/preview-fall2024-info/images/team/ThengVang.jpg new file mode 100644 index 000000000..3f272bb37 Binary files /dev/null and b/preview-fall2024-info/images/team/ThengVang.jpg differ diff --git a/preview-fall2024-info/images/team/aaron-moate.png b/preview-fall2024-info/images/team/aaron-moate.png new file mode 100644 index 000000000..036d9098c Binary files /dev/null and b/preview-fall2024-info/images/team/aaron-moate.png differ diff --git a/preview-fall2024-info/images/team/abhinandan-saha.jpg b/preview-fall2024-info/images/team/abhinandan-saha.jpg new file mode 100644 index 000000000..9a9adc10f Binary files /dev/null and b/preview-fall2024-info/images/team/abhinandan-saha.jpg differ diff --git a/preview-fall2024-info/images/team/alperen-bakirci.jpg b/preview-fall2024-info/images/team/alperen-bakirci.jpg new file mode 100644 index 000000000..a9c1186e3 Binary files /dev/null and b/preview-fall2024-info/images/team/alperen-bakirci.jpg differ diff --git a/preview-fall2024-info/images/team/andrew_owen.jpg b/preview-fall2024-info/images/team/andrew_owen.jpg new file mode 100644 index 000000000..4617647df Binary files /dev/null and b/preview-fall2024-info/images/team/andrew_owen.jpg differ diff --git a/preview-fall2024-info/images/team/brian-bockelman.jpeg 
b/preview-fall2024-info/images/team/brian-bockelman.jpeg new file mode 100644 index 000000000..0ebb6eb0d Binary files /dev/null and b/preview-fall2024-info/images/team/brian-bockelman.jpeg differ diff --git a/preview-fall2024-info/images/team/brian-lin.jpg b/preview-fall2024-info/images/team/brian-lin.jpg new file mode 100644 index 000000000..8fa6934ec Binary files /dev/null and b/preview-fall2024-info/images/team/brian-lin.jpg differ diff --git a/preview-fall2024-info/images/team/bryna-goeking.jpg b/preview-fall2024-info/images/team/bryna-goeking.jpg new file mode 100644 index 000000000..69f1d7fbb Binary files /dev/null and b/preview-fall2024-info/images/team/bryna-goeking.jpg differ diff --git a/preview-fall2024-info/images/team/cameron_abplanalp.png b/preview-fall2024-info/images/team/cameron_abplanalp.png new file mode 100644 index 000000000..982b1977e Binary files /dev/null and b/preview-fall2024-info/images/team/cameron_abplanalp.png differ diff --git a/preview-fall2024-info/images/team/cannon-lock.jpg b/preview-fall2024-info/images/team/cannon-lock.jpg new file mode 100644 index 000000000..31afa7caf Binary files /dev/null and b/preview-fall2024-info/images/team/cannon-lock.jpg differ diff --git a/preview-fall2024-info/images/team/carl-edquist.jpg b/preview-fall2024-info/images/team/carl-edquist.jpg new file mode 100644 index 000000000..5a2c0375b Binary files /dev/null and b/preview-fall2024-info/images/team/carl-edquist.jpg differ diff --git a/preview-fall2024-info/images/team/carrie-brown.jpg b/preview-fall2024-info/images/team/carrie-brown.jpg new file mode 100644 index 000000000..31095bd89 Binary files /dev/null and b/preview-fall2024-info/images/team/carrie-brown.jpg differ diff --git a/preview-fall2024-info/images/team/christina-koch.jpg b/preview-fall2024-info/images/team/christina-koch.jpg new file mode 100644 index 000000000..455bad094 Binary files /dev/null and b/preview-fall2024-info/images/team/christina-koch.jpg differ diff --git a/preview-fall2024-info/images/team/cole-bollig.jpg b/preview-fall2024-info/images/team/cole-bollig.jpg new file mode 100644 index 000000000..f6c1052ca Binary files /dev/null and b/preview-fall2024-info/images/team/cole-bollig.jpg differ diff --git a/preview-fall2024-info/images/team/derek-weitzel.png b/preview-fall2024-info/images/team/derek-weitzel.png new file mode 100644 index 000000000..e46c6b25f Binary files /dev/null and b/preview-fall2024-info/images/team/derek-weitzel.png differ diff --git a/preview-fall2024-info/images/team/diego-davila.jpg b/preview-fall2024-info/images/team/diego-davila.jpg new file mode 100644 index 000000000..2f27b0317 Binary files /dev/null and b/preview-fall2024-info/images/team/diego-davila.jpg differ diff --git a/preview-fall2024-info/images/team/edgar-fajardo.jpg b/preview-fall2024-info/images/team/edgar-fajardo.jpg new file mode 100644 index 000000000..d6847b48e Binary files /dev/null and b/preview-fall2024-info/images/team/edgar-fajardo.jpg differ diff --git a/preview-fall2024-info/images/team/emelie-fuchs.jpg b/preview-fall2024-info/images/team/emelie-fuchs.jpg new file mode 100644 index 000000000..ec739fda2 Binary files /dev/null and b/preview-fall2024-info/images/team/emelie-fuchs.jpg differ diff --git a/preview-fall2024-info/images/team/emile_turatsinze.jpg b/preview-fall2024-info/images/team/emile_turatsinze.jpg new file mode 100644 index 000000000..207ba1cce Binary files /dev/null and b/preview-fall2024-info/images/team/emile_turatsinze.jpg differ diff --git 
a/preview-fall2024-info/images/team/emily_yao.jpg b/preview-fall2024-info/images/team/emily_yao.jpg new file mode 100644 index 000000000..da2a80f13 Binary files /dev/null and b/preview-fall2024-info/images/team/emily_yao.jpg differ diff --git a/preview-fall2024-info/images/team/emma_turetsky.jpg b/preview-fall2024-info/images/team/emma_turetsky.jpg new file mode 100644 index 000000000..631ecfce4 Binary files /dev/null and b/preview-fall2024-info/images/team/emma_turetsky.jpg differ diff --git a/preview-fall2024-info/images/team/frank-wuerthwein.jpg b/preview-fall2024-info/images/team/frank-wuerthwein.jpg new file mode 100644 index 000000000..bc5cb071a Binary files /dev/null and b/preview-fall2024-info/images/team/frank-wuerthwein.jpg differ diff --git a/preview-fall2024-info/images/team/greg-thain.jpg b/preview-fall2024-info/images/team/greg-thain.jpg new file mode 100644 index 000000000..10fc4785d Binary files /dev/null and b/preview-fall2024-info/images/team/greg-thain.jpg differ diff --git a/preview-fall2024-info/images/team/hannah-cheren.jpg b/preview-fall2024-info/images/team/hannah-cheren.jpg new file mode 100644 index 000000000..5dd58aed6 Binary files /dev/null and b/preview-fall2024-info/images/team/hannah-cheren.jpg differ diff --git a/preview-fall2024-info/images/team/haoming_meng.jpg b/preview-fall2024-info/images/team/haoming_meng.jpg new file mode 100644 index 000000000..487f5322b Binary files /dev/null and b/preview-fall2024-info/images/team/haoming_meng.jpg differ diff --git a/preview-fall2024-info/images/team/ian-ross.jpeg b/preview-fall2024-info/images/team/ian-ross.jpeg new file mode 100644 index 000000000..3ad2cc1d7 Binary files /dev/null and b/preview-fall2024-info/images/team/ian-ross.jpeg differ diff --git a/preview-fall2024-info/images/team/ian-ross.jpg b/preview-fall2024-info/images/team/ian-ross.jpg new file mode 100644 index 000000000..f5467bb6f Binary files /dev/null and b/preview-fall2024-info/images/team/ian-ross.jpg differ diff --git a/preview-fall2024-info/images/team/igor-sfiligoi.jpg b/preview-fall2024-info/images/team/igor-sfiligoi.jpg new file mode 100644 index 000000000..6c901b2ff Binary files /dev/null and b/preview-fall2024-info/images/team/igor-sfiligoi.jpg differ diff --git a/preview-fall2024-info/images/team/irene-landrum.png b/preview-fall2024-info/images/team/irene-landrum.png new file mode 100644 index 000000000..6bce28d19 Binary files /dev/null and b/preview-fall2024-info/images/team/irene-landrum.png differ diff --git a/preview-fall2024-info/images/team/jack-yuan.jpg b/preview-fall2024-info/images/team/jack-yuan.jpg new file mode 100644 index 000000000..f92fd2153 Binary files /dev/null and b/preview-fall2024-info/images/team/jack-yuan.jpg differ diff --git a/preview-fall2024-info/images/team/jaime-frey.jpg b/preview-fall2024-info/images/team/jaime-frey.jpg new file mode 100644 index 000000000..0c96f694a Binary files /dev/null and b/preview-fall2024-info/images/team/jaime-frey.jpg differ diff --git a/preview-fall2024-info/images/team/janet-stathas.jpg b/preview-fall2024-info/images/team/janet-stathas.jpg new file mode 100644 index 000000000..88b938689 Binary files /dev/null and b/preview-fall2024-info/images/team/janet-stathas.jpg differ diff --git a/preview-fall2024-info/images/team/jason-patton.png b/preview-fall2024-info/images/team/jason-patton.png new file mode 100644 index 000000000..63b5e2471 Binary files /dev/null and b/preview-fall2024-info/images/team/jason-patton.png differ diff --git 
a/preview-fall2024-info/images/team/jeff-peterson.jpg b/preview-fall2024-info/images/team/jeff-peterson.jpg new file mode 100644 index 000000000..03f212b95 Binary files /dev/null and b/preview-fall2024-info/images/team/jeff-peterson.jpg differ diff --git a/preview-fall2024-info/images/team/jessica-vera.png b/preview-fall2024-info/images/team/jessica-vera.png new file mode 100644 index 000000000..39d2d1702 Binary files /dev/null and b/preview-fall2024-info/images/team/jessica-vera.png differ diff --git a/preview-fall2024-info/images/team/joe-bartowiak.jpg b/preview-fall2024-info/images/team/joe-bartowiak.jpg new file mode 100644 index 000000000..391d81da6 Binary files /dev/null and b/preview-fall2024-info/images/team/joe-bartowiak.jpg differ diff --git a/preview-fall2024-info/images/team/joe_ruess.jpeg b/preview-fall2024-info/images/team/joe_ruess.jpeg new file mode 100644 index 000000000..40e2f80aa Binary files /dev/null and b/preview-fall2024-info/images/team/joe_ruess.jpeg differ diff --git a/preview-fall2024-info/images/team/john-knoeller.jpg b/preview-fall2024-info/images/team/john-knoeller.jpg new file mode 100644 index 000000000..eda38ee91 Binary files /dev/null and b/preview-fall2024-info/images/team/john-knoeller.jpg differ diff --git a/preview-fall2024-info/images/team/john-thiltges.jpg b/preview-fall2024-info/images/team/john-thiltges.jpg new file mode 100644 index 000000000..cae48ccaa Binary files /dev/null and b/preview-fall2024-info/images/team/john-thiltges.jpg differ diff --git a/preview-fall2024-info/images/team/john_parsons.jpeg b/preview-fall2024-info/images/team/john_parsons.jpeg new file mode 100644 index 000000000..27790e88f Binary files /dev/null and b/preview-fall2024-info/images/team/john_parsons.jpeg differ diff --git a/preview-fall2024-info/images/team/josie-watkins.jpeg b/preview-fall2024-info/images/team/josie-watkins.jpeg new file mode 100644 index 000000000..445358741 Binary files /dev/null and b/preview-fall2024-info/images/team/josie-watkins.jpeg differ diff --git a/preview-fall2024-info/images/team/josie-watkins.png b/preview-fall2024-info/images/team/josie-watkins.png new file mode 100644 index 000000000..421c7dd39 Binary files /dev/null and b/preview-fall2024-info/images/team/josie-watkins.png differ diff --git a/preview-fall2024-info/images/team/justin_hiemstra.jpg b/preview-fall2024-info/images/team/justin_hiemstra.jpg new file mode 100644 index 000000000..7a819cdc2 Binary files /dev/null and b/preview-fall2024-info/images/team/justin_hiemstra.jpg differ diff --git a/preview-fall2024-info/images/team/kent_cramer.jpeg b/preview-fall2024-info/images/team/kent_cramer.jpeg new file mode 100644 index 000000000..ac3fd4e9e Binary files /dev/null and b/preview-fall2024-info/images/team/kent_cramer.jpeg differ diff --git a/preview-fall2024-info/images/team/lauren-michael.png b/preview-fall2024-info/images/team/lauren-michael.png new file mode 100644 index 000000000..b4c32932d Binary files /dev/null and b/preview-fall2024-info/images/team/lauren-michael.png differ diff --git a/preview-fall2024-info/images/team/lili_bicoy.jpg b/preview-fall2024-info/images/team/lili_bicoy.jpg new file mode 100644 index 000000000..bc46a64fb Binary files /dev/null and b/preview-fall2024-info/images/team/lili_bicoy.jpg differ diff --git a/preview-fall2024-info/images/team/lincoln-bryant.png b/preview-fall2024-info/images/team/lincoln-bryant.png new file mode 100644 index 000000000..ae4ad0c36 Binary files /dev/null and b/preview-fall2024-info/images/team/lincoln-bryant.png differ diff 
--git a/preview-fall2024-info/images/team/marian-zvada.jpg b/preview-fall2024-info/images/team/marian-zvada.jpg new file mode 100644 index 000000000..ccafe1051 Binary files /dev/null and b/preview-fall2024-info/images/team/marian-zvada.jpg differ diff --git a/preview-fall2024-info/images/team/mark-coatsworth.jpg b/preview-fall2024-info/images/team/mark-coatsworth.jpg new file mode 100644 index 000000000..4417d820a Binary files /dev/null and b/preview-fall2024-info/images/team/mark-coatsworth.jpg differ diff --git a/preview-fall2024-info/images/team/mark-truttmann.jpg b/preview-fall2024-info/images/team/mark-truttmann.jpg new file mode 100644 index 000000000..196f8a3bd Binary files /dev/null and b/preview-fall2024-info/images/team/mark-truttmann.jpg differ diff --git a/preview-fall2024-info/images/team/mats-rynge.jpg b/preview-fall2024-info/images/team/mats-rynge.jpg new file mode 100644 index 000000000..c2f526df7 Binary files /dev/null and b/preview-fall2024-info/images/team/mats-rynge.jpg differ diff --git a/preview-fall2024-info/images/team/matt_westphall.jpeg b/preview-fall2024-info/images/team/matt_westphall.jpeg new file mode 100644 index 000000000..3857eca52 Binary files /dev/null and b/preview-fall2024-info/images/team/matt_westphall.jpeg differ diff --git a/preview-fall2024-info/images/team/matyas-selmeci.jpg b/preview-fall2024-info/images/team/matyas-selmeci.jpg new file mode 100644 index 000000000..da9e4cba8 Binary files /dev/null and b/preview-fall2024-info/images/team/matyas-selmeci.jpg differ diff --git a/preview-fall2024-info/images/team/max_hartke.jpg b/preview-fall2024-info/images/team/max_hartke.jpg new file mode 100644 index 000000000..d984af3da Binary files /dev/null and b/preview-fall2024-info/images/team/max_hartke.jpg differ diff --git a/preview-fall2024-info/images/team/mihir_manna.jpeg b/preview-fall2024-info/images/team/mihir_manna.jpeg new file mode 100644 index 000000000..5542ed825 Binary files /dev/null and b/preview-fall2024-info/images/team/mihir_manna.jpeg differ diff --git a/preview-fall2024-info/images/team/mike-stanfield.jpg b/preview-fall2024-info/images/team/mike-stanfield.jpg new file mode 100644 index 000000000..bf42e0fa7 Binary files /dev/null and b/preview-fall2024-info/images/team/mike-stanfield.jpg differ diff --git a/preview-fall2024-info/images/team/miron-livny.png b/preview-fall2024-info/images/team/miron-livny.png new file mode 100644 index 000000000..a762f7e22 Binary files /dev/null and b/preview-fall2024-info/images/team/miron-livny.png differ diff --git a/preview-fall2024-info/images/team/pascal_paschos.png b/preview-fall2024-info/images/team/pascal_paschos.png new file mode 100644 index 000000000..845c783d1 Binary files /dev/null and b/preview-fall2024-info/images/team/pascal_paschos.png differ diff --git a/preview-fall2024-info/images/team/patrick_lubben.jpeg b/preview-fall2024-info/images/team/patrick_lubben.jpeg new file mode 100644 index 000000000..b44b47847 Binary files /dev/null and b/preview-fall2024-info/images/team/patrick_lubben.jpeg differ diff --git a/preview-fall2024-info/images/team/rachel-lombardi.jpg b/preview-fall2024-info/images/team/rachel-lombardi.jpg new file mode 100644 index 000000000..1d1231e80 Binary files /dev/null and b/preview-fall2024-info/images/team/rachel-lombardi.jpg differ diff --git a/preview-fall2024-info/images/team/rishideep.jpg b/preview-fall2024-info/images/team/rishideep.jpg new file mode 100644 index 000000000..ba7c415c6 Binary files /dev/null and b/preview-fall2024-info/images/team/rishideep.jpg 
differ diff --git a/preview-fall2024-info/images/team/ryan-toh.jpeg b/preview-fall2024-info/images/team/ryan-toh.jpeg new file mode 100644 index 000000000..89595d950 Binary files /dev/null and b/preview-fall2024-info/images/team/ryan-toh.jpeg differ diff --git a/preview-fall2024-info/images/team/ryan_jacob.jpg b/preview-fall2024-info/images/team/ryan_jacob.jpg new file mode 100644 index 000000000..2545d5821 Binary files /dev/null and b/preview-fall2024-info/images/team/ryan_jacob.jpg differ diff --git a/preview-fall2024-info/images/team/shawn-mckee.jpg b/preview-fall2024-info/images/team/shawn-mckee.jpg new file mode 100644 index 000000000..6c388489e Binary files /dev/null and b/preview-fall2024-info/images/team/shawn-mckee.jpg differ diff --git a/preview-fall2024-info/images/team/shirley_obih.jpg b/preview-fall2024-info/images/team/shirley_obih.jpg new file mode 100644 index 000000000..1d629fb62 Binary files /dev/null and b/preview-fall2024-info/images/team/shirley_obih.jpg differ diff --git a/preview-fall2024-info/images/team/silhouette.png b/preview-fall2024-info/images/team/silhouette.png new file mode 100644 index 000000000..74fc0220a Binary files /dev/null and b/preview-fall2024-info/images/team/silhouette.png differ diff --git a/preview-fall2024-info/images/team/tae_kidd.jpg b/preview-fall2024-info/images/team/tae_kidd.jpg new file mode 100644 index 000000000..21c6f31b9 Binary files /dev/null and b/preview-fall2024-info/images/team/tae_kidd.jpg differ diff --git a/preview-fall2024-info/images/team/taylor_halvensleben.png b/preview-fall2024-info/images/team/taylor_halvensleben.png new file mode 100644 index 000000000..bb79fd683 Binary files /dev/null and b/preview-fall2024-info/images/team/taylor_halvensleben.png differ diff --git a/preview-fall2024-info/images/team/team_photos/team-1998-orig.png b/preview-fall2024-info/images/team/team_photos/team-1998-orig.png new file mode 100644 index 000000000..2825547ce Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-1998-orig.png differ diff --git a/preview-fall2024-info/images/team/team_photos/team-1998.jpg b/preview-fall2024-info/images/team/team_photos/team-1998.jpg new file mode 100644 index 000000000..1180d9ca2 Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-1998.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-1999.jpg b/preview-fall2024-info/images/team/team_photos/team-1999.jpg new file mode 100644 index 000000000..cd5d819e9 Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-1999.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2002-10-orig.jpg b/preview-fall2024-info/images/team/team_photos/team-2002-10-orig.jpg new file mode 100644 index 000000000..54efd2bbb Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2002-10-orig.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2002-10.jpg b/preview-fall2024-info/images/team/team_photos/team-2002-10.jpg new file mode 100644 index 000000000..1aed9f4ed Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2002-10.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2003a-orig.jpg b/preview-fall2024-info/images/team/team_photos/team-2003a-orig.jpg new file mode 100644 index 000000000..a31b16003 Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2003a-orig.jpg differ diff --git 
a/preview-fall2024-info/images/team/team_photos/team-2003a.jpg b/preview-fall2024-info/images/team/team_photos/team-2003a.jpg new file mode 100644 index 000000000..7b1bb3258 Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2003a.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2003b.jpg b/preview-fall2024-info/images/team/team_photos/team-2003b.jpg new file mode 100644 index 000000000..87adbdaee Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2003b.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2006-04.jpg b/preview-fall2024-info/images/team/team_photos/team-2006-04.jpg new file mode 100644 index 000000000..a3bb487ee Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2006-04.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2007-05-orig.jpg b/preview-fall2024-info/images/team/team_photos/team-2007-05-orig.jpg new file mode 100644 index 000000000..58064fac9 Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2007-05-orig.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2007-05.jpg b/preview-fall2024-info/images/team/team_photos/team-2007-05.jpg new file mode 100644 index 000000000..c92929709 Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2007-05.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2008-05-orig.jpg b/preview-fall2024-info/images/team/team_photos/team-2008-05-orig.jpg new file mode 100644 index 000000000..1eb199848 Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2008-05-orig.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2008-05.jpg b/preview-fall2024-info/images/team/team_photos/team-2008-05.jpg new file mode 100644 index 000000000..6e1a94a85 Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2008-05.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2009.jpg b/preview-fall2024-info/images/team/team_photos/team-2009.jpg new file mode 100644 index 000000000..9c7b1e929 Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2009.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2010.jpg b/preview-fall2024-info/images/team/team_photos/team-2010.jpg new file mode 100644 index 000000000..1c1f413ec Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2010.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2011-orig.jpg b/preview-fall2024-info/images/team/team_photos/team-2011-orig.jpg new file mode 100644 index 000000000..065409b4a Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2011-orig.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2011.jpg b/preview-fall2024-info/images/team/team_photos/team-2011.jpg new file mode 100644 index 000000000..6d39e2bb3 Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2011.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2012-orig.jpg b/preview-fall2024-info/images/team/team_photos/team-2012-orig.jpg new file mode 100644 index 000000000..6a1518d30 Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2012-orig.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2012.jpg 
b/preview-fall2024-info/images/team/team_photos/team-2012.jpg new file mode 100644 index 000000000..909cf54a3 Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2012.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2013-orig.jpg b/preview-fall2024-info/images/team/team_photos/team-2013-orig.jpg new file mode 100644 index 000000000..2c4609ebf Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2013-orig.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2013.jpg b/preview-fall2024-info/images/team/team_photos/team-2013.jpg new file mode 100644 index 000000000..ec841296b Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2013.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2014-orig.jpg b/preview-fall2024-info/images/team/team_photos/team-2014-orig.jpg new file mode 100644 index 000000000..9b92e5674 Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2014-orig.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2014.jpg b/preview-fall2024-info/images/team/team_photos/team-2014.jpg new file mode 100644 index 000000000..f37e99ec2 Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2014.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2015-orig.jpg b/preview-fall2024-info/images/team/team_photos/team-2015-orig.jpg new file mode 100644 index 000000000..f1b69ada1 Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2015-orig.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2015.jpg b/preview-fall2024-info/images/team/team_photos/team-2015.jpg new file mode 100644 index 000000000..ef9877b80 Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2015.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2016.jpg b/preview-fall2024-info/images/team/team_photos/team-2016.jpg new file mode 100644 index 000000000..c204f006f Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2016.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2017.jpg b/preview-fall2024-info/images/team/team_photos/team-2017.jpg new file mode 100644 index 000000000..863da6b2d Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2017.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2018.jpg b/preview-fall2024-info/images/team/team_photos/team-2018.jpg new file mode 100644 index 000000000..c4dd7f1f3 Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2018.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2019.jpg b/preview-fall2024-info/images/team/team_photos/team-2019.jpg new file mode 100644 index 000000000..a5f4fe7c4 Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2019.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2020.jpg b/preview-fall2024-info/images/team/team_photos/team-2020.jpg new file mode 100644 index 000000000..3afca7448 Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2020.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2021.jpg b/preview-fall2024-info/images/team/team_photos/team-2021.jpg new file mode 100644 index 000000000..9a19732bf Binary files /dev/null and 
b/preview-fall2024-info/images/team/team_photos/team-2021.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2022.jpg b/preview-fall2024-info/images/team/team_photos/team-2022.jpg new file mode 100644 index 000000000..bb8777521 Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2022.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2023.jpg b/preview-fall2024-info/images/team/team_photos/team-2023.jpg new file mode 100644 index 000000000..074991d5a Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2023.jpg differ diff --git a/preview-fall2024-info/images/team/team_photos/team-2024.jpg b/preview-fall2024-info/images/team/team_photos/team-2024.jpg new file mode 100644 index 000000000..bbd916112 Binary files /dev/null and b/preview-fall2024-info/images/team/team_photos/team-2024.jpg differ diff --git a/preview-fall2024-info/images/team/tim-cartwright.jpg b/preview-fall2024-info/images/team/tim-cartwright.jpg new file mode 100644 index 000000000..5a6ac61b8 Binary files /dev/null and b/preview-fall2024-info/images/team/tim-cartwright.jpg differ diff --git a/preview-fall2024-info/images/team/tim-slauson.jpg b/preview-fall2024-info/images/team/tim-slauson.jpg new file mode 100644 index 000000000..95ca383bd Binary files /dev/null and b/preview-fall2024-info/images/team/tim-slauson.jpg differ diff --git a/preview-fall2024-info/images/team/tim-theisen.png b/preview-fall2024-info/images/team/tim-theisen.png new file mode 100644 index 000000000..854b4d2da Binary files /dev/null and b/preview-fall2024-info/images/team/tim-theisen.png differ diff --git a/preview-fall2024-info/images/team/todd-miller.png b/preview-fall2024-info/images/team/todd-miller.png new file mode 100644 index 000000000..3f41dcc17 Binary files /dev/null and b/preview-fall2024-info/images/team/todd-miller.png differ diff --git a/preview-fall2024-info/images/team/todd-tannenbaum.jpg b/preview-fall2024-info/images/team/todd-tannenbaum.jpg new file mode 100644 index 000000000..795c062dc Binary files /dev/null and b/preview-fall2024-info/images/team/todd-tannenbaum.jpg differ diff --git a/preview-fall2024-info/images/team/william_swanson.jpg b/preview-fall2024-info/images/team/william_swanson.jpg new file mode 100644 index 000000000..dc92bbf33 Binary files /dev/null and b/preview-fall2024-info/images/team/william_swanson.jpg differ diff --git a/preview-fall2024-info/images/team/yuxiao.jpg b/preview-fall2024-info/images/team/yuxiao.jpg new file mode 100644 index 000000000..344e0bf88 Binary files /dev/null and b/preview-fall2024-info/images/team/yuxiao.jpg differ diff --git a/preview-fall2024-info/images/team/zach-miller.jpg b/preview-fall2024-info/images/team/zach-miller.jpg new file mode 100644 index 000000000..ef48b036e Binary files /dev/null and b/preview-fall2024-info/images/team/zach-miller.jpg differ diff --git a/preview-fall2024-info/images/townsend.jpg b/preview-fall2024-info/images/townsend.jpg new file mode 100644 index 000000000..3fe4969e2 Binary files /dev/null and b/preview-fall2024-info/images/townsend.jpg differ diff --git a/preview-fall2024-info/images/twitter.png b/preview-fall2024-info/images/twitter.png new file mode 100644 index 000000000..4a76f4719 Binary files /dev/null and b/preview-fall2024-info/images/twitter.png differ diff --git a/preview-fall2024-info/images/use-transfer-staging.png b/preview-fall2024-info/images/use-transfer-staging.png new file mode 100644 index 000000000..b37d922aa Binary files 
/dev/null and b/preview-fall2024-info/images/use-transfer-staging.png differ diff --git a/preview-fall2024-info/images/uw-crest.svg b/preview-fall2024-info/images/uw-crest.svg new file mode 100644 index 000000000..80f073363 --- /dev/null +++ b/preview-fall2024-info/images/uw-crest.svg @@ -0,0 +1 @@ +crest \ No newline at end of file diff --git a/preview-fall2024-info/images/uw-sm-red.png b/preview-fall2024-info/images/uw-sm-red.png new file mode 100644 index 000000000..1895498be Binary files /dev/null and b/preview-fall2024-info/images/uw-sm-red.png differ diff --git a/preview-fall2024-info/images/uwlogo_web_sm_fl_wht.png b/preview-fall2024-info/images/uwlogo_web_sm_fl_wht.png new file mode 100644 index 000000000..12e5eae9e Binary files /dev/null and b/preview-fall2024-info/images/uwlogo_web_sm_fl_wht.png differ diff --git a/preview-fall2024-info/images/uwmadison-campus-user-map.png b/preview-fall2024-info/images/uwmadison-campus-user-map.png new file mode 100644 index 000000000..8daab86bc Binary files /dev/null and b/preview-fall2024-info/images/uwmadison-campus-user-map.png differ diff --git a/preview-fall2024-info/images/v_shadow1.gif b/preview-fall2024-info/images/v_shadow1.gif new file mode 100644 index 000000000..609a0a85d Binary files /dev/null and b/preview-fall2024-info/images/v_shadow1.gif differ diff --git a/preview-fall2024-info/images/vanveen.jpg b/preview-fall2024-info/images/vanveen.jpg new file mode 100644 index 000000000..46cca8a58 Binary files /dev/null and b/preview-fall2024-info/images/vanveen.jpg differ diff --git a/preview-fall2024-info/images/vanveen_image001.png b/preview-fall2024-info/images/vanveen_image001.png new file mode 100644 index 000000000..43a3fb22f Binary files /dev/null and b/preview-fall2024-info/images/vanveen_image001.png differ diff --git a/preview-fall2024-info/includes/CHTC-Engagement-Report-2010-2011.pdf b/preview-fall2024-info/includes/CHTC-Engagement-Report-2010-2011.pdf new file mode 100644 index 000000000..a32c18806 Binary files /dev/null and b/preview-fall2024-info/includes/CHTC-Engagement-Report-2010-2011.pdf differ diff --git a/preview-fall2024-info/includes/CHTC-Engagement-Report-2011-2012.pdf b/preview-fall2024-info/includes/CHTC-Engagement-Report-2011-2012.pdf new file mode 100644 index 000000000..8de6e4235 Binary files /dev/null and b/preview-fall2024-info/includes/CHTC-Engagement-Report-2011-2012.pdf differ diff --git a/preview-fall2024-info/includes/CHTC_logo_color_horiz.png b/preview-fall2024-info/includes/CHTC_logo_color_horiz.png new file mode 100644 index 000000000..cd511a4d5 Binary files /dev/null and b/preview-fall2024-info/includes/CHTC_logo_color_horiz.png differ diff --git a/preview-fall2024-info/includes/Gilson_bootstrap_hat_banner647.jpg b/preview-fall2024-info/includes/Gilson_bootstrap_hat_banner647.jpg new file mode 100644 index 000000000..7c4d69e8e Binary files /dev/null and b/preview-fall2024-info/includes/Gilson_bootstrap_hat_banner647.jpg differ diff --git a/preview-fall2024-info/includes/Jobstext.htm b/preview-fall2024-info/includes/Jobstext.htm new file mode 100644 index 000000000..859ae493f --- /dev/null +++ b/preview-fall2024-info/includes/Jobstext.htm @@ -0,0 +1,29 @@ + + + + + + + + + + + +


+

Researcher

+

Research Computing Facilitator PVL #75816

+

The candidate will demonstrate an appreciation for a wide range of compute- and data-intensive research and be experienced in at least one research area such as life science, computational science, physical science, or social science. The candidate must understand how high-performance and high-throughput computing technologies are used to enable scientific discovery, and must have at least one year of experience running an MPI-based science application on a high-performance cluster using PBS, SLURM, or a similar scheduler. The role requires understanding user requirements and translating them into functional, dependable solutions; working with scientific programming languages such as MATLAB or R; and strong troubleshooting skills. Basic programming skills in scripting languages such as Unix shell, Python, or Perl are also required. Ideal candidates will demonstrate experience applying advanced software tools and computing technologies in a dynamic and diverse research setting. Experience with national scientific computing initiatives such as the OSG Consortium or XSEDE is a plus.
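For orientation, the scheduler experience described above typically amounts to writing and submitting batch scripts like the minimal SLURM sketch below. This is a generic illustration, not a CHTC-specific script: the job name, node counts, and application binary are hypothetical placeholders.

    #!/bin/bash
    # Hypothetical SLURM batch script for an MPI-based application:
    # request two nodes with 16 MPI ranks each and a one-hour time limit.
    #SBATCH --job-name=mpi-example
    #SBATCH --nodes=2
    #SBATCH --ntasks-per-node=16
    #SBATCH --time=01:00:00

    # srun launches all 32 ranks across the allocated nodes.
    srun ./my_mpi_app input.dat

A script like this is handed to the scheduler with sbatch, which holds the job in the queue until the requested nodes are free.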

+

Principal Duties:

+

60% - Help campus researchers leverage state-of-the-art distributed high-throughput and high-performance computing capabilities to accelerate their data acquisition and analysis. Work with researchers to understand their workflows and facilitate running their stand-alone applications on CHTC and on other national scientific computing resources.

+

30% - Create documents that describe given research problems and propose how to apply computing techniques to address those problems. Document case management activities and help transfer documented solutions to research groups on campus for future project applications.

+

10% - Identify ways that our existing research computing middleware and infrastructure can enable or automate scientific discovery and work with the infrastructure and software development teams to address gaps where existing middleware/infrastructure is insufficient.

+


+ + diff --git a/preview-fall2024-info/includes/MATLABandR.html b/preview-fall2024-info/includes/MATLABandR.html new file mode 100755 index 000000000..237848c65 --- /dev/null +++ b/preview-fall2024-info/includes/MATLABandR.html @@ -0,0 +1,5 @@ + + + + + diff --git a/preview-fall2024-info/includes/RandMatlab/InstalledRpms b/preview-fall2024-info/includes/RandMatlab/InstalledRpms new file mode 100644 index 000000000..b1911ddd6 --- /dev/null +++ b/preview-fall2024-info/includes/RandMatlab/InstalledRpms @@ -0,0 +1,975 @@ +acl-2.2.39-6.el5.x86_64 +acpid-1.0.4-9.el5_4.2.x86_64 +alchemist-1.0.36-2.el5.x86_64 +alsa-lib-1.0.17-1.el5.i386 +alsa-lib-1.0.17-1.el5.x86_64 +alsa-lib-devel-1.0.17-1.el5.x86_64 +alsa-utils-1.0.17-1.el5.x86_64 +amtu-1.0.6-2.el5.x86_64 +anacron-2.3-45.el5.x86_64 +antlr-2.7.6-4jpp.2.x86_64 +apr-1.2.7-11.el5_6.5.i386 +apr-1.2.7-11.el5_6.5.x86_64 +apr-util-1.2.7-11.el5_5.2.i386 +apr-util-1.2.7-11.el5_5.2.x86_64 +aspell-0.60.3-7.1.i386 +aspell-0.60.3-7.1.x86_64 +aspell-en-6.0-2.1.x86_64 +at-3.1.8-84.el5.x86_64 +atk-1.12.2-1.fc6.i386 +atk-1.12.2-1.fc6.x86_64 +atk-devel-1.12.2-1.fc6.x86_64 +at-spi-1.7.11-3.el5.x86_64 +attr-2.4.32-1.1.x86_64 +audiofile-0.2.6-5.x86_64 +audiofile-devel-0.2.6-5.x86_64 +audit-1.7.18-2.el5.x86_64 +audit-libs-1.7.18-2.el5.i386 +audit-libs-1.7.18-2.el5.x86_64 +audit-libs-python-1.7.18-2.el5.x86_64 +augeas-0.9.0-1.el5.x86_64 +augeas-libs-0.9.0-1.el5.x86_64 +authconfig-5.3.21-6.el5.x86_64 +authconfig-gtk-5.3.21-6.el5.x86_64 +autoconf-2.59-12.noarch +autofs-5.0.1-0.rc2.143.el5_6.2.x86_64 +automake14-1.4p6-13.el5.1.noarch +automake15-1.5-16.el5.2.noarch +automake16-1.6.3-8.el5.1.noarch +automake17-1.7.9-7.el5.2.noarch +automake-1.9.6-2.3.el5.noarch +avahi-0.6.16-10.el5_6.i386 +avahi-0.6.16-10.el5_6.x86_64 +avahi-glib-0.6.16-10.el5_6.i386 +avahi-glib-0.6.16-10.el5_6.x86_64 +basesystem-8.0-5.1.1.noarch +bash-3.2-32.el5.x86_64 +bc-1.06-21.x86_64 +bind-libs-9.3.6-16.P1.el5.x86_64 +bind-utils-9.3.6-16.P1.el5.x86_64 +binutils-2.17.50.0.6-14.el5.x86_64 +bison-2.3-2.1.x86_64 +bitmap-fonts-0.3-5.1.1.noarch +bitstream-vera-fonts-1.10-7.noarch +bluez-gnome-0.5-5.fc6.x86_64 +bluez-hcidump-1.32-1.x86_64 +bluez-libs-3.7-1.1.x86_64 +bluez-utils-3.7-2.2.x86_64 +boost-1.33.1-10.el5.i386 +boost-1.33.1-10.el5.x86_64 +boost-devel-1.33.1-10.el5.i386 +boost-devel-1.33.1-10.el5.x86_64 +bridge-utils-1.1-2.x86_64 +busybox-1.2.0-7.el5.x86_64 +byacc-1.9-29.2.2.x86_64 +bzip2-1.0.3-6.el5_5.x86_64 +bzip2-devel-1.0.3-6.el5_5.i386 +bzip2-devel-1.0.3-6.el5_5.x86_64 +bzip2-libs-1.0.3-6.el5_5.i386 +bzip2-libs-1.0.3-6.el5_5.x86_64 +cairo-1.2.4-5.el5.i386 +cairo-1.2.4-5.el5.x86_64 +cairo-devel-1.2.4-5.el5.x86_64 +ccid-1.3.8-1.el5.x86_64 +checkpolicy-1.33.1-6.el5.x86_64 +chkconfig-1.3.30.2-2.el5.x86_64 +chkfontpath-1.10.1-1.1.x86_64 +comps-extras-11.1-1.1.noarch +condor-7.7.2-1.x86_64 +conman-0.1.9.2-8.el5.x86_64 +coolkey-1.1.0-15.el5.i386 +coolkey-1.1.0-15.el5.x86_64 +coolkey-devel-1.1.0-15.el5.i386 +coolkey-devel-1.1.0-15.el5.x86_64 +coreutils-5.97-23.el5_6.4.x86_64 +cpio-2.6-23.el5_4.1.x86_64 +cpp-4.1.2-50.el5.x86_64 +cpuspeed-1.2.1-10.el5.x86_64 +cracklib-2.8.9-3.3.i386 +cracklib-2.8.9-3.3.x86_64 +cracklib-dicts-2.8.9-3.3.x86_64 +crash-4.1.2-8.el5.x86_64 +crontabs-1.10-8.noarch +cryptsetup-luks-1.0.3-5.el5.i386 +cryptsetup-luks-1.0.3-5.el5.x86_64 +cscope-15.5-15.1.el5_3.1.x86_64 +ctags-5.6-1.1.x86_64 +cups-libs-1.3.7-26.el5_6.1.i386 +cups-libs-1.3.7-26.el5_6.1.x86_64 +curl-7.15.5-9.el5_6.3.i386 +curl-7.15.5-9.el5_6.3.x86_64 +curl-devel-7.15.5-9.el5_6.3.i386 +curl-devel-7.15.5-9.el5_6.3.x86_64 
+cvs-1.11.22-7.el5.x86_64 +cyrus-sasl-2.1.22-5.el5_4.3.x86_64 +cyrus-sasl-devel-2.1.22-5.el5_4.3.i386 +cyrus-sasl-devel-2.1.22-5.el5_4.3.x86_64 +cyrus-sasl-lib-2.1.22-5.el5_4.3.i386 +cyrus-sasl-lib-2.1.22-5.el5_4.3.x86_64 +cyrus-sasl-md5-2.1.22-5.el5_4.3.x86_64 +cyrus-sasl-plain-2.1.22-5.el5_4.3.i386 +cyrus-sasl-plain-2.1.22-5.el5_4.3.x86_64 +db4-4.3.29-10.el5_5.2.i386 +db4-4.3.29-10.el5_5.2.x86_64 +db4-devel-4.3.29-10.el5_5.2.i386 +db4-devel-4.3.29-10.el5_5.2.x86_64 +dbus-1.1.2-16.el5_7.x86_64 +dbus-devel-1.1.2-16.el5_7.i386 +dbus-devel-1.1.2-16.el5_7.x86_64 +dbus-glib-0.73-10.el5_5.i386 +dbus-glib-0.73-10.el5_5.x86_64 +dbus-glib-devel-0.73-10.el5_5.x86_64 +dbus-libs-1.1.2-16.el5_7.i386 +dbus-libs-1.1.2-16.el5_7.x86_64 +dbus-python-0.70-9.el5_4.x86_64 +Deployment_Guide-en-US-5.2-11.noarch +desktop-file-utils-0.10-7.x86_64 +dev86-0.16.17-2.2.x86_64 +device-mapper-1.02.55-2.el5.i386 +device-mapper-1.02.55-2.el5.x86_64 +device-mapper-event-1.02.55-2.el5.x86_64 +device-mapper-multipath-0.4.7-42.el5_6.2.x86_64 +dhclient-3.0.5-29.el5_7.1.x86_64 +dhcpv6-client-1.0.10-20.el5.x86_64 +diffstat-1.41-1.2.3.el5.x86_64 +diffutils-2.8.1-15.2.3.el5.x86_64 +dmidecode-2.10-3.el5.x86_64 +dmraid-1.0.0.rc13-63.el5.x86_64 +dmraid-events-1.0.0.rc13-63.el5.x86_64 +dnsmasq-2.45-1.1.el5_3.x86_64 +docbook-dtds-1.0-30.1.noarch +dogtail-0.6.1-4.el5.noarch +dos2unix-3.1-27.2.el5.x86_64 +dosfstools-2.11-9.el5.x86_64 +doxygen-1.4.7-1.1.x86_64 +dump-0.4b41-5.el5.x86_64 +e2fsprogs-1.39-23.el5_5.1.x86_64 +e2fsprogs-devel-1.39-23.el5_5.1.x86_64 +e2fsprogs-libs-1.39-23.el5_5.1.i386 +e2fsprogs-libs-1.39-23.el5_5.1.x86_64 +e4fsprogs-1.41.12-2.el5.x86_64 +e4fsprogs-libs-1.41.12-2.el5.i386 +e4fsprogs-libs-1.41.12-2.el5.x86_64 +ebtables-2.0.9-5.el5.x86_64 +ed-0.2-39.el5_2.x86_64 +eject-2.1.5-4.2.el5.x86_64 +elfutils-0.137-3.el5.x86_64 +elfutils-libelf-0.137-3.el5.i386 +elfutils-libelf-0.137-3.el5.x86_64 +elfutils-libelf-devel-0.137-3.el5.x86_64 +elfutils-libelf-devel-static-0.137-3.el5.x86_64 +elfutils-libs-0.137-3.el5.x86_64 +elinks-0.11.1-6.el5_4.1.x86_64 +epel-release-5-4.noarch +esound-0.2.36-3.x86_64 +esound-devel-0.2.36-3.x86_64 +ethtool-6-4.el5.x86_64 +expat-1.95.8-8.3.el5_5.3.i386 +expat-1.95.8-8.3.el5_5.3.x86_64 +expat-devel-1.95.8-8.3.el5_5.3.i386 +expat-devel-1.95.8-8.3.el5_5.3.x86_64 +facter-1.6.0-2.el5.noarch +fbset-2.1-22.x86_64 +file-4.17-15.el5_3.1.x86_64 +filesystem-2.4.0-3.el5.x86_64 +findutils-4.2.27-6.el5.x86_64 +finger-0.17-33.x86_64 +fipscheck-1.2.0-1.el5.x86_64 +fipscheck-lib-1.2.0-1.el5.x86_64 +firstboot-1.4.27.8-1.el5.x86_64 +firstboot-tui-1.4.27.8-1.el5.x86_64 +flex-2.5.4a-41.fc6.x86_64 +fontconfig-2.4.1-7.el5.i386 +fontconfig-2.4.1-7.el5.x86_64 +fontconfig-devel-2.4.1-7.el5.x86_64 +fping-2.4b2-7.el5.x86_64 +freetype-2.2.1-28.el5_5.1.i386 +freetype-2.2.1-28.el5_5.1.x86_64 +freetype-devel-2.2.1-28.el5_5.1.x86_64 +ftp-0.17-35.el5.x86_64 +gail-1.9.2-3.el5.x86_64 +gamin-0.1.7-8.el5.i386 +gamin-0.1.7-8.el5.x86_64 +gamin-python-0.1.7-8.el5.x86_64 +ganglia-3.0.7-1.el5.x86_64 +ganglia-gmond-3.1.7-1.x86_64 +gawk-3.1.5-14.el5.x86_64 +gcc-4.1.2-50.el5.x86_64 +gcc44-4.4.4-13.el5.x86_64 +gcc44-c++-4.4.4-13.el5.x86_64 +gcc44-gfortran-4.4.4-13.el5.x86_64 +gcc-c++-4.1.2-50.el5.x86_64 +gcc-gfortran-4.1.2-50.el5.x86_64 +GConf2-2.14.0-9.el5.i386 +GConf2-2.14.0-9.el5.x86_64 +GConf2-devel-2.14.0-9.el5.x86_64 +gd-2.0.33-9.4.el5_4.2.x86_64 +gdb-7.0.1-32.el5_6.2.x86_64 +gdbm-1.8.0-26.2.1.el5_6.1.i386 +gdbm-1.8.0-26.2.1.el5_6.1.x86_64 +gdbm-devel-1.8.0-26.2.1.el5_6.1.i386 +gdbm-devel-1.8.0-26.2.1.el5_6.1.x86_64 
+gettext-0.17-1.el5.i386 +gettext-0.17-1.el5.x86_64 +ghostscript-8.70-6.el5.i386 +ghostscript-8.70-6.el5.x86_64 +ghostscript-fonts-5.50-13.1.1.noarch +gjdoc-0.7.7-12.el5.x86_64 +glib2-2.12.3-4.el5_3.1.i386 +glib2-2.12.3-4.el5_3.1.x86_64 +glib2-devel-2.12.3-4.el5_3.1.x86_64 +glibc-2.5-58.el5_6.3.i686 +glibc-2.5-58.el5_6.3.x86_64 +glibc-common-2.5-58.el5_6.3.x86_64 +glibc-devel-2.5-58.el5_6.3.i386 +glibc-devel-2.5-58.el5_6.3.x86_64 +glibc-headers-2.5-58.el5_6.3.x86_64 +gmp-4.1.4-10.el5.i386 +gmp-4.1.4-10.el5.x86_64 +gmp-devel-4.1.4-10.el5.i386 +gmp-devel-4.1.4-10.el5.x86_64 +gnome-doc-utils-0.8.0-2.fc6.noarch +gnome-keyring-0.6.0-1.fc6.x86_64 +gnome-keyring-devel-0.6.0-1.fc6.x86_64 +gnome-mime-data-2.4.2-3.1.x86_64 +gnome-mount-0.5-3.el5.x86_64 +gnome-python2-2.16.0-1.fc6.x86_64 +gnome-python2-bonobo-2.16.0-1.fc6.x86_64 +gnome-python2-canvas-2.16.0-1.fc6.x86_64 +gnome-python2-extras-2.14.2-7.el5.x86_64 +gnome-python2-gconf-2.16.0-1.fc6.x86_64 +gnome-python2-gnomevfs-2.16.0-1.fc6.x86_64 +gnome-python2-gtkhtml2-2.14.2-7.el5.x86_64 +gnome-vfs2-2.16.2-8.el5.i386 +gnome-vfs2-2.16.2-8.el5.x86_64 +gnome-vfs2-devel-2.16.2-8.el5.x86_64 +gnupg-1.4.5-14.el5_5.1.x86_64 +gnuplot42-4.2.6-5.el5.x86_64 +gnutls-1.4.1-3.el5_4.8.i386 +gnutls-1.4.1-3.el5_4.8.x86_64 +gpg-pubkey-217521f6-45e8a532.(none) +gpm-1.20.1-74.1.i386 +gpm-1.20.1-74.1.x86_64 +gpm-devel-1.20.1-74.1.i386 +gpm-devel-1.20.1-74.1.x86_64 +grep-2.5.1-55.el5.x86_64 +groff-1.18.1.1-11.1.x86_64 +grub-0.97-13.5.x86_64 +gtk2-2.10.4-21.el5_5.6.i386 +gtk2-2.10.4-21.el5_5.6.x86_64 +gtk2-devel-2.10.4-21.el5_5.6.x86_64 +gtk2-engines-2.8.0-3.el5.x86_64 +gtkhtml2-2.11.0-3.x86_64 +gzip-1.3.5-11.el5_4.1.x86_64 +hal-0.5.8.1-62.el5.i386 +hal-0.5.8.1-62.el5.x86_64 +hal-devel-0.5.8.1-62.el5.x86_64 +hdparm-6.6-2.x86_64 +hesiod-3.1.0-8.i386 +hesiod-3.1.0-8.x86_64 +hesiod-devel-3.1.0-8.i386 +hesiod-devel-3.1.0-8.x86_64 +hicolor-icon-theme-0.9-2.1.noarch +hmaccalc-0.9.6-3.el5.x86_64 +htmlview-4.0.0-2.el5.noarch +httpd-2.2.3-53.sl5.1.x86_64 +hwdata-0.213.22-1.el5.noarch +ifd-egate-0.05-15.x86_64 +ImageMagick-6.2.8.0-4.el5_5.3.i386 +ImageMagick-6.2.8.0-4.el5_5.3.x86_64 +imake-1.0.2-3.x86_64 +indent-2.2.9-14.fc6.x86_64 +info-4.8-14.el5.x86_64 +initscripts-8.45.33-1.el5.x86_64 +iproute-2.6.18-11.el5.x86_64 +ipsec-tools-0.6.5-14.el5_5.5.x86_64 +iptables-1.3.5-5.3.el5_4.1.x86_64 +iptables-ipv6-1.3.5-5.3.el5_4.1.x86_64 +iptstate-1.4-2.el5.x86_64 +iputils-20020927-46.el5.x86_64 +ipw2100-firmware-1.3-5.noarch +ipw2200-firmware-3.1-1.noarch +irda-utils-0.9.17-2.fc6.x86_64 +irqbalance-0.55-15.el5.x86_64 +iscsi-initiator-utils-6.2.0.872-6.el5.x86_64 +isdn4k-utils-3.2-56.el5.x86_64 +iwlwifi-1000-ucode-128.50.3.1-1.el5.noarch +iwlwifi-3945-ucode-15.32.2.9-1.el5.noarch +iwlwifi-4965-ucode-228.61.2.24-8.el5.noarch +iwlwifi-5000-ucode-8.24.2.12-1.el5.noarch +iwlwifi-5150-ucode-8.24.2.2-1.el5.noarch +iwlwifi-6000-ucode-9.193.4.1-1.el5.noarch +java-1.4.2-gcj-compat-1.4.2.0-40jpp.115.x86_64 +jpackage-utils-1.7.3-1jpp.2.el5.noarch +jwhois-3.2.3-12.el5.x86_64 +kbd-1.12-21.el5.x86_64 +kernel-2.6.18-238.9.1.el5.x86_64 +kernel-2.6.18-274.3.1.el5.x86_64 +kernel-devel-2.6.18-238.9.1.el5.x86_64 +kernel-devel-2.6.18-274.3.1.el5.x86_64 +kernel-headers-2.6.18-274.3.1.el5.x86_64 +kexec-tools-1.102pre-126.el5_6.6.x86_64 +keyutils-libs-1.2-1.el5.i386 +keyutils-libs-1.2-1.el5.x86_64 +keyutils-libs-devel-1.2-1.el5.x86_64 +kpartx-0.4.7-42.el5_6.2.x86_64 +krb5-devel-1.6.1-55.el5_6.1.i386 +krb5-devel-1.6.1-55.el5_6.1.x86_64 +krb5-libs-1.6.1-55.el5_6.1.i386 +krb5-libs-1.6.1-55.el5_6.1.x86_64 
+krb5-workstation-1.6.1-55.el5_6.1.x86_64 +ksh-20100202-1.el5_6.4.x86_64 +kudzu-1.2.57.1.26-1.x86_64 +kudzu-devel-1.2.57.1.26-1.i386 +kudzu-devel-1.2.57.1.26-1.x86_64 +lcms-1.18-0.1.beta1.el5_3.2.i386 +lcms-1.18-0.1.beta1.el5_3.2.x86_64 +less-436-7.el5.x86_64 +lftp-3.7.11-4.el5_5.3.x86_64 +libacl-2.2.39-6.el5.i386 +libacl-2.2.39-6.el5.x86_64 +libacl-devel-2.2.39-6.el5.i386 +libacl-devel-2.2.39-6.el5.x86_64 +libaio-0.3.106-5.i386 +libaio-0.3.106-5.x86_64 +libart_lgpl-2.3.17-4.x86_64 +libart_lgpl-devel-2.3.17-4.x86_64 +libattr-2.4.32-1.1.i386 +libattr-2.4.32-1.1.x86_64 +libattr-devel-2.4.32-1.1.i386 +libattr-devel-2.4.32-1.1.x86_64 +libbonobo-2.16.0-1.1.el5_5.1.i386 +libbonobo-2.16.0-1.1.el5_5.1.x86_64 +libbonobo-devel-2.16.0-1.1.el5_5.1.x86_64 +libbonoboui-2.16.0-1.fc6.x86_64 +libbonoboui-devel-2.16.0-1.fc6.x86_64 +libcap-1.10-26.i386 +libcap-1.10-26.x86_64 +libcap-devel-1.10-26.i386 +libcap-devel-1.10-26.x86_64 +libconfuse-2.5-4.el5.x86_64 +libcroco-0.6.1-2.1.i386 +libcroco-0.6.1-2.1.x86_64 +libdaemon-0.10-5.el5.i386 +libdaemon-0.10-5.el5.x86_64 +libdmx-1.0.2-3.1.x86_64 +libdrm-2.0.2-1.1.x86_64 +libevent-1.4.13-1.x86_64 +libfontenc-1.0.2-2.2.el5.x86_64 +libFS-1.0.0-3.1.x86_64 +libganglia-3_1_0-3.1.7-1.x86_64 +libgcc-4.1.2-50.el5.i386 +libgcc-4.1.2-50.el5.x86_64 +libgcj-4.1.2-50.el5.i386 +libgcj-4.1.2-50.el5.x86_64 +libgcrypt-1.4.4-5.el5.i386 +libgcrypt-1.4.4-5.el5.x86_64 +libgcrypt-devel-1.4.4-5.el5.x86_64 +libgfortran-4.1.2-50.el5.x86_64 +libgfortran44-4.4.4-13.el5.x86_64 +libglade2-2.6.0-2.x86_64 +libglade2-devel-2.6.0-2.x86_64 +libgnome-2.16.0-6.el5.x86_64 +libgnomecanvas-2.14.0-4.1.x86_64 +libgnomecanvas-devel-2.14.0-4.1.x86_64 +libgnome-devel-2.16.0-6.el5.x86_64 +libgnomeui-2.16.0-5.el5.x86_64 +libgnomeui-devel-2.16.0-5.el5.x86_64 +libgomp-4.4.4-13.el5.i386 +libgomp-4.4.4-13.el5.x86_64 +libgpg-error-1.4-2.i386 +libgpg-error-1.4-2.x86_64 +libgpg-error-devel-1.4-2.x86_64 +libgsf-1.14.1-6.1.i386 +libgsf-1.14.1-6.1.x86_64 +libgssapi-0.10-2.x86_64 +libhugetlbfs-1.3-8.2.el5.i386 +libhugetlbfs-1.3-8.2.el5.x86_64 +libICE-1.0.1-2.1.i386 +libICE-1.0.1-2.1.x86_64 +libICE-devel-1.0.1-2.1.x86_64 +libicu-3.6-5.16.i386 +libicu-3.6-5.16.x86_64 +libIDL-0.8.7-1.fc6.i386 +libIDL-0.8.7-1.fc6.x86_64 +libIDL-devel-0.8.7-1.fc6.x86_64 +libidn-0.6.5-1.1.i386 +libidn-0.6.5-1.1.x86_64 +libidn-devel-0.6.5-1.1.x86_64 +libjpeg-6b-37.i386 +libjpeg-6b-37.x86_64 +libjpeg-devel-6b-37.x86_64 +libnotify-0.4.2-6.el5.x86_64 +libnotify-devel-0.4.2-6.el5.x86_64 +libogg-1.1.3-3.el5.i386 +libogg-1.1.3-3.el5.x86_64 +libogg-devel-1.1.3-3.el5.i386 +libogg-devel-1.1.3-3.el5.x86_64 +libpcap-0.9.4-15.el5.x86_64 +libpng-1.2.10-7.1.el5_7.5.i386 +libpng-1.2.10-7.1.el5_7.5.x86_64 +libpng-devel-1.2.10-7.1.el5_7.5.x86_64 +librsvg2-2.16.1-1.el5.i386 +librsvg2-2.16.1-1.el5.x86_64 +libselinux-1.33.4-5.7.el5.i386 +libselinux-1.33.4-5.7.el5.x86_64 +libselinux-devel-1.33.4-5.7.el5.i386 +libselinux-devel-1.33.4-5.7.el5.x86_64 +libselinux-python-1.33.4-5.7.el5.x86_64 +libselinux-ruby-1.33.4-5.7.el5.x86_64 +libselinux-utils-1.33.4-5.7.el5.x86_64 +libsemanage-1.9.1-4.4.el5.x86_64 +libsepol-1.15.2-3.el5.i386 +libsepol-1.15.2-3.el5.x86_64 +libsepol-devel-1.15.2-3.el5.x86_64 +libSM-1.0.1-3.1.i386 +libSM-1.0.1-3.1.x86_64 +libsmbclient-3.0.33-3.29.el5_7.4.x86_64 +libSM-devel-1.0.1-3.1.x86_64 +libstdc++-4.1.2-50.el5.i386 +libstdc++-4.1.2-50.el5.x86_64 +libstdc++44-devel-4.4.4-13.el5.x86_64 +libstdc++-devel-4.1.2-50.el5.x86_64 +libsysfs-2.0.0-6.x86_64 +libtermcap-2.0.8-46.1.i386 +libtermcap-2.0.8-46.1.x86_64 +libtermcap-devel-2.0.8-46.1.i386 
+libtermcap-devel-2.0.8-46.1.x86_64 +libtiff-3.8.2-7.el5_6.7.i386 +libtiff-3.8.2-7.el5_6.7.x86_64 +libtool-1.5.22-7.el5_4.x86_64 +libusb-0.1.12-5.1.i386 +libusb-0.1.12-5.1.x86_64 +libusb-devel-0.1.12-5.1.i386 +libusb-devel-0.1.12-5.1.x86_64 +libuser-0.54.7-2.1.el5_5.2.i386 +libuser-0.54.7-2.1.el5_5.2.x86_64 +libuser-devel-0.54.7-2.1.el5_5.2.i386 +libuser-devel-0.54.7-2.1.el5_5.2.x86_64 +libutempter-1.1.4-4.el5.i386 +libutempter-1.1.4-4.el5.x86_64 +libvirt-0.8.2-22.el5.i386 +libvirt-0.8.2-22.el5.x86_64 +libvolume_id-095-14.24.el5.i386 +libvolume_id-095-14.24.el5.x86_64 +libvorbis-1.1.2-3.el5_4.4.i386 +libvorbis-1.1.2-3.el5_4.4.x86_64 +libvorbis-devel-1.1.2-3.el5_4.4.i386 +libvorbis-devel-1.1.2-3.el5_4.4.x86_64 +libwmf-0.2.8.4-10.2.i386 +libwmf-0.2.8.4-10.2.x86_64 +libwnck-2.16.0-4.fc6.x86_64 +libwvstreams-4.2.2-2.1.x86_64 +libX11-1.0.3-11.el5.i386 +libX11-1.0.3-11.el5.x86_64 +libX11-devel-1.0.3-11.el5.x86_64 +libXau-1.0.1-3.1.i386 +libXau-1.0.1-3.1.x86_64 +libXau-devel-1.0.1-3.1.x86_64 +libXaw-1.0.2-8.1.x86_64 +libXcursor-1.1.7-1.1.i386 +libXcursor-1.1.7-1.1.x86_64 +libXcursor-devel-1.1.7-1.1.x86_64 +libXdmcp-1.0.1-2.1.i386 +libXdmcp-1.0.1-2.1.x86_64 +libXdmcp-devel-1.0.1-2.1.x86_64 +libXevie-1.0.1-3.1.x86_64 +libXext-1.0.1-2.1.i386 +libXext-1.0.1-2.1.x86_64 +libXext-devel-1.0.1-2.1.x86_64 +libXfixes-4.0.1-2.1.i386 +libXfixes-4.0.1-2.1.x86_64 +libXfixes-devel-4.0.1-2.1.x86_64 +libXfont-1.2.2-1.0.4.el5_7.x86_64 +libXfontcache-1.0.2-3.1.x86_64 +libXft-2.1.10-1.1.i386 +libXft-2.1.10-1.1.x86_64 +libXft-devel-2.1.10-1.1.x86_64 +libXi-1.0.1-4.el5_4.i386 +libXi-1.0.1-4.el5_4.x86_64 +libXi-devel-1.0.1-4.el5_4.x86_64 +libXinerama-1.0.1-2.1.i386 +libXinerama-1.0.1-2.1.x86_64 +libXinerama-devel-1.0.1-2.1.x86_64 +libxkbfile-1.0.3-3.1.x86_64 +libxml2-2.6.26-2.1.2.8.el5_5.1.i386 +libxml2-2.6.26-2.1.2.8.el5_5.1.x86_64 +libxml2-devel-2.6.26-2.1.2.8.el5_5.1.i386 +libxml2-devel-2.6.26-2.1.2.8.el5_5.1.x86_64 +libxml2-python-2.6.26-2.1.2.8.el5_5.1.x86_64 +libXmu-1.0.2-5.x86_64 +libXp-1.0.0-8.1.el5.x86_64 +libXpm-3.5.5-3.x86_64 +libXrandr-1.1.1-3.3.i386 +libXrandr-1.1.1-3.3.x86_64 +libXrandr-devel-1.1.1-3.3.x86_64 +libXrender-0.9.1-3.1.i386 +libXrender-0.9.1-3.1.x86_64 +libXrender-devel-0.9.1-3.1.x86_64 +libXres-1.0.1-3.1.x86_64 +libxslt-1.1.17-2.el5_2.2.i386 +libxslt-1.1.17-2.el5_2.2.x86_64 +libxslt-devel-1.1.17-2.el5_2.2.x86_64 +libxslt-python-1.1.17-2.el5_2.2.x86_64 +libXt-1.0.2-3.2.el5.i386 +libXt-1.0.2-3.2.el5.x86_64 +libXt-devel-1.0.2-3.2.el5.x86_64 +libXTrap-1.0.0-3.1.x86_64 +libXtst-1.0.1-3.1.i386 +libXtst-1.0.1-3.1.x86_64 +libXv-1.0.1-4.1.x86_64 +libXxf86dga-1.0.1-3.1.x86_64 +libXxf86misc-1.0.1-3.1.x86_64 +libXxf86vm-1.0.1-3.1.x86_64 +lm_sensors-2.10.7-9.el5.x86_64 +lockdev-1.0.1-10.i386 +lockdev-1.0.1-10.x86_64 +lockdev-devel-1.0.1-10.i386 +lockdev-devel-1.0.1-10.x86_64 +log4cpp-1.0-9.el5.i386 +log4cpp-1.0-9.el5.x86_64 +logrotate-3.7.4-9.el5_5.2.x86_64 +logwatch-7.3-9.el5_6.noarch +lrzsz-0.12.20-22.1.x86_64 +lsof-4.78-3.x86_64 +ltrace-0.5-13.45svn.el5.x86_64 +lvm2-2.02.74-5.el5_6.1.x86_64 +m2crypto-0.16-7.el5.x86_64 +m4-1.4.5-3.el5.1.x86_64 +mailcap-2.1.23-1.fc6.noarch +mailx-8.1.1-44.2.2.x86_64 +make-3.81-3.el5.x86_64 +MAKEDEV-3.23-1.2.x86_64 +man-1.6d-1.1.x86_64 +man-pages-2.39-17.el5.noarch +mcelog-0.9pre-1.30.el5.x86_64 +mcstrans-0.2.11-3.el5.x86_64 +mdadm-2.6.9-3.el5.x86_64 +mesa-libGL-6.5.1-7.8.el5.x86_64 +mesa-libGL-devel-6.5.1-7.8.el5.x86_64 +metacity-2.16.0-16.el5.x86_64 +mgetty-1.1.33-9.fc6.x86_64 +microcode_ctl-1.17-1.52.el5.x86_64 +mingetty-1.07-5.2.2.x86_64 +minicom-2.1-3.x86_64 
+mkbootdisk-1.5.3-2.1.x86_64 +mkinitrd-5.1.19.6-68.el5_6.1.i386 +mkinitrd-5.1.19.6-68.el5_6.1.x86_64 +mktemp-1.5-23.2.2.x86_64 +mlocate-0.15-1.el5.2.x86_64 +module-init-tools-3.3-0.pre3.1.60.el5_5.1.x86_64 +mozldap-6.0.5-1.el5.x86_64 +mtools-3.9.10-2.fc6.x86_64 +mtr-0.71-3.1.x86_64 +mysql-5.0.77-4.el5_6.6.x86_64 +nagios-common-2.12-10.el5.x86_64 +nagios-plugins-1.4.15-2.el5.x86_64 +nagios-plugins-all-1.4.15-2.el5.x86_64 +nagios-plugins-breeze-1.4.15-2.el5.x86_64 +nagios-plugins-by_ssh-1.4.15-2.el5.x86_64 +nagios-plugins-cluster-1.4.15-2.el5.x86_64 +nagios-plugins-dhcp-1.4.15-2.el5.x86_64 +nagios-plugins-dig-1.4.15-2.el5.x86_64 +nagios-plugins-disk-1.4.15-2.el5.x86_64 +nagios-plugins-disk_smb-1.4.15-2.el5.x86_64 +nagios-plugins-dns-1.4.15-2.el5.x86_64 +nagios-plugins-dummy-1.4.15-2.el5.x86_64 +nagios-plugins-file_age-1.4.15-2.el5.x86_64 +nagios-plugins-flexlm-1.4.15-2.el5.x86_64 +nagios-plugins-fping-1.4.15-2.el5.x86_64 +nagios-plugins-game-1.4.15-2.el5.x86_64 +nagios-plugins-hpjd-1.4.15-2.el5.x86_64 +nagios-plugins-http-1.4.15-2.el5.x86_64 +nagios-plugins-icmp-1.4.15-2.el5.x86_64 +nagios-plugins-ide_smart-1.4.15-2.el5.x86_64 +nagios-plugins-ircd-1.4.15-2.el5.x86_64 +nagios-plugins-ldap-1.4.15-2.el5.x86_64 +nagios-plugins-linux_raid-1.4.15-2.el5.x86_64 +nagios-plugins-load-1.4.15-2.el5.x86_64 +nagios-plugins-log-1.4.15-2.el5.x86_64 +nagios-plugins-mailq-1.4.15-2.el5.x86_64 +nagios-plugins-mrtg-1.4.15-2.el5.x86_64 +nagios-plugins-mrtgtraf-1.4.15-2.el5.x86_64 +nagios-plugins-mysql-1.4.15-2.el5.x86_64 +nagios-plugins-nagios-1.4.15-2.el5.x86_64 +nagios-plugins-nt-1.4.15-2.el5.x86_64 +nagios-plugins-ntp-1.4.15-2.el5.x86_64 +nagios-plugins-nwstat-1.4.15-2.el5.x86_64 +nagios-plugins-oracle-1.4.15-2.el5.x86_64 +nagios-plugins-overcr-1.4.15-2.el5.x86_64 +nagios-plugins-perl-1.4.15-2.el5.x86_64 +nagios-plugins-pgsql-1.4.15-2.el5.x86_64 +nagios-plugins-ping-1.4.15-2.el5.x86_64 +nagios-plugins-procs-1.4.15-2.el5.x86_64 +nagios-plugins-real-1.4.15-2.el5.x86_64 +nagios-plugins-rpc-1.4.15-2.el5.x86_64 +nagios-plugins-sensors-1.4.15-2.el5.x86_64 +nagios-plugins-smtp-1.4.15-2.el5.x86_64 +nagios-plugins-snmp-1.4.15-2.el5.x86_64 +nagios-plugins-ssh-1.4.15-2.el5.x86_64 +nagios-plugins-swap-1.4.15-2.el5.x86_64 +nagios-plugins-tcp-1.4.15-2.el5.x86_64 +nagios-plugins-time-1.4.15-2.el5.x86_64 +nagios-plugins-ups-1.4.15-2.el5.x86_64 +nagios-plugins-users-1.4.15-2.el5.x86_64 +nagios-plugins-wave-1.4.15-2.el5.x86_64 +nano-1.3.12-1.1.x86_64 +nash-5.1.19.6-68.el5_6.1.x86_64 +nc-1.84-10.fc6.x86_64 +ncurses-5.5-24.20060715.i386 +ncurses-5.5-24.20060715.x86_64 +ncurses-devel-5.5-24.20060715.i386 +ncurses-devel-5.5-24.20060715.x86_64 +nedit-5.5-21.el5.x86_64 +neon-0.25.5-10.el5_4.1.i386 +neon-0.25.5-10.el5_4.1.x86_64 +net-snmp-5.3.2.2-9.el5_5.1.x86_64 +net-snmp-libs-5.3.2.2-9.el5_5.1.i386 +net-snmp-libs-5.3.2.2-9.el5_5.1.x86_64 +net-snmp-utils-5.3.2.2-9.el5_5.1.x86_64 +net-tools-1.60-81.el5.x86_64 +NetworkManager-0.7.0-10.el5_5.2.i386 +NetworkManager-0.7.0-10.el5_5.2.x86_64 +NetworkManager-glib-0.7.0-10.el5_5.2.i386 +NetworkManager-glib-0.7.0-10.el5_5.2.x86_64 +newt-0.52.2-15.el5.i386 +newt-0.52.2-15.el5.x86_64 +newt-devel-0.52.2-15.el5.i386 +newt-devel-0.52.2-15.el5.x86_64 +newt-perl-1.08-9.2.2.x86_64 +nfs-utils-1.0.9-50.el5.x86_64 +nfs-utils-lib-1.0.8-7.6.el5.x86_64 +nmap-4.11-1.1.x86_64 +notification-daemon-0.3.5-9.el5.x86_64 +notify-python-0.1.0-3.fc6.x86_64 +nrpe-2.12-16.el5.x86_64 +nscd-2.5-58.el5_6.3.x86_64 +nspr-4.8.8-1.el5_7.i386 +nspr-4.8.8-1.el5_7.x86_64 +nspr-devel-4.8.8-1.el5_7.x86_64 +nss-3.12.10-4.el5_7.i386 
+nss-3.12.10-4.el5_7.x86_64 +nss_db-2.2-35.4.el5_5.i386 +nss_db-2.2-35.4.el5_5.x86_64 +nss-devel-3.12.10-4.el5_7.x86_64 +nss_ldap-253-37.el5.i386 +nss_ldap-253-37.el5.x86_64 +nss-tools-3.12.10-4.el5_7.x86_64 +ntp-4.2.2p1-9.el5_4.1.x86_64 +ntsysv-1.3.30.2-2.el5.x86_64 +numactl-0.9.8-11.el5.i386 +numactl-0.9.8-11.el5.x86_64 +oddjob-0.27-11.el5.x86_64 +oddjob-libs-0.27-11.el5.x86_64 +OpenIPMI-2.0.16-11.el5.x86_64 +OpenIPMI-libs-2.0.16-11.el5.x86_64 +OpenIPMI-tools-2.0.16-11.el5.x86_64 +openjade-1.3.2-27.x86_64 +openldap-2.3.43-12.el5_6.7.i386 +openldap-2.3.43-12.el5_6.7.x86_64 +openldap-clients-2.3.43-12.el5_6.7.x86_64 +openldap-devel-2.3.43-12.el5_6.7.i386 +openldap-devel-2.3.43-12.el5_6.7.x86_64 +openmotif-2.3.1-5.el5_5.1.x86_64 +opensp-1.5.2-4.x86_64 +openssh-4.3p2-72.el5_6.3.x86_64 +openssh-clients-4.3p2-72.el5_6.3.x86_64 +openssh-server-4.3p2-72.el5_6.3.x86_64 +openssl-0.9.8e-12.el5_5.7.i686 +openssl-0.9.8e-12.el5_5.7.x86_64 +openssl-devel-0.9.8e-12.el5_5.7.i386 +openssl-devel-0.9.8e-12.el5_5.7.x86_64 +oprofile-0.9.4-15.el5.x86_64 +ORBit2-2.14.3-5.el5.i386 +ORBit2-2.14.3-5.el5.x86_64 +ORBit2-devel-2.14.3-5.el5.x86_64 +pam-0.99.6.2-6.el5_5.2.i386 +pam-0.99.6.2-6.el5_5.2.x86_64 +pam_ccreds-3-5.i386 +pam_ccreds-3-5.x86_64 +pam-devel-0.99.6.2-6.el5_5.2.i386 +pam-devel-0.99.6.2-6.el5_5.2.x86_64 +pam_krb5-2.2.14-18.el5.i386 +pam_krb5-2.2.14-18.el5.x86_64 +pam_passwdqc-1.0.2-1.2.2.i386 +pam_passwdqc-1.0.2-1.2.2.x86_64 +pam_pkcs11-0.5.3-23.i386 +pam_pkcs11-0.5.3-23.x86_64 +pam_smb-1.1.7-7.2.1.i386 +pam_smb-1.1.7-7.2.1.x86_64 +pango-1.14.9-8.el5_7.3.i386 +pango-1.14.9-8.el5_7.3.x86_64 +pango-devel-1.14.9-8.el5_7.3.x86_64 +parted-1.8.1-27.el5.i386 +parted-1.8.1-27.el5.x86_64 +passwd-0.73-2.x86_64 +patch-2.5.4-31.el5.x86_64 +patchutils-0.2.31-2.2.2.x86_64 +pax-3.4-2.el5.x86_64 +pciutils-3.1.7-3.el5.x86_64 +pciutils-devel-3.1.7-3.el5.i386 +pciutils-devel-3.1.7-3.el5.x86_64 +pcmciautils-014-5.x86_64 +pcre-6.6-6.el5_6.1.x86_64 +pcsc-lite-1.4.4-4.el5_5.x86_64 +pcsc-lite-devel-1.4.4-4.el5_5.i386 +pcsc-lite-devel-1.4.4-4.el5_5.x86_64 +pcsc-lite-libs-1.4.4-4.el5_5.i386 +pcsc-lite-libs-1.4.4-4.el5_5.x86_64 +perl-5.8.8-32.el5_5.2.x86_64 +perl-Compress-Zlib-1.42-1.fc6.x86_64 +perl-Convert-ASN1-0.20-1.1.noarch +perl-DateManip-5.44-1.2.1.noarch +perl-DBI-1.52-2.el5.x86_64 +perl-HTML-Parser-3.55-1.fc6.x86_64 +perl-HTML-Tagset-3.10-2.1.1.noarch +perl-libwww-perl-5.805-1.1.1.noarch +perl-String-CRC32-1.4-2.fc6.x86_64 +perl-URI-1.35-3.noarch +perl-XML-Parser-2.34-6.1.2.2.1.x86_64 +perl-XML-Simple-2.14-4.fc6.noarch +php-5.1.6-27.el5_5.3.x86_64 +php-cli-5.1.6-27.el5_5.3.x86_64 +php-common-5.1.6-27.el5_5.3.x86_64 +pinfo-0.6.9-1.fc6.x86_64 +pirut-1.3.28-17.sl5.noarch +pkgconfig-0.21-2.el5.x86_64 +pkinit-nss-0.7.6-1.el5.x86_64 +pm-utils-0.99.3-10.el5.x86_64 +policycoreutils-1.33.12-14.8.el5.x86_64 +popt-1.10.2.3-22.el5.i386 +popt-1.10.2.3-22.el5.x86_64 +portmap-4.0-65.2.2.1.x86_64 +postgresql-libs-8.1.23-1.el5_6.1.i386 +postgresql-libs-8.1.23-1.el5_6.1.x86_64 +ppp-2.4.4-2.el5.x86_64 +prelink-0.4.0-2.el5.x86_64 +procmail-3.22-17.1.x86_64 +procps-3.2.7-17.el5.x86_64 +psacct-6.3.2-44.el5.x86_64 +psmisc-22.2-7.el5_6.2.x86_64 +pstack-1.2-7.2.2.x86_64 +puppet-2.6.6-2.el5.noarch +pycairo-1.2.0-1.1.x86_64 +pygobject2-2.12.1-5.el5.x86_64 +pygtk2-2.10.1-12.el5.x86_64 +pygtk2-libglade-2.10.1-12.el5.x86_64 +pyorbit-2.14.1-3.el5.x86_64 +pyspi-0.6.1-1.el5.x86_64 +python-2.4.3-44.el5.x86_64 +python-devel-2.4.3-44.el5.i386 +python-devel-2.4.3-44.el5.x86_64 +python-elementtree-1.2.6-5.x86_64 +python-iniparse-0.2.3-4.el5.noarch 
+python-ldap-2.2.0-2.1.x86_64 +python-libs-2.4.3-44.el5.x86_64 +python-numeric-23.7-2.2.2.x86_64 +python-sqlite-1.1.7-1.2.1.x86_64 +python-urlgrabber-3.1.0-6.el5.noarch +pyxf86config-0.3.31-2.fc6.x86_64 +qstat-2.11-3.el5.x86_64 +quota-3.13-5.el5.x86_64 +rcs-5.7-30.1.x86_64 +rdate-1.4-8.el5.x86_64 +rdist-6.1.5-44.x86_64 +readahead-1.3-8.el5.x86_64 +readline-5.1-3.el5.i386 +readline-5.1-3.el5.x86_64 +readline-devel-5.1-3.el5.i386 +readline-devel-5.1-3.el5.x86_64 +redhat-artwork-5.0.9-2.SL.4.x86_64 +redhat-logos-4.9.16-2.sl5.6.noarch +redhat-lsb-4.0-2.1.4.el5.i386 +redhat-lsb-4.0-2.1.4.el5.x86_64 +redhat-menus-6.7.8-3.el5.noarch +redhat-rpm-config-8.0.45-32.el5.noarch +rhel-instnum-1.0.9-1.el5.noarch +rhpl-0.194.1-1.x86_64 +rhpxl-0.41.1-9.el5.x86_64 +rmt-0.4b41-5.el5.x86_64 +rng-utils-2.0-4.el5.x86_64 +rootfiles-8.1-1.1.1.noarch +rpm-4.4.2.3-22.el5.x86_64 +rpm-build-4.4.2.3-22.el5.x86_64 +rpm-devel-4.4.2.3-22.el5.i386 +rpm-devel-4.4.2.3-22.el5.x86_64 +rpm-libs-4.4.2.3-22.el5.i386 +rpm-libs-4.4.2.3-22.el5.x86_64 +rpm-python-4.4.2.3-22.el5.x86_64 +rp-pppoe-3.5-32.1.x86_64 +rsh-0.17-40.el5.x86_64 +rsync-3.0.6-4.el5_7.1.x86_64 +rsyslog-3.22.1-3.el5_6.1.x86_64 +rt61pci-firmware-1.2-1.el5.noarch +rt73usb-firmware-1.8-1.el5.noarch +ruby-1.8.5-19.el5_6.1.x86_64 +ruby-augeas-0.4.1-1.el5.x86_64 +ruby-libs-1.8.5-19.el5_6.1.x86_64 +ruby-shadow-1.4.1-7.el5.x86_64 +sabayon-2.12.4-7.el5.x86_64 +sabayon-apply-2.12.4-7.el5.x86_64 +samba-3.0.33-3.29.el5_7.4.x86_64 +samba-client-3.0.33-3.29.el5_7.4.x86_64 +samba-common-3.0.33-3.29.el5_7.4.x86_64 +screen-4.0.3-4.el5.x86_64 +scrollkeeper-0.3.14-9.el5.x86_64 +sed-4.1.5-8.el5.x86_64 +selinux-policy-2.4.6-316.el5.noarch +selinux-policy-targeted-2.4.6-316.el5.noarch +sendmail-8.13.8-8.el5.x86_64 +setarch-2.0-1.1.x86_64 +setools-3.0-3.el5.x86_64 +setroubleshoot-2.0.5-5.el5.noarch +setroubleshoot-plugins-2.0.4-2.el5.noarch +setroubleshoot-server-2.0.5-5.el5.noarch +setserial-2.17-19.2.2.x86_64 +setup-2.5.58-7.el5.noarch +setuptool-1.19.2-1.x86_64 +sgml-common-0.6.3-18.noarch +sgpio-1.2.0_10-2.el5.x86_64 +shadow-utils-4.0.17-18.el5.x86_64 +shared-mime-info-0.19-5.el5.x86_64 +slang-2.0.6-4.el5.i386 +slang-2.0.6-4.el5.x86_64 +slang-devel-2.0.6-4.el5.i386 +slang-devel-2.0.6-4.el5.x86_64 +sl-release-5.6-1.x86_64 +sl-release-notes-5.6-1.noarch +SL_rpm_show_arch-1.1-1.noarch +smartmontools-5.38-2.el5.x86_64 +sos-1.7-9.49.el5.noarch +sox-12.18.1-1.el5_5.1.x86_64 +specspo-13-1.el5.noarch +splint-3.1.1-16.el5.x86_64 +sqlite-3.3.6-5.i386 +sqlite-3.3.6-5.x86_64 +sqlite-devel-3.3.6-5.x86_64 +startup-notification-0.8-4.1.x86_64 +startup-notification-devel-0.8-4.1.x86_64 +strace-4.5.18-5.el5_5.5.x86_64 +stunnel-4.15-2.el5.1.x86_64 +subversion-1.6.11-7.el5_6.4.i386 +subversion-1.6.11-7.el5_6.4.x86_64 +sudo-1.7.2p1-10.el5.x86_64 +svrcore-4.0.4-3.el5.i386 +svrcore-4.0.4-3.el5.x86_64 +swig-1.3.29-2.el5.x86_64 +symlinks-1.2-24.2.2.x86_64 +sysfsutils-2.0.0-6.x86_64 +syslinux-3.11-4.x86_64 +system-config-date-1.8.12-4.el5.noarch +system-config-display-1.0.48-2.el5.noarch +system-config-httpd-1.3.3.3-1.el5.noarch +system-config-kdump-1.0.14-4.el5.noarch +system-config-keyboard-1.2.11-1.el5.noarch +system-config-language-1.1.18-3.el5.noarch +system-config-lvm-1.1.5-8.el5.noarch +system-config-netboot-0.1.45.1-1.el5.noarch +system-config-netboot-cmd-0.1.45.1-1.el5.noarch +system-config-network-1.3.99.18-1.el5.noarch +system-config-network-tui-1.3.99.18-1.el5.noarch +system-config-nfs-1.3.23-1.el5.noarch +system-config-rootpassword-1.1.9.1-1.noarch +system-config-samba-1.2.41-5.el5.noarch 
+system-config-securitylevel-1.6.29.1-6.el5.x86_64 +system-config-securitylevel-tui-1.6.29.1-6.el5.x86_64 +system-config-services-0.9.4-5.el5.noarch +system-config-soundcard-2.0.6-1.el5.noarch +system-config-users-1.2.51-4.el5.noarch +systemtap-1.3-9.el5.x86_64 +systemtap-runtime-1.3-9.el5.x86_64 +SysVinit-2.86-15.el5.x86_64 +talk-0.17-29.2.2.x86_64 +tar-1.15.1-30.el5.x86_64 +tcl-8.4.13-4.el5.x86_64 +tcpdump-3.9.4-15.el5.x86_64 +tcp_wrappers-7.6-40.7.el5.i386 +tcp_wrappers-7.6-40.7.el5.x86_64 +tcsh-6.14-17.el5_5.2.x86_64 +telnet-0.17-39.el5.x86_64 +termcap-5.5-1.20060701.1.noarch +texinfo-4.8-14.el5.x86_64 +tftp-server-0.49-2.x86_64 +time-1.7-27.2.2.x86_64 +tmpwatch-2.9.7-1.1.el5.5.x86_64 +traceroute-2.0.1-6.el5.x86_64 +tree-1.5.0-4.x86_64 +ttmkfdir-3.0.9-23.el5.x86_64 +tzdata-2011h-2.el5.x86_64 +udev-095-14.24.el5.x86_64 +udftools-1.0.0b3-0.1.el5.x86_64 +unix2dos-2.2-26.2.3.el5.x86_64 +unzip-5.52-3.el5.x86_64 +urw-fonts-2.3-6.1.1.noarch +usbutils-0.71-2.1.x86_64 +usermode-1.88-3.el5.2.x86_64 +usermode-gtk-1.88-3.el5.2.x86_64 +util-linux-2.13-0.56.el5.x86_64 +valgrind-3.5.0-1.el5.i386 +valgrind-3.5.0-1.el5.x86_64 +vconfig-1.9-3.x86_64 +vdt-ca-certs-62-1.noarch +vim-common-7.0.109-7.el5.x86_64 +vim-enhanced-7.0.109-7.el5.x86_64 +vim-minimal-7.0.109-7.el5.x86_64 +vixie-cron-4.1-77.el5_4.1.x86_64 +vnc-4.1.2-14.el5_6.6.x86_64 +wget-1.11.4-2.el5_4.1.x86_64 +which-2.16-7.x86_64 +wireless-tools-28-2.el5.i386 +wireless-tools-28-2.el5.x86_64 +words-3.0-9.1.noarch +wpa_supplicant-0.5.10-9.el5.x86_64 +wvdial-1.54.0-5.2.2.1.x86_64 +xdelta-1.1.3-20.i386 +xdelta-1.1.3-20.x86_64 +xen-libs-3.0.3-120.el5_6.2.i386 +xen-libs-3.0.3-120.el5_6.2.x86_64 +xinetd-2.3.14-10.el5.x86_64 +xkeyboard-config-0.8-9.el5.noarch +xml-common-0.6.3-18.noarch +xmlsec1-1.2.9-8.1.2.i386 +xmlsec1-1.2.9-8.1.2.x86_64 +xmlsec1-devel-1.2.9-8.1.2.i386 +xmlsec1-devel-1.2.9-8.1.2.x86_64 +xorg-x11-drv-evdev-1.0.0.5-5.el5.x86_64 +xorg-x11-drv-keyboard-1.1.0-3.x86_64 +xorg-x11-drv-mouse-1.1.1-1.1.x86_64 +xorg-x11-drv-vesa-1.3.0-8.2.el5.x86_64 +xorg-x11-drv-void-1.1.0-3.1.x86_64 +xorg-x11-filesystem-7.1-2.fc6.noarch +xorg-x11-fonts-base-7.1-2.1.el5.noarch +xorg-x11-fonts-Type1-7.1-2.1.el5.noarch +xorg-x11-font-utils-7.1-3.x86_64 +xorg-x11-proto-devel-7.1-13.el5.x86_64 +xorg-x11-server-utils-7.1-5.el5_6.2.x86_64 +xorg-x11-server-Xnest-1.1.1-48.76.el5_6.4.x86_64 +xorg-x11-server-Xorg-1.1.1-48.76.el5_6.4.x86_64 +xorg-x11-server-Xvfb-1.1.1-48.76.el5_6.4.x86_64 +xorg-x11-utils-7.1-2.fc6.x86_64 +xorg-x11-xauth-1.0.1-2.1.x86_64 +xorg-x11-xfs-1.0.2-5.el5_6.1.x86_64 +xorg-x11-xinit-1.0.2-15.el5.x86_64 +xorg-x11-xkb-utils-1.0.2-2.1.x86_64 +xsri-2.1.0-10.fc6.x86_64 +xulrunner-1.9.2.23-1.el5_7.i386 +xulrunner-1.9.2.23-1.el5_7.x86_64 +xulrunner-devel-1.9.2.23-1.el5_7.i386 +xulrunner-devel-1.9.2.23-1.el5_7.x86_64 +xz-4.999.9-0.3.beta.20091007git.el5.x86_64 +xz-libs-4.999.9-0.3.beta.20091007git.el5.i386 +xz-libs-4.999.9-0.3.beta.20091007git.el5.x86_64 +yelp-2.16.0-26.el5.x86_64 +ypbind-1.19-12.el5.x86_64 +yp-tools-2.9-1.el5.x86_64 +yum-3.2.22-33.sl.noarch +yum-autoupdate-1.1-1.SL.noarch +yum-conf-56-1.SL.noarch +yum-metadata-parser-1.1.2-3.el5.x86_64 +zip-2.31-2.el5.x86_64 +zisofs-tools-1.0.6-3.2.2.x86_64 +zlib-1.2.3-3.i386 +zlib-1.2.3-3.x86_64 +zlib-devel-1.2.3-3.i386 +zlib-devel-1.2.3-3.x86_64 +zsh-4.2.6-5.el5.x86_64 diff --git a/preview-fall2024-info/includes/Thumbs.db b/preview-fall2024-info/includes/Thumbs.db new file mode 100644 index 000000000..d5e18383e Binary files /dev/null and b/preview-fall2024-info/includes/Thumbs.db differ diff --git 
a/preview-fall2024-info/includes/chtc-pools.png b/preview-fall2024-info/includes/chtc-pools.png new file mode 100644 index 000000000..f34fb3153 Binary files /dev/null and b/preview-fall2024-info/includes/chtc-pools.png differ diff --git a/preview-fall2024-info/includes/chtc.end.html b/preview-fall2024-info/includes/chtc.end.html new file mode 100755 index 000000000..58d32c661 --- /dev/null +++ b/preview-fall2024-info/includes/chtc.end.html @@ -0,0 +1,35 @@ + +

+ + + + + + + + +  + + + + + + + + + + +
+
+ + + + diff --git a/preview-fall2024-info/includes/chtc_on_campus.png b/preview-fall2024-info/includes/chtc_on_campus.png new file mode 100644 index 000000000..b77707bb1 Binary files /dev/null and b/preview-fall2024-info/includes/chtc_on_campus.png differ diff --git a/preview-fall2024-info/includes/chtcusers.jpg b/preview-fall2024-info/includes/chtcusers.jpg new file mode 100644 index 000000000..f647dbcaa Binary files /dev/null and b/preview-fall2024-info/includes/chtcusers.jpg differ diff --git a/preview-fall2024-info/includes/chtcusers_400.jpg b/preview-fall2024-info/includes/chtcusers_400.jpg new file mode 100644 index 000000000..efa7e6675 Binary files /dev/null and b/preview-fall2024-info/includes/chtcusers_400.jpg differ diff --git a/preview-fall2024-info/includes/chtcusers_L.jpg b/preview-fall2024-info/includes/chtcusers_L.jpg new file mode 100644 index 000000000..fac6f5084 Binary files /dev/null and b/preview-fall2024-info/includes/chtcusers_L.jpg differ diff --git a/preview-fall2024-info/includes/cron-generated/dynamic-resources-noedit.html b/preview-fall2024-info/includes/cron-generated/dynamic-resources-noedit.html new file mode 100644 index 000000000..f4161e936 --- /dev/null +++ b/preview-fall2024-info/includes/cron-generated/dynamic-resources-noedit.html @@ -0,0 +1,39 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Pool/Mem              ≥1GB  ≥2GB  ≥4GB  ≥8GB  ≥16GB  ≥32GB  ≥64GB
cm.chtc.wisc.edu         0     0     0     0      0      0      0
condor.cs.wisc.edu       0     0     0     0      0      0      0
condor.cae.wisc.edu      0     0     0     0      0      0      0
Totals                   0     0     0     0      0      0      0
+
As of Mon Jun 12 07:30:02 CDT 2017
diff --git a/preview-fall2024-info/includes/jahns/Bundle Proximity Losses Paragraph.doc b/preview-fall2024-info/includes/jahns/Bundle Proximity Losses Paragraph.doc new file mode 100644 index 000000000..2695061cf Binary files /dev/null and b/preview-fall2024-info/includes/jahns/Bundle Proximity Losses Paragraph.doc differ diff --git a/preview-fall2024-info/includes/jahns/Thumbs.db b/preview-fall2024-info/includes/jahns/Thumbs.db new file mode 100644 index 000000000..dd3f84614 Binary files /dev/null and b/preview-fall2024-info/includes/jahns/Thumbs.db differ diff --git a/preview-fall2024-info/includes/old.intro.html b/preview-fall2024-info/includes/old.intro.html new file mode 100755 index 000000000..7b02b4a01 --- /dev/null +++ b/preview-fall2024-info/includes/old.intro.html @@ -0,0 +1,26 @@ +

Research is a computationally expensive endeavor that places heavy demands on whatever computing resources are available. Quite often, a researcher needs computational resources only in short bursts, leaving the computer idle the rest of the time; this often results in wasted potential computation time. This issue can be addressed by means of high-throughput computing.

+

High-throughput computing allows many computational tasks to be completed over a long period of time. It is concerned largely with the number of compute resources available to the people who wish to use the system, and it is very useful for researchers, who care more about the number of computations they can complete over long spans of time than about short-burst performance. Because of its value to research computation, the University of Wisconsin set up the Center for High-Throughput Computing to bring researchers and compute resources together.

+

The Center for High-Throughput Computing (CHTC), approved in August 2006, has numerous resources at its disposal to keep up with the computational needs of UW-Madison. These resources are funded by the National Institutes of Health (NIH), the Department of Energy (DOE), the National Science Foundation (NSF), and various grants from the University itself. Email us at chtc@cs.wisc.edu to see what we can do to help automate your research project. The CHTC aims to pull four different resources together into one operation:

+
    +
  • HTC Technologies: The CHTC leans heavily on the HTCondor project to provide a framework where high-throughput computing can take place. The HTCondor project aims to make grid and high-throughput computing a reality in any number of environments.

  • +
  • Dedicated Resources: the CHTC HTCondor pool. The CHTC cluster is now composed of 1900 cores for use by researchers across our campus. These rack-mounted blade systems run Linux; each core is 2.8 GHz with 1.5 GB RAM or better. Between 05/17/2008 and 02/23/2010, before an additional 960 cores were added, CHTC provided 10 million CPU hours of research computation; with the recent server purchase, CHTC provides in excess of 37,000 CPU hours per day (see the example submit description after this list).

    +

    + CHTC Policy Description here +

    +
  • +
  • Middleware: The GRIDS branch at UW-Madison plays an essential role in keeping the CHTC running efficiently. GRIDS is funded by the NSF Middleware Initiative (NMI). At the University of Wisconsin, the HTCondor project makes heavy use of this system through its NMI Build & Test facility, which provides a framework to build and test software on a wide variety of platform and hardware combinations.

  • +
  • Computing Laboratory: The University of Wisconsin has many compute clusters at its disposal. In 2004, the university won an award to build the Grid Laboratory of Wisconsin (GLOW), an interdepartmental pool of HTCondor nodes containing 3000 CPUs and about 1 PB of storage.

  • +
+
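To make the high-throughput model above concrete, the following is a minimal sketch of an HTCondor submit description of the kind used to run a stand-alone application as many independent jobs on a pool like CHTC's. The executable and file names are hypothetical, and exact resource requirements vary by pool.

    # Hypothetical submit description: run the same program on 100 inputs.
    executable           = analyze.sh
    arguments            = input_$(Process).dat
    transfer_input_files = input_$(Process).dat
    log    = analyze.log
    output = analyze_$(Process).out
    error  = analyze_$(Process).err
    request_cpus   = 1
    request_memory = 1GB
    # Queue 100 independent jobs, one per numbered input file.
    queue 100

Each queued job runs wherever a matching machine becomes available, which is exactly the many-independent-tasks pattern that high-throughput computing targets.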

+The University of Wisconsin-Madison (UW-Madison) campus is an excellent match for meeting the computational needs of your project. Existing UW technology infrastructure that can be leveraged includes CPU capacity, network connectivity, storage availability, and middleware connectivity. Perhaps most importantly, the UW has significant staff experience and core competency in deploying, managing, and using computational technology. +

+

+To reiterate: the UW launched and funded the Center for High Throughput Computing (CHTC), a campus-wide organization dedicated to supercharging research on campus by working side-by-side with you, the domain scientists, on infusing high-throughput computing and grid computing techniques into your routine. Between the CHTC and the aforementioned HTCondor Project, the UW is home to over 20 full-time staff with a proven track record of making compute middleware work for scientists. Far beyond just being familiar with the deployment and use of such software, UW staff have been intimately involved in its design and implementation. +

+

+Applications: Many researchers are already using these facilities. More information about a sampling of those using the CHTC can be found here, and less recent projects can be found in CHTC Older Projects. +

+
diff --git a/preview-fall2024-info/includes/onerow b/preview-fall2024-info/includes/onerow new file mode 100644 index 000000000..9d97cdfc9 --- /dev/null +++ b/preview-fall2024-info/includes/onerow @@ -0,0 +1,18 @@ +

+ + + + + +
+

Professor +

+

+1,401,361 CPU hours since 1/1/2009 +

+

+

+
+
+ + + diff --git a/preview-fall2024-info/includes/web.css b/preview-fall2024-info/includes/web.css new file mode 100644 index 000000000..293031cab --- /dev/null +++ b/preview-fall2024-info/includes/web.css @@ -0,0 +1,447 @@ +/* The following six styles set attributes for heading tags H1-H6 */ + +div.announcement { + border: 1px solid #787878; + background-color: #efefef; + color: #787878; + padding: .5em; + margin: 1em 2em 1em 2em; + } + +table.gtable { + background: #B70101; + padding: 5px 10px; + border-radius: 5px; + -moz-border-radius: 5px; + -webkit-border-radius: 5px; + + color: #FFFFFF; + overflow: hidden; + margin-bottom: 20px; + + background: #ddd; + color: #333; + border: 0; + border-bottom: 3px solid #bbb; + + -moz-box-shadow: 0px 2px 7px 1px #bbb; + -webkit-box-shadow: 0px 2px 7px 1px #bbb; + box-shadow: 0px 2px 7px 1px #bbb; +} + +table.gtable img{ + border-radius: 5px; + -moz-border-radius: 5px; + -webkit-border-radius: 5px; +} + +table.gtable td { + padding: 0.6em 0.8em 0.6em 0.8em; + background-color: #ddd; + border-bottom: 1px solid #bbb; + border-top: 0px; + overflow: visible; +} + +table.gtable th { + padding: 0.6em 0.8em 0.6em 0.8em; + background-color: #b70101; + color: #FFFFFF; + border: 0px; + border-bottom: 3px solid #920000; +} + +H1 { + margin-bottom: -5px; + text-align: left; + color: #000000; + font-size: 180%; + font-weight: bold; + font-family: verdana, geneva, arial, sans-serif; + line-height: 195% + } + + +H2 { + margin-top: 20px; + margin-bottom: 0px; + text-align: left; + color: #000000; + font-size: 150%; + font-weight: bold; + font-family: verdana, geneva, arial, sans-serif; + line-height: 180% + } + + + +H3 { + margin-top: 20px; + margin-bottom: -10px; + text-align: left; + color: #000000; + font-size: 120%; + line-height: 150%; + font-weight: bold; + font-family: verdana, geneva, arial, sans-serif; + width: 100% + } + + +H4 { + margin-top: 15px; + margin-bottom: -10px; + text-align: left; + color: #000000; + font-size: 100%; + font-weight: bold; + font-family: verdana, geneva, arial, sans-serif; + width: 100% + } + + + +H5 { + margin-top: 10px; + margin-bottom: -10px; + text-align: left; + color: #000000; + font-size: 95%; + font-weight: bold; + font-family: verdana, geneva, arial, sans-serif; + width: 100% + } + + +H6 { + margin-top: 10px; + margin-bottom: -10px; + text-align: left; + color: #000000; + font-size: 95%; + font-weight: bold; + font-family: verdana, geneva, arial, sans-serif; + width: 100% + } + +body { + background-color: #eee; + font-family: Verdana, Arial, Helvetica,sans-serif; +} + +#main { + background: #fff; + margin: 10px 5px; + padding: 20px; + min-height: 1300px; + border-bottom: 3px solid #bbb; + border-left: 1px solid #ddd; + border-right: 1px solid #ddd; + border-radius: 5px; + -moz-border-radius-right: 5px; + -webkit-border-radius-right: 5px; + + -moz-box-shadow: 0px 2px 7px 1px #bbb; + -webkit-box-shadow: 0px 2px 7px 1px #bbb; + box-shadow: 0px 2px 7px 1px #bbb; + +} + +.bgred { + background-color: #B70101; + -moz-border-top-right-radius: 10px; + -webkit-border-top-right-radius: 10px; + border-bottom-right-radius: 10px; + -moz-border-bottom-right-radius: 10px; + -webkit-border-bottom-right-radius: 10px; + margin: 10px 0px; + + -moz-box-shadow: 0px 2px 7px 1px #bbb; + -webkit-box-shadow: 0px 2px 7px 1px #bbb; + box-shadow: 0px 2px 7px 1px #bbb; +} + +#copyright { +font-family: Verdana, Arial, Helvetica, sans-serif; +font-size: 75%; + +background: #ddd; +color: #333; +border: 1px solid #bbb; +border-bottom: 3px solid #bbb;
+border-top: 0px;
+
+padding: 5px 10px;
+border-radius: 5px;
+-moz-border-radius: 5px;
+-webkit-border-radius: 5px;
+
+    -moz-box-shadow: 0px 2px 7px 1px #bbb;
+    -webkit-box-shadow: 0px 2px 7px 1px #bbb;
+    box-shadow: 0px 2px 7px 1px #bbb;
+
+
+margin-top: 40px;
+}
+
+#copyright a {
+    color: #66a;
+}
+
+
+.navbodyblack {
+    font-family: Verdana,Arial, Helvetica, sans-serif;
+    color: #ffffff; background-color: #B70101;
+    text-decoration: none; padding-left: 8px;
+    padding-top: 5px; padding-bottom: 5px;
+    padding-right: 2px;
+    font-weight: normal;
+    margin: 0px;
+}
+
+.navbodyblack a:link {
+    color:#ffffff;
+    text-decoration: none;
+}
+
+.navbodyblack a:visited {
+    color:#ffffff;
+    text-decoration: none;
+}
+
+.navbodyblack a:hover {
+    color:#cc9900;
+    text-decoration: none;
+}
+
+code {
+    font-size: 120%;
+}
+
+pre {
+    border-radius: 5px;
+    -moz-border-radius: 5px;
+    -webkit-border-radius: 5px;
+    font-size: 120%;
+    margin: 1em 2em;
+
+    background: #ddd;
+    color: #333;
+    border: 1px solid #bbb;
+    border-bottom: 3px solid #bbb;
+    border-top: 0px;
+    border-left: 5px solid #b70101;
+    padding: 0.5em 1.2em;
+
+    -moz-box-shadow: 0px 2px 7px 1px #bbb;
+    -webkit-box-shadow: 0px 2px 7px 1px #bbb;
+    box-shadow: 0px 2px 7px 1px #bbb;
+}
+ul.sidebar {
+    width: 20%;
+    font-size: 80%;
+    margin: 0.8em;
+}
+ul.sidebar,ul.sidebar ul {
+    padding: 0;
+    border: 0;
+    color: white;
+}
+ul.sidebar ul {
+    margin: 0 0.8em;
+}
+ul.sidebar a:link, ul.sidebar a:visited, ul.sidebar a:active {
+    color: white;
+    text-decoration: none;
+    display: block;
+}
+ul.sidebar a:hover, ul.sidebar a:active {
+    /*text-decoration: underline;
+    color:#cc9900; */
+    color: #b70101;
+    background-color: white;
+    transition: all linear 0.2s 0s;
+    position: relative;
+    left:5px;
+
+    border-left:5px solid #d41a1a;
+}
+ul.sidebar li {
+    font-size: 16px;
+    margin: 0;
+    margin-top: .5em;
+    padding: 2px 4px;
+    border: 0;
+    list-style-type: none;
+}
+ul.sidebar li a{
+    position: relative;
+    top: 0px;
+
+    transition: all linear 0.2s 0s;
+
+    padding: 5px 10px;
+    border-left:5px solid #d41a1a;
+    background: #d41a1a;
+
+    -moz-box-shadow: 0px 2px 4px 1px #9E0000;
+    -webkit-box-shadow: 0px 2px 4px 1px #9E0000;
+    box-shadow: 0px 2px 4px 1px #9E0000;
+
+    color: white;
+    border-radius: 5px;
+    -moz-border-radius: 5px;
+    -webkit-border-radius: 5px;
+}
+
+ul.sidebar ul li a{
+    font-size: 14px;
+
+}
+
+ul.sidebar li.spacer {
+}
+
+#tile-wrapper {
+    margin-left:auto;
+    margin-right:auto;
+    padding:20px;
+}
+
+a.tile {
+    display: inline-block;
+    position:relative;
+    width:800px;
+    text-decoration: none;
+
+    overflow:hidden;
+
+    margin-left:auto;
+    margin-right:auto;
+
+    font-size:75%;
+    margin: 10px;
+    background: #cdcdcd;
+    padding: 10px;
+    border-radius: 5px;
+    border-bottom: 5px solid #aaa;
+    border-top: 5px solid #ddd;
+    color: #555;
+
+    -moz-box-shadow: 0px 1px 8px 1px #bbb;
+    -webkit-box-shadow: 0px 1px 8px 1px #bbb;
+    box-shadow: 0px 1px 8px 1px #bbb;
+}
+
+a.tile:hover {
+    top:-5px;
+
+
+    -moz-box-shadow: 0px 6px 8px 1px #bbb;
+    -webkit-box-shadow: 0px 6px 8px 1px #bbb;
+    box-shadow: 0px 6px 8px 1px #bbb;
+
+}
+
+
+a.tile p{
+    display:inline-block;
+    width:84%;
+    height:65px;
+    float:right;
+
+    text-align: left;
+
+    background: #fff;
+    color: #000;
+    padding: 10px;
+
+    font-size: 10px;
+
+    border-color: #fff;
+    background-color: #fff;
+    padding: 10px;
+    margin:5px;
+    border-radius: 5px;
+}
+
+a.tile img{
+    width:75px;
+    float:left;
+
+    display:inline-block;
+
+    border-color: #fff;
+    background-color: #fff;
+    padding: 5px;
+    margin:5px;
+    border-radius:
5px; +} + +a.tile h2{ + text-decoration: none; + color:#555; + margin:0px 5px; + font-size: 140%; +} + +#hours { + width: 120px; + height: 92px; + font-size:75%; + margin-left:10px; + float: right; + background: #B70101; + padding: 5px 10px; + border-radius: 5px; + border-bottom: 5px solid #920000; + border-top: 5px solid #d41a1a; + color: #fff; + + -moz-box-shadow: 0px 1px 4px 1px #bbb; + -webkit-box-shadow: 0px 1px 4px 1px #bbb; + box-shadow: 0px 1px 4px 1px #bbb; +} + +#osg_power { + height: 92px; + margin-left:10px; + float: right; + background: #F29B12; + padding: 5px 10px; + border-radius: 5px; + border-bottom: 5px solid #EF7821; + border-top: 5px solid #FDC10A; + color: #fff; + + -moz-box-shadow: 0px 1px 4px 1px #bbb; + -webkit-box-shadow: 0px 1px 4px 1px #bbb; + box-shadow: 0px 1px 4px 1px #bbb; +} + +#osg_power img { + border-radius: 5px; +} + +p.underconstruction { + border: 1px solid #666; + background-color: #FFA; + padding: 0.1em 0.5em; + margin-left: 2em; + margin-right: 2em; + font-style: italic; + + -moz-box-shadow: 0px 2px 7px 1px #bbb; + -webkit-box-shadow: 0px 2px 7px 1px #bbb; + box-shadow: 0px 2px 7px 1px #bbb; +} +.num { + text-align: right; +} +table { + border-collapse: collapse; +} +td,tr { + padding-left: 0.2em; + padding-right: 0.2em; +} diff --git a/preview-fall2024-info/index.html b/preview-fall2024-info/index.html new file mode 100644 index 000000000..05f7609c5 --- /dev/null +++ b/preview-fall2024-info/index.html @@ -0,0 +1,746 @@ + + + + + + +Home + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + +
+
+                CHTC is hiring! View the new position on the jobs page and apply now.
+                View Job Posting
+
+ +
+ + + CHTC Logo + +
+
+
+
+ +
+ +
+ + + +
+
+
+ +
+
+ + + HTC Week 2024 Photos + + + +

+
+
+ +
+ +
+
+
+
+
+ +
+
+ +
+
+ +
+
+

+ + The Center for High Throughput Computing (CHTC), established in 2006, aims to bring the power + of High Throughput Computing to all fields of research, and to allow the future of HTC to be shaped + by insight from all fields. + +

+
+
+
+

+ Are you a UW-Madison researcher looking to expand your computing beyond your local resources? Request + an account now to take advantage of the open computing services offered by the CHTC! +

+
+ +
+
+
+
+

+                    High Throughput Computing is a collection of principles and techniques that maximize the effective throughput
+                    of computing resources towards a given problem. When applied to scientific computing, HTC can improve the
+                    use of a computing resource, increase automation, and help drive the scientific problem forward.
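To make the throughput idea concrete, here is a minimal sketch (illustrative only; the parameter names and file layout are assumptions, not CHTC's actual workflow) of the first step in a typical HTC campaign: splitting one large parameter sweep into many small, self-contained tasks that can run independently, in any order, on any machine.

```python
# Sketch: shard a parameter sweep into independent task descriptions.
# Each task file can be handed to a separate batch job, so the sweep's
# effective throughput scales with the number of machines available.
import itertools
import json
from pathlib import Path

temperatures = [280, 300, 320]   # illustrative sweep parameters
pressures = [1.0, 2.0, 5.0]
seeds = range(10)

outdir = Path("tasks")
outdir.mkdir(exist_ok=True)

for i, (t, p, s) in enumerate(itertools.product(temperatures, pressures, seeds)):
    # One self-contained description per task: no task depends on another.
    (outdir / f"task_{i}.json").write_text(
        json.dumps({"temperature": t, "pressure": p, "seed": s})
    )
print(f"wrote {i + 1} independent task files to {outdir}/")
```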

+

+                    The team at CHTC develops technologies and services for HTC. CHTC is the home of the HTCondor Software
+                    Suite, which has over 30 years of experience in tackling HTC problems;
+                    it manages shared computing resources
+                    for researchers on the UW-Madison campus; and it leads the OSG Consortium,
+                    a national-scale environment for distributed HTC.

+ +
+
+
+ +
+
+

News

+ +
+ +
+ + + +
+
+
+ +
+
+ + + HTC Week 2024 Photos + + + +

+
+
+ +
+ +
+
+
+
+
+ +
+
+ +
+
+ +
+
+
+

+ Last Year Serving UW-Madison +

+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +

+ HTCSS +

+ +
+

The HTCondor Software Suite (HTCSS) provides sites and users with the ability to manage and execute +HTC workloads. Whether it’s managing a single laptop or 250,000 cores at CERN, HTCondor +helps solve computational problems through the application of the HTC principles.
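As a rough illustration of what managing and executing an HTC workload looks like in practice, the sketch below queues a batch of independent jobs through HTCondor's Python bindings. It assumes a working access point and a worker script named analyze.sh (both hypothetical here), the submit attributes are illustrative, and the exact bindings API varies between HTCondor versions.

```python
# Sketch: queue many independent jobs via the HTCondor Python bindings
# (requires `pip install htcondor` and access to a schedd/access point).
import htcondor

job = htcondor.Submit({
    "executable": "analyze.sh",           # illustrative worker script
    "arguments": "tasks/task_$(ProcId).json",  # e.g. files from the sketch above
    "output": "job_$(ProcId).out",
    "error": "job_$(ProcId).err",
    "log": "sweep.log",
    "request_cpus": "1",
    "request_memory": "1GB",
})

schedd = htcondor.Schedd()                # the local access point
result = schedd.submit(job, count=90)     # one job per task file
print("submitted cluster", result.cluster())
```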

+ +
+
+
+
+ Laptop +
+
+
+
+ +

+ Services +

+ +
+

UW Research Computing

+

CHTC manages over 20,000 cores and dozens of GPUs for the UW-Madison +campus; this resource, which is free and shared, aims to advance the +mission of the University of Wisconsin in support of the Wisconsin +Idea. Researchers can place their workloads on an access point at +CHTC and utilize the resources at CHTC, across the campus, and across +the nation.

+ +

Research Facilitation

+

CHTC’s Research Facilitation team empowers researchers to utilize computing to achieve +their goals. The Research Facilitation approach emphasizes teaching users skills and +methodologies to manage & automate workloads on resources like those at CHTC, the campus, +or across the world.

+ +
+
+
+
+ Microscope +
+
+ +
+ +
+
+
+

+ As part of its many services to UW-Madison and beyond, + the CHTC is home to or supports the following Research + Projects and Partners. +

+
+
+

OSG

+

+                        The OSG is a consortium of research collaborations, campuses, national laboratories and software
+                        providers dedicated to the advancement of all of open science via the practice of distributed High Throughput
+                        Computing (dHTC), and to the advancement of its state of the art. The OSG operates a fabric of dHTC services
+                        for the national science and engineering community, and CHTC has been a major force in OSG since its inception
+                        in 2005.

+
+ + OSG Logo + +
+
+
+

PATh

+

+ The Partnership to Advance Throughput Computing (PATh) is a partnership between + CHTC and OSG to advance throughput computing. Funded through a major investment + from NSF, PATh helps advance HTC at a national level through support for + HTCSS and provides a fabric of services for the NSF science and engineering community + to access resources across the nation. +

+
+ + PATh Logo + +
+
+
+

Morgridge Institute for Research

+

+ The Morgridge Institute for Research is a private, biomedical research institute + located on the UW-Madison campus. Morgridge’s Research Computing Theme is a unique + partner with CHTC, investing in the vision of HTC and its ability to advance basic + research. +

+
+ + Morgridge Logo + +
+ +
+
+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/internships.html b/preview-fall2024-info/internships.html new file mode 100644 index 000000000..ab6be8a44 --- /dev/null +++ b/preview-fall2024-info/internships.html @@ -0,0 +1,391 @@ + + + + + + +The Path to Internship and Fellowship Opportunities + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + +
+ Collage photos of current and previous CHTC interns. + Collage photos of current and previous CHTC interns. +
+ +
+
+
+

+ The Path to Internship and Fellowship Opportunities +

+ +

+ Want to make a difference and see your work directly + impact science across the globe? +

+

+ The Center for High Throughput Computing offers internship and summer fellowship opportunities + for undergraduate and graduate students. +

+ +

The Mission

+

CHTC is a research computing organization located within the University + of Wisconsin-Madison CS Department and at the Morgridge Institute for + Research. CHTC is an internationally recognized leader in high + throughput computing and provides access to free large-scale computing + capacity for research. CHTC advances the field of research computing + through innovative software and services, leading distributed computing + projects across the campus and the nation. +

+ +

The CHTC Fellows Program

+

Our Fellows Program provides undergraduate and graduate students with
 + learning opportunities in research computing, system administration,
 + and facilitation. Working with engineers and
 + dedicated mentors, fellows will have the opportunity to learn
 + from leaders in their field and access state-of-the-art computing
 + facilities. Fellows can also attend workshops, lectures, and social
 + and recreational events with CHTC team members. Learn more about the CHTC Fellows Program.

+ +

We Value Diversity

+

CHTC and Morgridge are committed to increasing diversity among + interns and staff. We believe that advancing throughput computing + and scientific research is enhanced by a wide range of backgrounds + and perspectives. +

+ + + + +
+ +

Questions: chtc-jobs@g-groups.wisc.edu

+
+
+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/jobs.html b/preview-fall2024-info/jobs.html new file mode 100644 index 000000000..b2501c3c8 --- /dev/null +++ b/preview-fall2024-info/jobs.html @@ -0,0 +1,485 @@ + + + + + + +Available Positions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + +
+ IT Professional, Emile, working in the Discovery Server Rooms +
Photo: Morgridge Institute for Research
+
+ +
+
+
+

+ Available Positions +

+ +

+ If advancing the state of the art of distributed computing in an + academic environment interests you, the Center for High Throughput + Computing (CHTC) at the University of Wisconsin-Madison (UW) offers a + unique working environment. Our project’s home is in the UW Department + of Computer Sciences, an internationally recognized department + consistently ranked in the top ten across the USA. +

+

+                        A position with CHTC
+                        will provide you with the opportunity to interact with both department
+                        faculty and students to translate novel ideas into real-world solutions.
+                        The software and infrastructure you will be working on are used by
+                        scientists and engineers at hundreds of institutions, from universities
+                        to national laboratories and from large high-tech corporations to small
+                        animation teams.

+ +

Internships

+

+ Our internship program provides undergraduate and graduate students with learning opportunities in + research computing, system administration, web development, and communication. +

+ Learn More + + + + +

Full Time Positions

+

+                        Details about our open full-time positions are typically provided below.
+                        Positions can also be found by searching for "HTCondor" or "CHTC"
+                        on the University's
+                        Position Vacancy List
+                        (PVL).

+ + +
+
+

Research Software Engineer

+
+
+ Full Time - Morgridge Institute for Research +
+ +
+

+
+

The Research Software Engineer (RSE) will work to bring modern software engineering techniques and approaches +to research projects at the institute as part of long-running engagements and collaborations between scientists.

+ +

At Morgridge, the RSE will sit at the nexus of exciting research, large-scale computing, and national
+cyberinfrastructure projects. Whether it's bringing a codebase up to production quality, designing
+programmatic interfaces, or making workloads run more effectively across thousands of cores, the
+RSE will face a diversity of challenges and help advance Morgridge’s goals of Fearless Science.

+ +

Potential projects will be diverse, including facilitating machine learning projects, developing +tools for global distributed data management, and enabling large-scale protein simulation; +projects will be tailored based on the candidate.

+ + + + +
+ + + + +
+

Benefits

+ +

+                        The University of Wisconsin-Madison is a great place to work. You can
+                        read about the benefits in detail
+                        elsewhere. In short, we have
+                        five weeks of vacation/personal time per year, very good health
+                        insurance (and cost-effective for entire families), and a good
+                        retirement plan. Please note that the minimum salary in our job listings
+                        is just that - the minimum. Compensation will increase with experience.

+ +

In addition to the official benefits, there are many side benefits:

+ +
    +
  1. You will work with the CHTC team. We are world leaders in solving
 +        interesting distributed computing problems!
  2. You can attend interesting talks in the department.
  3. Relatively flexible working hours — we value work-life balance.
  4. A Discounted Bus Pass!
  5. You get staff access to the Union,
 +        the UW athletic facilities, and
 +        the UW library system.
  6. We're in a lively neighborhood with great restaurants in easy
 +        walking distance.
+ +
+ +

+ If you are interested in a position with CHTC, explore the job listings + below! If you would like to apply, send your resume and cover letter to + chtc-jobs@g-groups.wisc.edu, and indicate + which job you would like to apply for. +

+ +

Please note:

+ +
    +
  • A criminal background check will be conducted prior to hiring.
  • A period of evaluation will be required.
  • UW-Madison is an equal opportunity/affirmative action employer. We
 +        promote excellence through diversity and encourage all qualified
 +        individuals to apply.
+
+
+
+
+
+
+ +

Internships

+ +
+
+
+
+
+
+
+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/machine-learning.html b/preview-fall2024-info/machine-learning.html new file mode 100644 index 000000000..c605fbb80 --- /dev/null +++ b/preview-fall2024-info/machine-learning.html @@ -0,0 +1,379 @@ + + + + + + +Machine learning insights into molecular science using the Open Science Pool + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Machine learning insights into molecular science using the Open Science Pool +

+

+Computation has extended what researchers can investigate in chemistry, biology, and material science. Researchers studying complex systems like proteins or nanocomposites face common challenges and can apply similar computational techniques. For example, computational power is expanding the horizons of protein research and opening up vast new possibilities for drug discovery and disease treatment.

+ +

Olexandr Isayev is an assistant professor at the School of Pharmacy, University of North Carolina (UNC) at Chapel Hill. Isayev is part of a group at UNC using machine learning for chemical problems and material science.

+ +

“Specifically, we apply machine learning to chemical and material science data to understand the data, find patterns in it, and make predictive models,” says Isayev. “We focus on three areas: computer-aided design of novel materials, computational drug discovery, and acceleration of quantum mechanical methods with GPUs (graphic processing units) and machine learning.”

+ +

In drug discovery, where a small organic molecule binds to a protein receptor, Isayev uses machine learning to build predictive models based on historical collections of experimental data. “We want to challenge models and find a new molecule with better binding properties,” says Isayev.

+ +
+
+ Protein model visualization +
+
+
Protein Model
+

Example of a protein model that Isayev and his group study. Courtesy image.

+
+
+ +

Five years ago, President Obama created the Materials Genome Initiative, modeled on the Human Genome Project, to accelerate the design of new materials. Using machine learning methods based on the crystal structure of the material he is studying, Isayev can predict its physical properties.

+ +

“Looking at a molecule or material based on geometry and topology, we can get the energy, and predict critical physical properties,” says Isayev. “This machine learning allows us to avoid many expensive uses of numeric simulation to understand the material.”

+ +

The challenge for Isayev’s group is that the initial data accumulation is extremely time-consuming numerically. So they use the Open Science Pool to run the simulations. They then train their machine learning model on the resulting data; afterwards, instead of running a time-consuming simulation, they can use the machine learning model on a desktop PC.
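The pattern described here (run expensive simulations once, then train a cheap surrogate model on the results) can be sketched in a few lines of Python. The descriptors, data, and model below are stand-ins; the group's real features and models are more sophisticated, but the shape of the workflow is the same.

```python
# Sketch: fit a surrogate model to simulated data so that future
# property predictions are cheap (all data here is synthetic).
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
X = rng.random((5000, 32))          # stand-in structural descriptors
y = X @ rng.random(32) + 0.1 * rng.standard_normal(5000)  # stand-in property

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
model = RandomForestRegressor(n_estimators=200, random_state=0)
model.fit(X_train, y_train)         # trained once, on expensive data
print("held-out R^2:", model.score(X_test, y_test))
```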

+ +

“Using machine learning to do the preliminary screening saves a lot of computing time,” says Isayev. “Since we performed the hard work, scientists can save a lot of time by prioritizing a few promising candidate materials instead of running everything.”

+ +

For studying something like a photovoltaic semiconductor, Isayev selects a candidate after running about a thousand quantum mechanical calculations. He then uses machine learning to screen 50,000 materials. “You can do this on a laptop,” says Isayev. “We prioritize a few—like ten to fifty. We can predict what to run next instead of running all of them. This saves a lot of computing time and gives us a powerful tool for screening and prioritization.”
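The screening step then reduces to ranking candidates by the surrogate's predictions and keeping the top handful; in this hedged sketch, `model` stands for any fitted regressor such as the one trained above.

```python
# Sketch: cheap ML screening of a large candidate pool; only the
# top-ranked candidates go on to full quantum mechanical simulation.
import numpy as np

def shortlist(model, candidate_descriptors, top_k=50):
    """Return the indices of the top_k candidates, best first."""
    predicted = model.predict(candidate_descriptors)  # seconds on a laptop
    return np.argsort(predicted)[::-1][:top_k]
```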

+ +

On the OSG, they run “small density functional theory (DFT) calculations. We are interested in molecular properties,” says Isayev. “We run a program package called ORCA (Quantum Chemistry Program), a free chemistry package. It implements lots of QM methods for molecules and crystals. We use it and then we have our own scripts, run them on the OSG, collect the data, and then analyze the data.”
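As an illustration of how such a campaign might be scripted (this is not the group's actual tooling), the snippet below writes one ORCA input file per molecule so that each calculation can be shipped to the OSG as its own job. The level-of-theory keywords and the geometry are placeholders.

```python
# Sketch: generate one ORCA input file per molecule so each DFT job
# can run as an independent OSG task (method keywords illustrative).
from pathlib import Path

molecules = {
    "water": [("O", 0.000, 0.000, 0.117),
              ("H", 0.000, 0.757, -0.469),
              ("H", 0.000, -0.757, -0.469)],
}

for name, atoms in molecules.items():
    lines = ["! B3LYP def2-SVP TightSCF",   # level of theory (placeholder)
             "* xyz 0 1"]                   # charge 0, singlet multiplicity
    lines += [f"{el} {x: .3f} {y: .3f} {z: .3f}" for el, x, y, z in atoms]
    lines.append("*")
    Path(f"{name}.inp").write_text("\n".join(lines) + "\n")
```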

+ +

“I am privileged to work with extremely talented people like Roman Zubatyuk,” says Isayev. Zubatyuk works with Isayev on many different projects. “Roman has developed our software ecosystem container using Docker. These simulations run locally on our machines through the Docker virtual environment and eliminate many issues. With a central database and set of scripts, we could seamlessly run hundreds of thousands of simulations without any problems.”
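A containerized run like the one described can be driven by a small wrapper; this sketch shells out to the docker CLI, and the image name, mount point, and entry script are all hypothetical.

```python
# Sketch: execute one simulation task inside a Docker container so
# every run sees an identical software stack, as described above.
import os
import subprocess

def run_in_container(task_file: str, image: str = "example/chem-stack:latest"):
    """Run a single task in the container, mounting the current directory."""
    return subprocess.run(
        ["docker", "run", "--rm",
         "-v", f"{os.getcwd()}:/work",  # share inputs/outputs with the host
         "-w", "/work",                 # work inside the mounted directory
         image, "python", "run_simulation.py", task_file],
        check=True,
    )
```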

+ +

Finding new materials and molecules is a hard science problem. “There is no one answer when looking for a new molecule,” says Isayev. “We cannot just use brute force. We have to be creative because it is like looking for a needle in a haystack.”

+ +

For something like a solar cell device, researchers might find a drawback in the performance of the material. “We are looking to improve current materials, improve their performance, or make them cheaper, so we can move them to mass production so everyone benefits,” says Isayev.

+ +

“For us, the OSG is a fantastic resource for which we are very grateful,” says Isayev. “It gives us access to computation that enables our simulations that we could not do otherwise. To run all our simulations requires lots of computing resources that we cannot run on a local cluster. To do our simulation screening, we have to perform lots of calculations. We can easily distribute these calculations because they don’t need to communicate to each other. The OSG is a perfect fit.”

+ + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/map/images/Thumbs.db b/preview-fall2024-info/map/images/Thumbs.db new file mode 100644 index 000000000..479db4c6c Binary files /dev/null and b/preview-fall2024-info/map/images/Thumbs.db differ diff --git a/preview-fall2024-info/map/images/add dept.xlsx b/preview-fall2024-info/map/images/add dept.xlsx new file mode 100644 index 000000000..f573423c5 Binary files /dev/null and b/preview-fall2024-info/map/images/add dept.xlsx differ diff --git a/preview-fall2024-info/map/images/map-empty.jpg b/preview-fall2024-info/map/images/map-empty.jpg new file mode 100644 index 000000000..d6558c927 Binary files /dev/null and b/preview-fall2024-info/map/images/map-empty.jpg differ diff --git a/preview-fall2024-info/map/images/map.jpg b/preview-fall2024-info/map/images/map.jpg new file mode 100644 index 000000000..b73fac42b Binary files /dev/null and b/preview-fall2024-info/map/images/map.jpg differ diff --git a/preview-fall2024-info/map/images/map.psd b/preview-fall2024-info/map/images/map.psd new file mode 100644 index 000000000..0fca71212 Binary files /dev/null and b/preview-fall2024-info/map/images/map.psd differ diff --git a/preview-fall2024-info/map/images/map1.jpg b/preview-fall2024-info/map/images/map1.jpg new file mode 100644 index 000000000..5364d0512 Binary files /dev/null and b/preview-fall2024-info/map/images/map1.jpg differ diff --git a/preview-fall2024-info/map/images/map2.jpg b/preview-fall2024-info/map/images/map2.jpg new file mode 100644 index 000000000..9389fdb41 Binary files /dev/null and b/preview-fall2024-info/map/images/map2.jpg differ diff --git a/preview-fall2024-info/map/images/map2.psd b/preview-fall2024-info/map/images/map2.psd new file mode 100644 index 000000000..7a457c7e4 Binary files /dev/null and b/preview-fall2024-info/map/images/map2.psd differ diff --git a/preview-fall2024-info/map/images/map3.jpg b/preview-fall2024-info/map/images/map3.jpg new file mode 100644 index 000000000..65bae21f5 Binary files /dev/null and b/preview-fall2024-info/map/images/map3.jpg differ diff --git a/preview-fall2024-info/map/images/map4.jpg b/preview-fall2024-info/map/images/map4.jpg new file mode 100644 index 000000000..dec68e6dc Binary files /dev/null and b/preview-fall2024-info/map/images/map4.jpg differ diff --git a/preview-fall2024-info/map/images/map5.jpg b/preview-fall2024-info/map/images/map5.jpg new file mode 100644 index 000000000..9d3927126 Binary files /dev/null and b/preview-fall2024-info/map/images/map5.jpg differ diff --git a/preview-fall2024-info/map/images/map6.jpg b/preview-fall2024-info/map/images/map6.jpg new file mode 100644 index 000000000..29f1a1aff Binary files /dev/null and b/preview-fall2024-info/map/images/map6.jpg differ diff --git a/preview-fall2024-info/map/index.html b/preview-fall2024-info/map/index.html new file mode 100644 index 000000000..9c8745e83 --- /dev/null +++ b/preview-fall2024-info/map/index.html @@ -0,0 +1,649 @@ + + + + + + +CHTC User Map + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+

+ CHTC User Map +

+ + + + + + + +
+ +map1 +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/map/scripts/jquery.imagemapster.js b/preview-fall2024-info/map/scripts/jquery.imagemapster.js new file mode 100644 index 000000000..fce098724 --- /dev/null +++ b/preview-fall2024-info/map/scripts/jquery.imagemapster.js @@ -0,0 +1,4559 @@ +/* ImageMapster + Version: 1.2.8 (12/30/2012) + +Copyright 2011-2012 James Treworgy + +http://www.outsharked.com/imagemapster +https://github.com/jamietre/ImageMapster + +A jQuery plugin to enhance image maps. + +*/ + +; + +/// LICENSE (MIT License) +/// +/// Permission is hereby granted, free of charge, to any person obtaining +/// a copy of this software and associated documentation files (the +/// "Software"), to deal in the Software without restriction, including +/// without limitation the rights to use, copy, modify, merge, publish, +/// distribute, sublicense, and/or sell copies of the Software, and to +/// permit persons to whom the Software is furnished to do so, subject to +/// the following conditions: +/// +/// The above copyright notice and this permission notice shall be +/// included in all copies or substantial portions of the Software. +/// +/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +/// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +/// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +/// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +/// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +/// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +/// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +/// +/// January 19, 2011 + +/** @license MIT License (c) copyright B Cavalier & J Hann */ + +/** +* when +* A lightweight CommonJS Promises/A and when() implementation +* +* when is part of the cujo.js family of libraries (http://cujojs.com/) +* +* Licensed under the MIT License at: +* http://www.opensource.org/licenses/mit-license.php +* +* @version 1.2.0 +*/ + +/*lint-ignore-start*/ + +(function (define) { + define(function () { + var freeze, reduceArray, slice, undef; + + // + // Public API + // + + when.defer = defer; + when.reject = reject; + when.isPromise = isPromise; + + when.all = all; + when.some = some; + when.any = any; + + when.map = map; + when.reduce = reduce; + + when.chain = chain; + + /** Object.freeze */ + freeze = Object.freeze || function (o) { return o; }; + + /** + * Trusted Promise constructor. A Promise created from this constructor is + * a trusted when.js promise. Any other duck-typed promise is considered + * untrusted. + * + * @constructor + */ + function Promise() { } + + Promise.prototype = freeze({ + always: function (alwaysback, progback) { + return this.then(alwaysback, alwaysback, progback); + }, + + otherwise: function (errback) { + return this.then(undef, errback); + } + }); + + /** + * Create an already-resolved promise for the supplied value + * @private + * + * @param value anything + * @return {Promise} + */ + function resolved(value) { + + var p = new Promise(); + + p.then = function (callback) { + var nextValue; + try { + if (callback) nextValue = callback(value); + return promise(nextValue === undef ? value : nextValue); + } catch (e) { + return rejected(e); + } + }; + + return freeze(p); + } + + /** + * Create an already-rejected {@link Promise} with the supplied + * rejection reason. 
+ * @private + * + * @param reason rejection reason + * @return {Promise} + */ + function rejected(reason) { + + var p = new Promise(); + + p.then = function (callback, errback) { + var nextValue; + try { + if (errback) { + nextValue = errback(reason); + return promise(nextValue === undef ? reason : nextValue) + } + + return rejected(reason); + + } catch (e) { + return rejected(e); + } + }; + + return freeze(p); + } + + /** + * Returns a rejected promise for the supplied promiseOrValue. If + * promiseOrValue is a value, it will be the rejection value of the + * returned promise. If promiseOrValue is a promise, its + * completion value will be the rejected value of the returned promise + * + * @param promiseOrValue {*} the rejected value of the returned {@link Promise} + * + * @return {Promise} rejected {@link Promise} + */ + function reject(promiseOrValue) { + return when(promiseOrValue, function (value) { + return rejected(value); + }); + } + + /** + * Creates a new, CommonJS compliant, Deferred with fully isolated + * resolver and promise parts, either or both of which may be given out + * safely to consumers. + * The Deferred itself has the full API: resolve, reject, progress, and + * then. The resolver has resolve, reject, and progress. The promise + * only has then. + * + * @memberOf when + * @function + * + * @returns {Deferred} + */ + function defer() { + var deferred, promise, listeners, progressHandlers, _then, _progress, complete; + + listeners = []; + progressHandlers = []; + + /** + * Pre-resolution then() that adds the supplied callback, errback, and progback + * functions to the registered listeners + * + * @private + * + * @param [callback] {Function} resolution handler + * @param [errback] {Function} rejection handler + * @param [progback] {Function} progress handler + * + * @throws {Error} if any argument is not null, undefined, or a Function + */ + _then = function unresolvedThen(callback, errback, progback) { + var deferred = defer(); + + listeners.push(function (promise) { + promise.then(callback, errback) + .then(deferred.resolve, deferred.reject, deferred.progress); + }); + + progback && progressHandlers.push(progback); + + return deferred.promise; + }; + + /** + * Registers a handler for this {@link Deferred}'s {@link Promise}. Even though all arguments + * are optional, each argument that *is* supplied must be null, undefined, or a Function. + * Any other value will cause an Error to be thrown. + * + * @memberOf Promise + * + * @param [callback] {Function} resolution handler + * @param [errback] {Function} rejection handler + * @param [progback] {Function} progress handler + * + * @throws {Error} if any argument is not null, undefined, or a Function + */ + function then(callback, errback, progback) { + return _then(callback, errback, progback); + } + + /** + * Resolves this {@link Deferred}'s {@link Promise} with val as the + * resolution value. + * + * @memberOf Resolver + * + * @param val anything + */ + function resolve(val) { + complete(resolved(val)); + } + + /** + * Rejects this {@link Deferred}'s {@link Promise} with err as the + * reason. 
+ * + * @memberOf Resolver + * + * @param err anything + */ + function reject(err) { + complete(rejected(err)); + } + + /** + * @private + * @param update + */ + _progress = function (update) { + var progress, i = 0; + while (progress = progressHandlers[i++]) progress(update); + }; + + /** + * Emits a progress update to all progress observers registered with + * this {@link Deferred}'s {@link Promise} + * + * @memberOf Resolver + * + * @param update anything + */ + function progress(update) { + _progress(update); + } + + /** + * Transition from pre-resolution state to post-resolution state, notifying + * all listeners of the resolution or rejection + * + * @private + * + * @param completed {Promise} the completed value of this deferred + */ + complete = function (completed) { + var listener, i = 0; + + // Replace _then with one that directly notifies with the result. + _then = completed.then; + + // Replace complete so that this Deferred can only be completed + // once. Also Replace _progress, so that subsequent attempts to issue + // progress throw. + complete = _progress = function alreadyCompleted() { + // TODO: Consider silently returning here so that parties who + // have a reference to the resolver cannot tell that the promise + // has been resolved using try/catch + throw new Error("already completed"); + }; + + // Free progressHandlers array since we'll never issue progress events + // for this promise again now that it's completed + progressHandlers = undef; + + // Notify listeners + // Traverse all listeners registered directly with this Deferred + + while (listener = listeners[i++]) { + listener(completed); + } + + listeners = []; + }; + + /** + * The full Deferred object, with both {@link Promise} and {@link Resolver} + * parts + * @class Deferred + * @name Deferred + */ + deferred = {}; + + // Promise and Resolver parts + // Freeze Promise and Resolver APIs + + promise = new Promise(); + promise.then = deferred.then = then; + + /** + * The {@link Promise} for this {@link Deferred} + * @memberOf Deferred + * @name promise + * @type {Promise} + */ + deferred.promise = freeze(promise); + + /** + * The {@link Resolver} for this {@link Deferred} + * @memberOf Deferred + * @name resolver + * @class Resolver + */ + deferred.resolver = freeze({ + resolve: (deferred.resolve = resolve), + reject: (deferred.reject = reject), + progress: (deferred.progress = progress) + }); + + return deferred; + } + + /** + * Determines if promiseOrValue is a promise or not. Uses the feature + * test from http://wiki.commonjs.org/wiki/Promises/A to determine if + * promiseOrValue is a promise. + * + * @param promiseOrValue anything + * + * @returns {Boolean} true if promiseOrValue is a {@link Promise} + */ + function isPromise(promiseOrValue) { + return promiseOrValue && typeof promiseOrValue.then === 'function'; + } + + /** + * Register an observer for a promise or immediate value. + * + * @function + * @name when + * @namespace + * + * @param promiseOrValue anything + * @param {Function} [callback] callback to be called when promiseOrValue is + * successfully resolved. If promiseOrValue is an immediate value, callback + * will be invoked immediately. + * @param {Function} [errback] callback to be called when promiseOrValue is + * rejected. + * @param {Function} [progressHandler] callback to be called when progress updates + * are issued for promiseOrValue. 
+ * + * @returns {Promise} a new {@link Promise} that will complete with the return + * value of callback or errback or the completion value of promiseOrValue if + * callback and/or errback is not supplied. + */ + function when(promiseOrValue, callback, errback, progressHandler) { + // Get a promise for the input promiseOrValue + // See promise() + var trustedPromise = promise(promiseOrValue); + + // Register promise handlers + return trustedPromise.then(callback, errback, progressHandler); + } + + /** + * Returns promiseOrValue if promiseOrValue is a {@link Promise}, a new Promise if + * promiseOrValue is a foreign promise, or a new, already-resolved {@link Promise} + * whose resolution value is promiseOrValue if promiseOrValue is an immediate value. + * + * Note that this function is not safe to export since it will return its + * input when promiseOrValue is a {@link Promise} + * + * @private + * + * @param promiseOrValue anything + * + * @returns Guaranteed to return a trusted Promise. If promiseOrValue is a when.js {@link Promise} + * returns promiseOrValue, otherwise, returns a new, already-resolved, when.js {@link Promise} + * whose resolution value is: + * * the resolution value of promiseOrValue if it's a foreign promise, or + * * promiseOrValue if it's a value + */ + function promise(promiseOrValue) { + var promise, deferred; + + if (promiseOrValue instanceof Promise) { + // It's a when.js promise, so we trust it + promise = promiseOrValue; + + } else { + // It's not a when.js promise. Check to see if it's a foreign promise + // or a value. + + deferred = defer(); + if (isPromise(promiseOrValue)) { + // It's a compliant promise, but we don't know where it came from, + // so we don't trust its implementation entirely. Introduce a trusted + // middleman when.js promise + + // IMPORTANT: This is the only place when.js should ever call .then() on + // an untrusted promise. + promiseOrValue.then(deferred.resolve, deferred.reject, deferred.progress); + promise = deferred.promise; + + } else { + // It's a value, not a promise. Create an already-resolved promise + // for it. + deferred.resolve(promiseOrValue); + promise = deferred.promise; + } + } + + return promise; + } + + /** + * Return a promise that will resolve when howMany of the supplied promisesOrValues + * have resolved. The resolution value of the returned promise will be an array of + * length howMany containing the resolutions values of the triggering promisesOrValues. 
+ * + * @memberOf when + * + * @param promisesOrValues {Array} array of anything, may contain a mix + * of {@link Promise}s and values + * @param howMany + * @param [callback] + * @param [errback] + * @param [progressHandler] + * + * @returns {Promise} + */ + function some(promisesOrValues, howMany, callback, errback, progressHandler) { + + checkCallbacks(2, arguments); + + return when(promisesOrValues, function (promisesOrValues) { + + var toResolve, results, ret, deferred, resolver, rejecter, handleProgress, len, i; + + len = promisesOrValues.length >>> 0; + + toResolve = Math.max(0, Math.min(howMany, len)); + results = []; + deferred = defer(); + ret = when(deferred, callback, errback, progressHandler); + + // Wrapper so that resolver can be replaced + function resolve(val) { + resolver(val); + } + + // Wrapper so that rejecter can be replaced + function reject(err) { + rejecter(err); + } + + // Wrapper so that progress can be replaced + function progress(update) { + handleProgress(update); + } + + function complete() { + resolver = rejecter = handleProgress = noop; + } + + // No items in the input, resolve immediately + if (!toResolve) { + deferred.resolve(results); + + } else { + // Resolver for promises. Captures the value and resolves + // the returned promise when toResolve reaches zero. + // Overwrites resolver var with a noop once promise has + // be resolved to cover case where n < promises.length + resolver = function (val) { + // This orders the values based on promise resolution order + // Another strategy would be to use the original position of + // the corresponding promise. + results.push(val); + + if (! --toResolve) { + complete(); + deferred.resolve(results); + } + }; + + // Rejecter for promises. Rejects returned promise + // immediately, and overwrites rejecter var with a noop + // once promise to cover case where n < promises.length. + // TODO: Consider rejecting only when N (or promises.length - N?) + // promises have been rejected instead of only one? + rejecter = function (err) { + complete(); + deferred.reject(err); + }; + + handleProgress = deferred.progress; + + // TODO: Replace while with forEach + for (i = 0; i < len; ++i) { + if (i in promisesOrValues) { + when(promisesOrValues[i], resolve, reject, progress); + } + } + } + + return ret; + }); + } + + /** + * Return a promise that will resolve only once all the supplied promisesOrValues + * have resolved. The resolution value of the returned promise will be an array + * containing the resolution values of each of the promisesOrValues. + * + * @memberOf when + * + * @param promisesOrValues {Array|Promise} array of anything, may contain a mix + * of {@link Promise}s and values + * @param [callback] {Function} + * @param [errback] {Function} + * @param [progressHandler] {Function} + * + * @returns {Promise} + */ + function all(promisesOrValues, callback, errback, progressHandler) { + + checkCallbacks(1, arguments); + + return when(promisesOrValues, function (promisesOrValues) { + return _reduce(promisesOrValues, reduceIntoArray, []); + }).then(callback, errback, progressHandler); + } + + function reduceIntoArray(current, val, i) { + current[i] = val; + return current; + } + + /** + * Return a promise that will resolve when any one of the supplied promisesOrValues + * has resolved. The resolution value of the returned promise will be the resolution + * value of the triggering promiseOrValue. 
+ * + * @memberOf when + * + * @param promisesOrValues {Array|Promise} array of anything, may contain a mix + * of {@link Promise}s and values + * @param [callback] {Function} + * @param [errback] {Function} + * @param [progressHandler] {Function} + * + * @returns {Promise} + */ + function any(promisesOrValues, callback, errback, progressHandler) { + + function unwrapSingleResult(val) { + return callback ? callback(val[0]) : val[0]; + } + + return some(promisesOrValues, 1, unwrapSingleResult, errback, progressHandler); + } + + /** + * Traditional map function, similar to `Array.prototype.map()`, but allows + * input to contain {@link Promise}s and/or values, and mapFunc may return + * either a value or a {@link Promise} + * + * @memberOf when + * + * @param promise {Array|Promise} array of anything, may contain a mix + * of {@link Promise}s and values + * @param mapFunc {Function} mapping function mapFunc(value) which may return + * either a {@link Promise} or value + * + * @returns {Promise} a {@link Promise} that will resolve to an array containing + * the mapped output values. + */ + function map(promise, mapFunc) { + return when(promise, function (array) { + return _map(array, mapFunc); + }); + } + + /** + * Private map helper to map an array of promises + * @private + * + * @param promisesOrValues {Array} + * @param mapFunc {Function} + * @return {Promise} + */ + function _map(promisesOrValues, mapFunc) { + + var results, len, i; + + // Since we know the resulting length, we can preallocate the results + // array to avoid array expansions. + len = promisesOrValues.length >>> 0; + results = new Array(len); + + // Since mapFunc may be async, get all invocations of it into flight + // asap, and then use reduce() to collect all the results + for (i = 0; i < len; i++) { + if (i in promisesOrValues) + results[i] = when(promisesOrValues[i], mapFunc); + } + + // Could use all() here, but that would result in another array + // being allocated, i.e. map() would end up allocating 2 arrays + // of size len instead of just 1. Since all() uses reduce() + // anyway, avoid the additional allocation by calling reduce + // directly. + return _reduce(results, reduceIntoArray, results); + } + + /** + * Traditional reduce function, similar to `Array.prototype.reduce()`, but + * input may contain {@link Promise}s and/or values, and reduceFunc + * may return either a value or a {@link Promise}, *and* initialValue may + * be a {@link Promise} for the starting value. + * + * @memberOf when + * + * @param promise {Array|Promise} array of anything, may contain a mix + * of {@link Promise}s and values. May also be a {@link Promise} for + * an array. + * @param reduceFunc {Function} reduce function reduce(currentValue, nextValue, index, total), + * where total is the total number of items being reduced, and will be the same + * in each call to reduceFunc. 
+ * @param initialValue starting value, or a {@link Promise} for the starting value + * + * @returns {Promise} that will resolve to the final reduced value + */ + function reduce(promise, reduceFunc, initialValue) { + var args = slice.call(arguments, 1); + return when(promise, function (array) { + return _reduce.apply(undef, [array].concat(args)); + }); + } + + /** + * Private reduce to reduce an array of promises + * @private + * + * @param promisesOrValues {Array} + * @param reduceFunc {Function} + * @param initialValue {*} + * @return {Promise} + */ + function _reduce(promisesOrValues, reduceFunc, initialValue) { + + var total, args; + + total = promisesOrValues.length; + + // Skip promisesOrValues, since it will be used as 'this' in the call + // to the actual reduce engine below. + + // Wrap the supplied reduceFunc with one that handles promises and then + // delegates to the supplied. + + args = [ + function (current, val, i) { + return when(current, function (c) { + return when(val, function (value) { + return reduceFunc(c, value, i, total); + }); + }); + } + ]; + + if (arguments.length > 2) args.push(initialValue); + + return reduceArray.apply(promisesOrValues, args); + } + + /** + * Ensure that resolution of promiseOrValue will complete resolver with the completion + * value of promiseOrValue, or instead with resolveValue if it is provided. + * + * @memberOf when + * + * @param promiseOrValue + * @param resolver {Resolver} + * @param [resolveValue] anything + * + * @returns {Promise} + */ + function chain(promiseOrValue, resolver, resolveValue) { + var useResolveValue = arguments.length > 2; + + return when(promiseOrValue, + function (val) { + if (useResolveValue) val = resolveValue; + resolver.resolve(val); + return val; + }, + function (e) { + resolver.reject(e); + return rejected(e); + }, + resolver.progress + ); + } + + // + // Utility functions + // + + /** + * Helper that checks arrayOfCallbacks to ensure that each element is either + * a function, or null or undefined. + * + * @private + * + * @param arrayOfCallbacks {Array} array to check + * @throws {Error} if any element of arrayOfCallbacks is something other than + * a Functions, null, or undefined. + */ + function checkCallbacks(start, arrayOfCallbacks) { + var arg, i = arrayOfCallbacks.length; + while (i > start) { + arg = arrayOfCallbacks[--i]; + if (arg != null && typeof arg != 'function') throw new Error('callback is not a function'); + } + } + + /** + * No-Op function used in method replacement + * @private + */ + function noop() { } + + slice = [].slice; + + // ES5 reduce implementation if native not available + // See: http://es5.github.com/#x15.4.4.21 as there are many + // specifics and edge cases. + reduceArray = [].reduce || + function (reduceFunc /*, initialValue */) { + // ES5 dictates that reduce.length === 1 + + // This implementation deviates from ES5 spec in the following ways: + // 1. 
It does not check if reduceFunc is a Callable + + var arr, args, reduced, len, i; + + i = 0; + arr = Object(this); + len = arr.length >>> 0; + args = arguments; + + // If no initialValue, use first item of array (we know length !== 0 here) + // and adjust i to start at second item + if (args.length <= 1) { + // Skip to the first real element in the array + for (; ; ) { + if (i in arr) { + reduced = arr[i++]; + break; + } + + // If we reached the end of the array without finding any real + // elements, it's a TypeError + if (++i >= len) { + throw new TypeError(); + } + } + } else { + // If initialValue provided, use it + reduced = args[1]; + } + + // Do the actual reduce + for (; i < len; ++i) { + // Skip holes + if (i in arr) + reduced = reduceFunc(reduced, arr[i], i, arr); + } + + return reduced; + }; + + return when; + }); +})(typeof define == 'function' + ? define + : function (factory) { + typeof module != 'undefined' + ? (module.exports = factory()) + : (jQuery.mapster_when = factory()); + } +// Boilerplate for AMD, Node, and browser global +); +/*lint-ignore-end*/ +/* ImageMapster core */ + +/*jslint laxbreak: true, evil: true, unparam: true */ + +/*global jQuery: true, Zepto: true */ + + +(function ($) { + // all public functions in $.mapster.impl are methods + $.fn.mapster = function (method) { + var m = $.mapster.impl; + if ($.isFunction(m[method])) { + return m[method].apply(this, Array.prototype.slice.call(arguments, 1)); + } else if (typeof method === 'object' || !method) { + return m.bind.apply(this, arguments); + } else { + $.error('Method ' + method + ' does not exist on jQuery.mapster'); + } + }; + + $.mapster = { + version: "1.2.8", + render_defaults: { + isSelectable: true, + isDeselectable: true, + fade: false, + fadeDuration: 150, + fill: true, + fillColor: '000000', + fillColorMask: 'FFFFFF', + fillOpacity: 0.7, + highlight: true, + stroke: false, + strokeColor: 'ff0000', + strokeOpacity: 1, + strokeWidth: 1, + includeKeys: '', + altImage: null, + altImageId: null, // used internally + altImages: {} + }, + defaults: { + clickNavigate: false, + wrapClass: null, + wrapCss: null, + onGetList: null, + sortList: false, + listenToList: false, + mapKey: '', + mapValue: '', + singleSelect: false, + listKey: 'value', + listSelectedAttribute: 'selected', + listSelectedClass: null, + onClick: null, + onMouseover: null, + onMouseout: null, + mouseoutDelay: 0, + onStateChange: null, + boundList: null, + onConfigured: null, + configTimeout: 30000, + noHrefIsMask: true, + scaleMap: true, + safeLoad: false, + areas: [] + }, + shared_defaults: { + render_highlight: { fade: true }, + render_select: { fade: false }, + staticState: null, + selected: null + }, + area_defaults: + { + includeKeys: '', + isMask: false + }, + canvas_style: { + position: 'absolute', + left: 0, + top: 0, + padding: 0, + border: 0 + }, + hasCanvas: null, + isTouch: null, + windowLoaded: false, + map_cache: [], + hooks: {}, + addHook: function(name,callback) { + this.hooks[name]=(this.hooks[name]||[]).push(callback); + }, + callHooks: function(name,context) { + $.each(this.hooks[name]||[],function(i,e) { + e.apply(context); + }); + }, + utils: { + when: $.mapster_when, + defer: $.mapster_when.defer, + + // extends the constructor, returns a new object prototype. Does not refer to the + // original constructor so is protected if the original object is altered. This way you + // can "extend" an object by replacing it with its subclass. 
+ subclass: function(BaseClass, constr) { + var Subclass=function() { + var me=this, + args=Array.prototype.slice.call(arguments,0); + me.base = BaseClass.prototype; + me.base.init = function() { + BaseClass.prototype.constructor.apply(me,args); + }; + constr.apply(me,args); + }; + Subclass.prototype = new BaseClass(); + Subclass.prototype.constructor=Subclass; + return Subclass; + }, + asArray: function (obj) { + return obj.constructor === Array ? + obj : this.split(obj); + }, + // clean split: no padding or empty elements + split: function (text,cb) { + var i,el, arr = text.split(','); + for (i = 0; i < arr.length; i++) { + el = $.trim(arr[i]); + if (el==='') { + arr.splice(i,1); + } else { + arr[i] = cb ? cb(el):el; + } + } + return arr; + }, + // similar to $.extend but does not add properties (only updates), unless the + // first argument is an empty object, then all properties will be copied + updateProps: function (_target, _template) { + var onlyProps, + target = _target || {}, + template = $.isEmptyObject(target) ? _template : _target; + + //if (template) { + onlyProps = []; + $.each(template, function (prop) { + onlyProps.push(prop); + }); + //} + + $.each(Array.prototype.slice.call(arguments, 1), function (i, src) { + $.each(src || {}, function (prop) { + if (!onlyProps || $.inArray(prop, onlyProps) >= 0) { + var p = src[prop]; + + if ($.isPlainObject(p)) { + // not recursive - only copies 1 level of subobjects, and always merges + target[prop] = $.extend(target[prop] || {}, p); + } else if (p && p.constructor === Array) { + target[prop] = p.slice(0); + } else if (typeof p !== 'undefined') { + target[prop] = src[prop]; + } + + } + }); + }); + return target; + }, + isElement: function (o) { + return (typeof HTMLElement === "object" ? o instanceof HTMLElement : + o && typeof o === "object" && o.nodeType === 1 && typeof o.nodeName === "string"); + }, + // finds element of array or object with a property "prop" having value "val" + // if prop is not defined, then just looks for property with value "val" + indexOfProp: function (obj, prop, val) { + var result = obj.constructor === Array ? -1 : null; + $.each(obj, function (i, e) { + if (e && (prop ? e[prop] : e) === val) { + result = i; + return false; + } + }); + return result; + }, + // returns "obj" if true or false, or "def" if not true/false + boolOrDefault: function (obj, def) { + return this.isBool(obj) ? + obj : def || false; + }, + isBool: function (obj) { + return typeof obj === "boolean"; + }, + isUndef: function(obj) { + return typeof obj === "undefined"; + }, + // evaluates "obj", if function, calls it with args + // (todo - update this to handle variable lenght/more than one arg) + ifFunction: function (obj, that, args) { + if ($.isFunction(obj)) { + obj.call(that, args); + } + }, + size: function(image, raw) { + var u=$.mapster.utils; + return { + width: raw ? (image.width || image.naturalWidth) : u.imgWidth(image,true) , + height: raw ? (image.height || image.naturalHeight) : u.imgHeight(image,true), + complete: function() { return !!this.height && !!this.width;} + }; + }, + + // basic function to set the opacity of an element. 
+ // this gets monkey patched by the graphics module when running in IE6-8 + + setOpacity: function (el, opacity) { + el.style.opacity = opacity; + }, + + // fade "el" from opacity "op" to "endOp" over a period of time "duration" + + fader: (function () { + var elements = {}, + lastKey = 0, + fade_func = function (el, op, endOp, duration) { + var index, + cbIntervals = duration/15, + obj, u = $.mapster.utils; + + if (typeof el === 'number') { + obj = elements[el]; + if (!obj) { + return; + } + } else { + index = u.indexOfProp(elements, null, el); + if (index) { + delete elements[index]; + } + elements[++lastKey] = obj = el; + el = lastKey; + } + + endOp = endOp || 1; + + op = (op + (endOp / cbIntervals) > endOp - 0.01) ? endOp : op + (endOp / cbIntervals); + + u.setOpacity(obj, op); + if (op < endOp) { + setTimeout(function () { + fade_func(el, op, endOp, duration); + }, 15); + } + }; + return fade_func; + } ()) + }, + getBoundList: function (opts, key_list) { + if (!opts.boundList) { + return null; + } + var index, key, result = $(), list = $.mapster.utils.split(key_list); + opts.boundList.each(function (i,e) { + for (index = 0; index < list.length; index++) { + key = list[index]; + if ($(e).is('[' + opts.listKey + '="' + key + '"]')) { + result = result.add(e); + } + } + }); + return result; + }, + // Causes changes to the bound list based on the user action (select or deselect) + // area: the jQuery area object + // returns the matching elements from the bound list for the first area passed (normally only one should be passed, but + // a list can be passed + setBoundListProperties: function (opts, target, selected) { + target.each(function (i,e) { + if (opts.listSelectedClass) { + if (selected) { + $(e).addClass(opts.listSelectedClass); + } else { + $(e).removeClass(opts.listSelectedClass); + } + } + if (opts.listSelectedAttribute) { + $(e).attr(opts.listSelectedAttribute, selected); + } + }); + }, + getMapDataIndex: function (obj) { + var img, id; + switch (obj.tagName && obj.tagName.toLowerCase()) { + case 'area': + id = $(obj).parent().attr('name'); + img = $("img[usemap='#" + id + "']")[0]; + break; + case 'img': + img = obj; + break; + } + return img ? + this.utils.indexOfProp(this.map_cache, 'image', img) : -1; + }, + getMapData: function (obj) { + var index = this.getMapDataIndex(obj.length ? obj[0]:obj); + if (index >= 0) { + return index >= 0 ? this.map_cache[index] : null; + } + }, + queueCommand: function (map_data, that, command, args) { + if (!map_data) { + return false; + } + if (!map_data.complete || map_data.currentAction) { + map_data.commands.push( + { + that: that, + command: command, + args: args + }); + return true; + } + return false; + }, + unload: function () { + this.impl.unload(); + this.utils = null; + this.impl = null; + $.fn.mapster = null; + $.mapster = null; + $('*').unbind(); + } + }; + + // Config for object prototypes + // first: use only first object (for things that should not apply to lists) + /// calls back one of two fuinctions, depending on whether an area was obtained. + // opts: { + // name: 'method name', + // key: 'key, + // args: 'args' + // + //} + // name: name of method (required) + // args: arguments to re-call with + // Iterates through all the objects passed, and determines whether it's an area or an image, and calls the appropriate + // callback for each. If anything is returned from that callback, the process is stopped and that data return. Otherwise, + // the object itself is returned. 
+ + var m = $.mapster, + u = m.utils, + ap = Array.prototype; + + + // jQuery's width() and height() are broken on IE9 in some situations. This tries everything. + $.each(["width","height"],function(i,e) { + var capProp = e.substr(0,1).toUpperCase() + e.substr(1); + // when jqwidth parm is passed, it also checks the jQuery width()/height() property + // the issue is that jQUery width() can report a valid size before the image is loaded in some browsers + // without it, we can read zero even when image is loaded in other browsers if its not visible + // we must still check because stuff like adblock can temporarily block it + // what a goddamn headache + u["img"+capProp]=function(img,jqwidth) { + return (jqwidth ? $(img)[e]() : 0) || + img[e] || img["natural"+capProp] || img["client"+capProp] || img["offset"+capProp]; + }; + + }); + + m.Method = function (that, func_map, func_area, opts) { + var me = this; + me.name = opts.name; + me.output = that; + me.input = that; + me.first = opts.first || false; + me.args = opts.args ? ap.slice.call(opts.args, 0) : []; + me.key = opts.key; + me.func_map = func_map; + me.func_area = func_area; + //$.extend(me, opts); + me.name = opts.name; + me.allowAsync = opts.allowAsync || false; + }; + m.Method.prototype.go = function () { + var i, data, ar, len, result, src = this.input, + area_list = [], + me = this; + + len = src.length; + for (i = 0; i < len; i++) { + data = $.mapster.getMapData(src[i]); + if (data) { + if (!me.allowAsync && m.queueCommand(data, me.input, me.name, me.args)) { + if (this.first) { + result = ''; + } + continue; + } + + ar = data.getData(src[i].nodeName === 'AREA' ? src[i] : this.key); + if (ar) { + if ($.inArray(ar, area_list) < 0) { + area_list.push(ar); + } + } else { + result = this.func_map.apply(data, me.args); + } + if (this.first || typeof result !== 'undefined') { + break; + } + } + } + // if there were areas, call the area function for each unique group + $(area_list).each(function (i,e) { + result = me.func_area.apply(e, me.args); + }); + + if (typeof result !== 'undefined') { + return result; + } else { + return this.output; + } + }; + + + $.mapster.impl = (function () { + var me = {}, + removeMap, addMap; + + addMap = function (map_data) { + return m.map_cache.push(map_data) - 1; + }; + removeMap = function (map_data) { + m.map_cache.splice(map_data.index, 1); + for (var i = m.map_cache.length - 1; i >= this.index; i--) { + m.map_cache[i].index--; + } + }; + /// return current map_data for an image or area + + // merge new area data into existing area options. used for rebinding. + function merge_areas(map_data, areas) { + var ar, index, + map_areas = map_data.options.areas; + if (areas) { + $.each(areas, function (i, e) { + + // Issue #68 - ignore invalid data in areas array + + if (!e || !e.key) { + return; + } + + index = u.indexOfProp(map_areas, "key", e.key); + if (index >= 0) { + $.extend(map_areas[index], e); + } + else { + map_areas.push(e); + } + ar = map_data.getDataForKey(e.key); + if (ar) { + $.extend(ar.options, e); + } + }); + } + } + function merge_options(map_data, options) { + var temp_opts = u.updateProps({}, options); + delete temp_opts.areas; + + u.updateProps(map_data.options, temp_opts); + + merge_areas(map_data, options.areas); + // refresh the area_option template + u.updateProps(map_data.area_options, map_data.options); + } + // Most methods use the "Method" object which handles figuring out whether it's an image or area called and + // parsing key parameters. 
The constructor wants: + // this, the jQuery object + // a function that is called when an image was passed (with a this context of the MapData) + // a function that is called when an area was passed (with a this context of the AreaData) + // options: first = true means only the first member of a jQuery object is handled + // key = the key parameters passed + // defaultReturn: a value to return other than the jQuery object (if its not chainable) + // args: the arguments + // Returns a comma-separated list of user-selected areas. "staticState" areas are not considered selected for the purposes of this method. + me.get = function (key) { + var md = m.getMapData(this); + if (!(md && md.complete)) { + throw("Can't access data until binding complete."); + } + + return (new m.Method(this, + function () { + // map_data return + return this.getSelected(); + }, + function () { + return this.isSelected(); + }, + { name: 'get', + args: arguments, + key: key, + first: true, + allowAsync: true, + defaultReturn: '' + } + )).go(); + }; + me.data = function (key) { + return (new m.Method(this, + null, + function () { + return this; + }, + { name: 'data', + args: arguments, + key: key + } + )).go(); + }; + + + // Set or return highlight state. + // $(img).mapster('highlight') -- return highlighted area key, or null if none + // $(area).mapster('highlight') -- highlight an area + // $(img).mapster('highlight','area_key') -- highlight an area + // $(img).mapster('highlight',false) -- remove highlight + me.highlight = function (key) { + return (new m.Method(this, + function () { + if (key === false) { + this.ensureNoHighlight(); + } else { + var id = this.highlightId; + return id >= 0 ? this.data[id].key : null; + } + }, + function () { + this.highlight(); + }, + { name: 'highlight', + args: arguments, + key: key, + first: true + } + )).go(); + }; + // Return the primary keys for an area or group key. + // $(area).mapster('key') + // includes all keys (not just primary keys) + // $(area).mapster('key',true) + // $(img).mapster('key','group-key') + + // $(img).mapster('key','group-key', true) + me.keys = function(key,all) { + var keyList=[], + md = m.getMapData(this); + + if (!(md && md.complete)) { + throw("Can't access data until binding complete."); + } + + + function addUniqueKeys(ad) { + var areas,keys=[]; + if (!all) { + keys.push(ad.key); + } else { + areas=ad.areas(); + $.each(areas,function(i,e) { + keys=keys.concat(e.keys); + }); + } + $.each(keys,function(i,e) { + if ($.inArray(e,keyList)<0) { + keyList.push(e); + } + }); + } + + if (!(md && md.complete)) { + return ''; + } + if (typeof key === 'string') { + if (all) { + addUniqueKeys(md.getDataForKey(key)); + } else { + keyList=[md.getKeysForGroup(key)]; + } + } else { + all = key; + this.each(function(i,e) { + if (e.nodeName==='AREA') { + addUniqueKeys(md.getDataForArea(e)); + } + }); + } + return keyList.join(','); + + + }; + me.select = function () { + me.set.call(this, true); + }; + me.deselect = function () { + me.set.call(this, false); + }; + + /** + * Select or unselect areas. Areas can be identified by a single string key, a comma-separated list of keys, + * or an array of strings. 
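+     *
+     * A hedged sketch of the calling convention (the keys are illustrative only):
+     *
+     *   $('img').mapster('set', true, 'FL,GA');        // select two areas
+     *   $('img').mapster('set', false, ['FL', 'GA']);  // deselect, array form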
+ * + * + * @param {boolean} selected Determines whether areas are selected or deselected + * @param {string|string[]} key A string, comma-separated string, or array of strings indicating + * the areas to select or deselect + * @param {object} options Rendering options to apply when selecting an area + */ + + me.set = function (selected, key, options) { + var lastMap, map_data, opts=options, + key_list, area_list; // array of unique areas passed + + function setSelection(ar) { + if (ar) { + switch (selected) { + case true: + ar.select(opts); break; + case false: + ar.deselect(true); break; + default: + ar.toggle(opts); break; + } + } + } + function addArea(ar) { + if (ar && $.inArray(ar, area_list) < 0) { + area_list.push(ar); + key_list+=(key_list===''?'':',')+ar.key; + } + } + // Clean up after a group that applied to the same map + function finishSetForMap(map_data) { + $.each(area_list, function (i, el) { + setSelection(el); + }); + if (!selected) { + map_data.removeSelectionFinish(); + } + if (map_data.options.boundList) { + m.setBoundListProperties(map_data.options, m.getBoundList(map_data.options, key_list), selected); + } + } + + this.filter('img,area').each(function (i,e) { + var keys; + map_data = m.getMapData(e); + + if (map_data !== lastMap) { + if (lastMap) { + finishSetForMap(lastMap); + } + + area_list = []; + key_list=''; + } + + if (map_data) { + + keys = ''; + if (e.nodeName.toUpperCase()==='IMG') { + if (!m.queueCommand(map_data, $(e), 'set', [selected, key, opts])) { + if (key instanceof Array) { + if (key.length) { + keys = key.join(","); + } + } + else { + keys = key; + } + + if (keys) { + $.each(u.split(keys), function (i,key) { + addArea(map_data.getDataForKey(key.toString())); + lastMap = map_data; + }); + } + } + } else { + opts=key; + if (!m.queueCommand(map_data, $(e), 'set', [selected, opts])) { + addArea(map_data.getDataForArea(e)); + lastMap = map_data; + } + + } + } + }); + + if (map_data) { + finishSetForMap(map_data); + } + + + return this; + }; + me.unbind = function (preserveState) { + return (new m.Method(this, + function () { + this.clearEvents(); + this.clearMapData(preserveState); + removeMap(this); + }, + null, + { name: 'unbind', + args: arguments + } + )).go(); + }; + + + // refresh options and update selection information. + me.rebind = function (options) { + return (new m.Method(this, + function () { + var me=this; + + me.complete=false; + me.configureOptions(options); + me.bindImages().then(function() { + me.buildDataset(true); + me.complete=true; + }); + //this.redrawSelections(); + }, + null, + { + name: 'rebind', + args: arguments + } + )).go(); + }; + // get options. nothing or false to get, or "true" to get effective options (versus passed options) + me.get_options = function (key, effective) { + var eff = u.isBool(key) ? key : effective; // allow 2nd parm as "effective" when no key + return (new m.Method(this, + function () { + var opts = $.extend({}, this.options); + if (eff) { + opts.render_select = u.updateProps( + {}, + m.render_defaults, + opts, + opts.render_select); + + opts.render_highlight = u.updateProps( + {}, + m.render_defaults, + opts, + opts.render_highlight); + } + return opts; + }, + function () { + return eff ? 
this.effectiveOptions() : this.options; + }, + { + name: 'get_options', + args: arguments, + first: true, + allowAsync: true, + key: key + } + )).go(); + }; + + // set options - pass an object with options to set, + me.set_options = function (options) { + return (new m.Method(this, + function () { + merge_options(this, options); + }, + null, + { + name: 'set_options', + args: arguments + } + )).go(); + }; + me.unload = function () { + var i; + for (i = m.map_cache.length - 1; i >= 0; i--) { + if (m.map_cache[i]) { + me.unbind.call($(m.map_cache[i].image)); + } + } + me.graphics = null; + }; + + me.snapshot = function () { + return (new m.Method(this, + function () { + $.each(this.data, function (i, e) { + e.selected = false; + }); + + this.base_canvas = this.graphics.createVisibleCanvas(this); + $(this.image).before(this.base_canvas); + }, + null, + { name: 'snapshot' } + )).go(); + }; + + // do not queue this function + + me.state = function () { + var md, result = null; + $(this).each(function (i,e) { + if (e.nodeName === 'IMG') { + md = m.getMapData(e); + if (md) { + result = md.state(); + } + return false; + } + }); + return result; + }; + + me.bind = function (options) { + + return this.each(function (i,e) { + var img, map, usemap, md; + + // save ref to this image even if we can't access it yet. commands will be queued + img = $(e); + + md = m.getMapData(e); + + // if already bound completely, do a total rebind + + if (md) { + me.unbind.apply(img); + if (!md.complete) { + // will be queued + img.bind(); + return true; + } + md = null; + } + + // ensure it's a valid image + // jQuery bug with Opera, results in full-url#usemap being returned from jQuery's attr. + // So use raw getAttribute instead. + + usemap = this.getAttribute('usemap'); + map = usemap && $('map[name="' + usemap.substr(1) + '"]'); + if (!(img.is('img') && usemap && map.size() > 0)) { + return true; + } + + // sorry - your image must have border:0, things are too unpredictable otherwise. + img.css('border', 0); + + if (!md) { + md = new m.MapData(this, options); + + md.index = addMap(md); + md.map = map; + md.bindImages().then(function() { + md.initialize(); + }); + } + }); + }; + + me.init = function (useCanvas) { + var style, shapes; + + + // check for excanvas explicitly - don't be fooled + m.hasCanvas = (document.namespaces && document.namespaces.g_vml_) ? false : + $('')[0].getContext ? true : false; + + m.isTouch = 'ontouchstart' in document.documentElement; + + if (!(m.hasCanvas || document.namespaces)) { + $.fn.mapster = function () { + return this; + }; + return; + } + + $.extend(m.defaults, m.render_defaults,m.shared_defaults); + $.extend(m.area_defaults, m.render_defaults,m.shared_defaults); + + // for testing/debugging, use of canvas can be forced by initializing manually with "true" or "false" + if (u.isBool(useCanvas)) { + m.hasCanvas = useCanvas; + } + if ($.browser.msie && !m.hasCanvas && !document.namespaces.v) { + document.namespaces.add("v", "urn:schemas-microsoft-com:vml"); + style = document.createStyleSheet(); + shapes = ['shape', 'rect', 'oval', 'circ', 'fill', 'stroke', 'imagedata', 'group', 'textbox']; + $.each(shapes, + function (i, el) { + style.addRule('v\\:' + el, "behavior: url(#default#VML); antialias:true"); + }); + } + }; + me.test = function (obj) { + return eval(obj); + }; + return me; + } ()); + + $.mapster.impl.init(); +} (jQuery)); +/* graphics.js + Graphics object handles all rendering. 
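+   Each bound image gets two canvases: a persistent "base" canvas where selections
+   are drawn, and a transient "overlay" canvas for mouseover highlights.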
+*/ +(function ($) { + var p, m=$.mapster, + u=m.utils; + + /** + * Implemenation to add each area in an AreaData object to the canvas + * @param {Graphics} graphics The target graphics object + * @param {AreaData} areaData The AreaData object (a collection of area elements and metadata) + * @param {object} options Rendering options to apply when rendering this group of areas + */ + function addShapeGroupImpl(graphics, areaData, options) { + var me = graphics, + md = me.map_data, + isMask = options.isMask; + + // first get area options. Then override fade for selecting, and finally merge in the + // "select" effect options. + + $.each(areaData.areas(), function (i,e) { + options.isMask = isMask || (e.nohref && md.options.noHrefIsMask); + me.addShape(e, options); + }); + + // it's faster just to manipulate the passed options isMask property and restore it, than to + // copy the object each time + + options.isMask=isMask; + + } + + + /** + * An object associated with a particular map_data instance to manage renderin. + * @param {MapData} map_data The MapData object bound to this instance + */ + + m.Graphics = function (map_data) { + //$(window).unload($.mapster.unload); + // create graphics functions for canvas and vml browsers. usage: + // 1) init with map_data, 2) call begin with canvas to be used (these are separate b/c may not require canvas to be specified + // 3) call add_shape_to for each shape or mask, 4) call render() to finish + + var me = this; + me.active = false; + me.canvas = null; + me.width = 0; + me.height = 0; + me.shapes = []; + me.masks = []; + me.map_data = map_data; + }; + + p = m.Graphics.prototype= { + constructor: m.Graphics, + + /** + * Initiate a graphics request for a canvas + * @param {Element} canvas The canvas element that is the target of this operation + * @param {string} [elementName] The name to assign to the element (VML only) + */ + + begin: function(canvas, elementName) { + var c = $(canvas); + + this.elementName = elementName; + this.canvas = canvas; + + this.width = c.width(); + this.height = c.height(); + this.shapes = []; + this.masks = []; + this.active = true; + + }, + + /** + * Add an area to be rendered to this canvas. + * @param {MapArea} mapArea The MapArea object to render + * @param {object} options An object containing any rendering options that should override the + * defaults for the area + */ + + addShape: function(mapArea, options) { + var addto = options.isMask ? this.masks : this.shapes; + addto.push({ mapArea: mapArea, options: options }); + }, + + /** + * Create a canvas that is sized and styled for the MapData object + * @param {MapData} mapData The MapData object that will receive this new canvas + * @return {Element} A canvas element + */ + + createVisibleCanvas: function (mapData) { + return $(this.createCanvasFor(mapData)) + .addClass('mapster_el') + .css(m.canvas_style)[0]; + }, + + /** + * Add a group of shapes from an AreaData object to the canvas + * + * @param {AreaData} areaData An AreaData object (a set of area elements) + * @param {string} mode The rendering mode, "select" or "highlight". This determines the target + * canvas and which default options to use. 
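+     *   ("select" targets the persistent base_canvas and names the group
+     *   "static_" + areaId so a single selection can be removed later; anything
+     *   else targets the transient overlay_canvas, which clearHighlight wipes
+     *   wholesale.)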
+ * @param {striong} options Rendering options + */ + + addShapeGroup: function (areaData, mode,options) { + // render includeKeys first - because they could be masks + var me = this, + list, name, canvas, + map_data = this.map_data, + opts = areaData.effectiveRenderOptions(mode); + + if (options) { + $.extend(opts,options); + } + + if (mode === 'select') { + name = "static_" + areaData.areaId.toString(); + canvas = map_data.base_canvas; + } else { + canvas = map_data.overlay_canvas; + } + + me.begin(canvas, name); + + if (opts.includeKeys) { + list = u.split(opts.includeKeys); + $.each(list, function (i,e) { + var areaData = map_data.getDataForKey(e.toString()); + addShapeGroupImpl(me,areaData, areaData.effectiveRenderOptions(mode)); + }); + } + + addShapeGroupImpl(me,areaData, opts); + me.render(); + if (opts.fade) { + + // fading requires special handling for IE. We must access the fill elements directly. The fader also has to deal with + // the "opacity" attribute (not css) + + u.fader(m.hasCanvas ? + canvas : + $(canvas).find('._fill').not('.mapster_mask'), + 0, + m.hasCanvas ? + 1 : + opts.fillOpacity, + opts.fadeDuration); + + } + + } + }; + + // configure remaining prototype methods for ie or canvas-supporting browser + + if (m.hasCanvas) { + + /** + * Convert a hex value to decimal + * @param {string} hex A hexadecimal string + * @return {int} Integer represenation of the hex string + */ + + p.hex_to_decimal = function (hex) { + return Math.max(0, Math.min(parseInt(hex, 16), 255)); + }; + + p.css3color = function (color, opacity) { + return 'rgba(' + this.hex_to_decimal(color.substr(0, 2)) + ',' + + this.hex_to_decimal(color.substr(2, 2)) + ',' + + this.hex_to_decimal(color.substr(4, 2)) + ',' + opacity + ')'; + }; + + p.renderShape = function (context, mapArea, offset) { + var i, + c = mapArea.coords(null,offset); + + switch (mapArea.shape) { + case 'rect': + context.rect(c[0], c[1], c[2] - c[0], c[3] - c[1]); + break; + case 'poly': + context.moveTo(c[0], c[1]); + + for (i = 2; i < mapArea.length; i += 2) { + context.lineTo(c[i], c[i + 1]); + } + context.lineTo(c[0], c[1]); + break; + case 'circ': + case 'circle': + context.arc(c[0], c[1], c[2], 0, Math.PI * 2, false); + break; + } + }; + + p.addAltImage = function (context, image, mapArea, options) { + context.beginPath(); + + this.renderShape(context, mapArea); + context.closePath(); + context.clip(); + + context.globalAlpha = options.altImageOpacity || options.fillOpacity; + + context.drawImage(image, 0, 0, mapArea.owner.scaleInfo.width, mapArea.owner.scaleInfo.height); + }; + + p.render = function () { + // firefox 6.0 context.save() seems to be broken. to work around, we have to draw the contents on one temp canvas, + // the mask on another, and merge everything. ugh. fixed in 1.2.2. unfortunately this is a lot more code for masks, + // but no other way around it that i can see. 
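+        // In outline, when masks are present: masks are filled, clipped, onto their
+        // own canvas; shape fills (or alt images) go onto a second canvas; strokes
+        // are drawn last so masks get outlined too; then "source-out" compositing
+        // keeps shape pixels only where no mask was drawn, and the merged result is
+        // flattened onto the target canvas.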
+ + var maskCanvas, maskContext, + me = this, + md = me.map_data, + hasMasks = me.masks.length, + shapeCanvas = me.createCanvasFor(md), + shapeContext = shapeCanvas.getContext('2d'), + context = me.canvas.getContext('2d'); + + if (hasMasks) { + maskCanvas = me.createCanvasFor(md); + maskContext = maskCanvas.getContext('2d'); + maskContext.clearRect(0, 0, maskCanvas.width, maskCanvas.height); + + $.each(me.masks, function (i,e) { + maskContext.save(); + maskContext.beginPath(); + me.renderShape(maskContext, e.mapArea); + maskContext.closePath(); + maskContext.clip(); + maskContext.lineWidth = 0; + maskContext.fillStyle = '#000'; + maskContext.fill(); + maskContext.restore(); + }); + + } + + $.each(me.shapes, function (i,s) { + shapeContext.save(); + if (s.options.fill) { + if (s.options.altImageId) { + me.addAltImage(shapeContext, md.images[s.options.altImageId], s.mapArea, s.options); + } else { + shapeContext.beginPath(); + me.renderShape(shapeContext, s.mapArea); + shapeContext.closePath(); + //shapeContext.clip(); + shapeContext.fillStyle = me.css3color(s.options.fillColor, s.options.fillOpacity); + shapeContext.fill(); + } + } + shapeContext.restore(); + }); + + + // render strokes at end since masks get stroked too + + $.each(me.shapes.concat(me.masks), function (i,s) { + var offset = s.options.strokeWidth === 1 ? 0.5 : 0; + // offset applies only when stroke width is 1 and stroke would render between pixels. + + if (s.options.stroke) { + shapeContext.save(); + shapeContext.strokeStyle = me.css3color(s.options.strokeColor, s.options.strokeOpacity); + shapeContext.lineWidth = s.options.strokeWidth; + + shapeContext.beginPath(); + + me.renderShape(shapeContext, s.mapArea, offset); + shapeContext.closePath(); + shapeContext.stroke(); + shapeContext.restore(); + } + }); + + if (hasMasks) { + // render the new shapes against the mask + + maskContext.globalCompositeOperation = "source-out"; + maskContext.drawImage(shapeCanvas, 0, 0); + + // flatten into the main canvas + context.drawImage(maskCanvas, 0, 0); + } else { + context.drawImage(shapeCanvas, 0, 0); + } + + me.active = false; + return me.canvas; + }; + + // create a canvas mimicing dimensions of an existing element + p.createCanvasFor = function (md) { + return $('')[0]; + }; + p.clearHighlight = function () { + var c = this.map_data.overlay_canvas; + c.getContext('2d').clearRect(0, 0, c.width, c.height); + }; + p.removeSelections = function () { + + }; + // Draw all items from selected_list to a new canvas, then swap with the old one. This is used to delete items when using canvases. + p.refreshSelections = function () { + var canvas_temp, map_data = this.map_data; + // draw new base canvas, then swap with the old one to avoid flickering + canvas_temp = map_data.base_canvas; + + map_data.base_canvas = this.createVisibleCanvas(map_data); + $(map_data.base_canvas).hide(); + $(canvas_temp).before(map_data.base_canvas); + + map_data.redrawSelections(); + + $(map_data.base_canvas).show(); + $(canvas_temp).remove(); + }; + + } else { + + /** + * Set the opacity of the element. This is an IE<8 specific function for handling VML. + * When using VML we must override the "setOpacity" utility function (monkey patch ourselves). + * jQuery does not deal with opacity correctly for VML elements. This deals with that. + * + * @param {Element} el The DOM element + * @param {double} opacity A value between 0 and 1 inclusive. 
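+     *
+     * (VML fill elements expose a raw "opacity" property that css("opacity") does
+     * not affect, hence the direct property assignment below when it exists.)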
+ */ + + u.setOpacity = function(el,opacity) { + $(el).each(function(i,e) { + if (typeof e.opacity !=='undefined') { + e.opacity=opacity; + } else { + $(e).css("opacity",opacity); + } + }); + }; + + p.renderShape = function (mapArea, options, cssclass) { + var me = this, fill,stroke, e, t_fill, el_name, el_class, template, c = mapArea.coords(); + el_name = me.elementName ? 'name="' + me.elementName + '" ' : ''; + el_class = cssclass ? 'class="' + cssclass + '" ' : ''; + + t_fill = ''; + + + stroke = options.stroke ? + ' strokeweight=' + options.strokeWidth + ' stroked="t" strokecolor="#' + + options.strokeColor + '"' : + ' stroked="f"'; + + fill = options.fill ? + ' filled="t"' : + ' filled="f"'; + + switch (mapArea.shape) { + case 'rect': + template = '' + t_fill + ''; + break; + case 'poly': + template = '' + t_fill + ''; + break; + case 'circ': + case 'circle': + template = '' + t_fill + ''; + break; + } + e = $(template); + $(me.canvas).append(e); + + return e; + }; + p.render = function () { + var opts, me = this; + + $.each(this.shapes, function (i,e) { + me.renderShape(e.mapArea, e.options); + }); + + if (this.masks.length) { + $.each(this.masks, function (i,e) { + opts = u.updateProps({}, + e.options, { + fillOpacity: 1, + fillColor: e.options.fillColorMask + }); + me.renderShape(e.mapArea, opts, 'mapster_mask'); + }); + } + + this.active = false; + return this.canvas; + }; + + p.createCanvasFor = function (md) { + var w = md.scaleInfo.width, + h = md.scaleInfo.height; + return $('')[0]; + }; + + p.clearHighlight = function () { + $(this.map_data.overlay_canvas).children().remove(); + }; + // remove single or all selections + p.removeSelections = function (area_id) { + if (area_id >= 0) { + $(this.map_data.base_canvas).find('[name="static_' + area_id.toString() + '"]').remove(); + } + else { + $(this.map_data.base_canvas).children().remove(); + } + }; + p.refreshSelections = function () { + return null; + }; + + } + +} (jQuery)); +/* mapimage.js + the MapImage object, repesents an instance of a single bound imagemap +*/ + +(function ($) { + + var m = $.mapster, + u = m.utils, + ap=[]; + /** + * An object encapsulating all the images used by a MapData. + */ + + m.MapImages = function(owner) { + this.owner = owner; + this.clear(); + }; + + + m.MapImages.prototype = { + constructor: m.MapImages, + + /* interface to make this array-like */ + + slice: function() { + return ap.slice.apply(this,arguments); + }, + splice: function() { + ap.slice.apply(this.status,arguments); + var result= ap.slice.apply(this,arguments); + return result; + }, + + /** + * a boolean value indicates whether all images are done loading + * @return {bool} true when all are done + */ + complete: function() { + return $.inArray(false, this.status) < 0; + }, + + /** + * Save an image in the images array and return its index + * @param {Image} image An Image object + * @return {int} the index of the image + */ + + _add: function(image) { + var index = ap.push.call(this,image)-1; + this.status[index] = false; + return index; + }, + + /** + * Return the index of an Image within the images array + * @param {Image} img An Image + * @return {int} the index within the array, or -1 if it was not found + */ + + indexOf: function(image) { + return $.inArray(image, this); + }, + + /** + * Clear this object and reset it to its initial state after binding. 
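+     * (Afterwards ids and status are empty, length is zero, and the stored image
+     * references are spliced away, so the instance can be reused on a rebind.)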
+ */ + + clear: function() { + var me=this; + + if (me.ids && me.ids.length>0) { + $.each(me.ids,function(i,e) { + delete me[e]; + }); + } + + /** + * A list of the cross-reference IDs bound to this object + * @type {string[]} + */ + + me.ids=[]; + + /** + * Length property for array-like behavior, set to zero when initializing. Array prototype + * methods will update it after that. + * + * @type {int} + */ + + me.length=0; + + /** + * the loaded status of the corresponding image + * @type {boolean[]} + */ + + me.status=[]; + + + // actually erase the images + + me.splice(0); + + }, + + /** + * Bind an image to the map and add it to the queue to be loaded; return an ID that + * can be used to reference the + * + * @param {Image|string} image An Image object or a URL to an image + * @param {string} [id] An id to refer to this image + * @returns {int} an ID referencing the index of the image object in + * map_data.images + */ + + add: function(image,id) { + var index,src,me = this; + + if (!image) { return; } + + if (typeof image === 'string') { + src = image; + image = me[src]; + if (typeof image==='object') { + return me.indexOf(image); + } + + image = $('') + .addClass('mapster_el') + .hide(); + + index=me._add(image[0]); + + image + .bind('load',function(e) { + me.imageLoaded.call(me,e); + }) + .bind('error',function(e) { + me.imageLoadError.call(me,e); + }); + + image.attr('src', src); + } else { + + // use attr because we want the actual source, not the resolved path the browser will return directly calling image.src + + index=me._add($(image)[0]); + } + if (id) { + if (this[id]) { + throw(id+" is already used or is not available as an altImage alias."); + } + me.ids.push(id); + me[id]=me[index]; + } + return index; + }, + + /** + * Bind the images in this object, + * @param {boolean} retry when true, indicates that the function is calling itself after failure + * @return {Promise} a promise that resolves when the images have finished loading + */ + + bind: function(retry) { + var me = this, + promise, + triesLeft = me.owner.options.configTimeout / 200, + + /* A recursive function to continue checking that the images have been + loaded until a timeout has elapsed */ + + check=function() { + var i; + + // refresh status of images + + i=me.length; + + while (i-->0) { + if (!me.isLoaded(i)) { + break; + } + } + + // check to see if every image has already been loaded + + if (me.complete()) { + me.resolve(); + } else { + // to account for failure of onLoad to fire in rare situations + if (triesLeft-- > 0) { + me.imgTimeout=window.setTimeout(function() { + check.call(me,true); + }, 50); + } else { + me.imageLoadError.call(me); + } + } + + }; + + promise = me.deferred=u.defer(); + + check(); + return promise; + }, + + resolve: function() { + var me=this, + resolver=me.deferred; + + if (resolver) { + // Make a copy of the resolver before calling & removing it to ensure + // it is not called twice + me.deferred=null; + resolver.resolve(); + } + }, + + /** + * Event handler for image onload + * @param {object} e jQuery event data + */ + + imageLoaded: function(e) { + var me=this, + index = me.indexOf(e.target); + + if (index>=0) { + + me.status[index] = true; + if ($.inArray(false, me.status) < 0) { + me.resolve(); + } + } + }, + + /** + * Event handler for onload error + * @param {object} e jQuery event data + */ + + imageLoadError: function(e) { + clearTimeout(this.imgTimeout); + this.triesLeft=0; + var err = e ? 'The image ' + e.target.src + ' failed to load.' 
: + 'The images never seemed to finish loading. You may just need to increase the configTimeout if images could take a long time to load.'; + throw err; + }, + /** + * Test if the image at specificed index has finished loading + * @param {int} index The image index + * @return {boolean} true if loaded, false if not + */ + + isLoaded: function(index) { + var img, + me=this, + status=me.status; + + if (status[index]) { return true; } + img = me[index]; + + if (typeof img.complete !== 'undefined') { + status[index]=img.complete; + } else { + status[index]=!!u.imgWidth(img); + } + // if complete passes, the image is loaded, but may STILL not be available because of stuff like adblock. + // make sure it is. + + return status[index]; + } + }; + } (jQuery)); +/* mapdata.js + the MapData object, repesents an instance of a single bound imagemap +*/ + + +(function ($) { + + var m = $.mapster, + u = m.utils; + + /** + * Set default values for MapData object properties + * @param {MapData} me The MapData object + */ + + function initializeDefaults(me) { + $.extend(me,{ + complete: false, // (bool) when configuration is complete + map: null, // ($) the image map + base_canvas: null, // (canvas|var) where selections are rendered + overlay_canvas: null, // (canvas|var) where highlights are rendered + commands: [], // {} commands that were run before configuration was completed (b/c images weren't loaded) + data: [], // MapData[] area groups + mapAreas: [], // MapArea[] list. AreaData entities contain refs to this array, so options are stored with each. + _xref: {}, // (int) xref of mapKeys to data[] + highlightId: -1, // (int) the currently highlighted element. + currentAreaId: -1, + _tooltip_events: [], // {} info on events we bound to a tooltip container, so we can properly unbind them + scaleInfo: null, // {} info about the image size, scaling, defaults + index: -1, // index of this in map_cache - so we have an ID to use for wraper div + activeAreaEvent: null + }); + } + + /** + * Return an array of all image-containing options from an options object; + * that is, containers that may have an "altImage" property + * + * @param {object} obj An options object + * @return {object[]} An array of objects + */ + function getOptionImages(obj) { + return [obj, obj.render_highlight, obj.render_select]; + } + + /** + * Parse all the altImage references, adding them to the library so they can be preloaded + * and aliased. 
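+     *
+     * A hedged sketch of the option shapes consumed here (the names and URLs are
+     * invented for illustration):
+     *
+     *   altImages: { glow: 'glow.png' }         // library: preloaded, aliased by key
+     *   render_select: { altImage: 'sel.png' }  // per-effect: assigned an altImageId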
+ * + * @param {MapData} me The MapData object on which to operate + */ + function configureAltImages(me) + { + var opts = me.options, + mi = me.images; + + // add alt images + + if ($.mapster.hasCanvas) { + // map altImage library first + + $.each(opts.altImages || {}, function(i,e) { + mi.add(e,i); + }); + + // now find everything else + + $.each([opts].concat(opts.areas),function(i,e) { + $.each(getOptionImages(e),function(i2,e2) { + if (e2 && e2.altImage) { + e2.altImageId=mi.add(e2.altImage); + } + }); + }); + } + + // set area_options + me.area_options = u.updateProps({}, // default options for any MapArea + m.area_defaults, + opts); + } + + /** + * Queue a mouse move action based on current delay settings + * (helper for mouseover/mouseout handlers) + * + * @param {MapData} me The MapData context + * @param {number} delay The number of milliseconds to delay the action + * @param {AreaData} area AreaData affected + * @param {Deferred} deferred A deferred object to return (instead of a new one) + * @return {Promise} A promise that resolves when the action is completed + */ + function queueMouseEvent(me,delay,area, deferred) { + + deferred = deferred || u.when.defer(); + + function cbFinal(areaId) { + if (me.currentAreaId!==areaId && me.highlightId>=0) { + deferred.resolve(); + } + } + if (me.activeAreaEvent) { + window.clearTimeout(me.activeAreaEvent); + me.activeAreaEvent=0; + } + if (delay<0) { + return; + } + + if (area.owner.currentAction || delay) { + me.activeAreaEvent = window.setTimeout((function() { + return function() { + queueMouseEvent(me,0,area,deferred); + }; + }(area)), + delay || 100); + } else { + cbFinal(area.areaId); + } + return deferred; + } + + /** + * Mousedown event. This is captured only to prevent browser from drawing an outline around an + * area when it's clicked. + * + * @param {EventData} e jQuery event data + */ + + function mousedown(e) { + if (!$.mapster.hasCanvas) { + this.blur(); + } + e.preventDefault(); + } + + /** + * Mouseover event. Handle highlight rendering and client callback on mouseover + * + * @param {MapData} me The MapData context + * @param {EventData} e jQuery event data + * @return {[type]} [description] + */ + + function mouseover(me,e) { + var arData = me.getAllDataForArea(this), + ar=arData.length ? arData[0] : null; + + // mouseover events are ignored entirely while resizing, though we do care about mouseout events + // and must queue the action to keep things clean. + + if (!ar || ar.isNotRendered() || ar.owner.currentAction) { + return; + } + + if (me.currentAreaId === ar.areaId) { + return; + } + if (me.highlightId !== ar.areaId) { + me.clearEffects(); + + ar.highlight(); + + if (me.options.showToolTip) { + $.each(arData,function(i,e) { + if (e.effectiveOptions().toolTip) { + e.showToolTip(); + } + }); + } + } + + me.currentAreaId = ar.areaId; + + if ($.isFunction(me.options.onMouseover)) { + me.options.onMouseover.call(this, + { + e: e, + options:ar.effectiveOptions(), + key: ar.key, + selected: ar.isSelected() + }); + } + } + + /** + * Mouseout event. 
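+     * Removal of the highlight and tooltip is routed through queueMouseEvent so
+     * that a configurable mouseoutDelay can elapse before the effects are cleared.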
+ * + * @param {MapData} me The MapData context + * @param {EventData} e jQuery event data + * @return {[type]} [description] + */ + + function mouseout(me,e) { + var newArea, + ar = me.getDataForArea(this), + opts = me.options; + + + if (me.currentAreaId<0 || !ar) { + return; + } + + newArea=me.getDataForArea(e.relatedTarget); + + if (newArea === ar) { + return; + } + + me.currentAreaId = -1; + ar.area=null; + + queueMouseEvent(me,opts.mouseoutDelay,ar) + .then(me.clearEffects); + + if ($.isFunction(opts.onMouseout)) { + opts.onMouseout.call(this, + { + e: e, + options: opts, + key: ar.key, + selected: ar.isSelected() + }); + } + + } + + /** + * Clear any active tooltip or highlight + * + * @param {MapData} me The MapData context + * @param {EventData} e jQuery event data + * @return {[type]} [description] + */ + + function clearEffects(me) { + var opts = me.options; + + me.ensureNoHighlight(); + + if (opts.toolTipClose + && $.inArray('area-mouseout', opts.toolTipClose) >= 0 + && me.activeToolTip) + { + me.clearToolTip(); + } + } + + /** + * Mouse click event handler + * + * @param {MapData} me The MapData context + * @param {EventData} e jQuery event data + * @return {[type]} [description] + */ + + function click(me,e) { + var selected, list, list_target, newSelectionState, canChangeState, cbResult, + that = this, + ar = me.getDataForArea(this), + opts = me.options; + + function clickArea(ar) { + var areaOpts,target; + canChangeState = (ar.isSelectable() && + (ar.isDeselectable() || !ar.isSelected())); + + if (canChangeState) { + newSelectionState = !ar.isSelected(); + } else { + newSelectionState = ar.isSelected(); + } + + list_target = m.getBoundList(opts, ar.key); + + if ($.isFunction(opts.onClick)) + { + cbResult= opts.onClick.call(that, + { + e: e, + listTarget: list_target, + key: ar.key, + selected: newSelectionState + }); + + if (u.isBool(cbResult)) { + if (!cbResult) { + return false; + } + target = $(ar.area).attr('href'); + if (target!=='#') { + window.location.href=target; + return false; + } + } + } + + if (canChangeState) { + selected = ar.toggle(); + } + + if (opts.boundList && opts.boundList.length > 0) { + m.setBoundListProperties(opts, list_target, ar.isSelected()); + } + + areaOpts = ar.effectiveOptions(); + if (areaOpts.includeKeys) { + list = u.split(areaOpts.includeKeys); + $.each(list, function (i, e) { + var ar = me.getDataForKey(e.toString()); + if (!ar.options.isMask) { + clickArea(ar); + } + }); + } + } + + mousedown.call(this,e); + + if (opts.clickNavigate && ar.href) { + window.location.href=ar.href; + return; + } + + if (ar && !ar.owner.currentAction) { + opts = me.options; + clickArea(ar); + } + } + + /** + * Prototype for a MapData object, representing an ImageMapster bound object + * @param {Element} image an IMG element + * @param {object} options ImageMapster binding options + */ + m.MapData = function (image, options) + { + var me = this; + + // (Image) main map image + + me.image = image; + + me.images = new m.MapImages(me); + me.graphics = new m.Graphics(me); + + // save the initial style of the image for unbinding. This is problematic, chrome + // duplicates styles when assigning, and cssText is apparently not universally supported. + // Need to do something more robust to make unbinding work universally. 
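+        // (clearMapData below restores this cached cssText verbatim when unbinding.)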
+ + me.imgCssText = image.style.cssText || null; + + initializeDefaults(me); + + me.configureOptions(options); + + // create context-bound event handlers from our private functions + + me.mouseover = function(e) { mouseover.call(this,me,e); }; + me.mouseout = function(e) { mouseout.call(this,me,e); }; + me.click = function(e) { click.call(this,me,e); }; + me.clearEffects = function(e) { clearEffects.call(this,me,e); }; + }; + + m.MapData.prototype = { + constructor: m.MapData, + + /** + * Set target.options from defaults + options + * @param {[type]} target The target + * @param {[type]} options The options to merge + */ + + configureOptions: function(options) { + this.options= u.updateProps({}, m.defaults, options); + }, + + /** + * Ensure all images are loaded + * @return {Promise} A promise that resolves when the images have finished loading (or fail) + */ + + bindImages: function() { + var me=this, + mi = me.images; + + // reset the images if this is a rebind + + if (mi.length>2) { + mi.splice(2); + } else if (mi.length===0) { + + // add the actual main image + mi.add(me.image); + // will create a duplicate of the main image, we need this to get raw size info + mi.add(me.image.src); + } + + configureAltImages(me); + + return me.images.bind(); + }, + + /** + * Test whether an async action is currently in progress + * @return {Boolean} true or false indicating state + */ + + isActive: function() { + return !this.complete || this.currentAction; + }, + + /** + * Return an object indicating the various states. This isn't really used by + * production code. + * + * @return {object} An object with properties for various states + */ + + state: function () { + return { + complete: this.complete, + resizing: this.currentAction==='resizing', + zoomed: this.zoomed, + zoomedArea: this.zoomedArea, + scaleInfo: this.scaleInfo + }; + }, + + /** + * Get a unique ID for the wrapper of this imagemapster + * @return {string} A string that is unique to this image + */ + + wrapId: function () { + return 'mapster_wrap_' + this.index; + }, + _idFromKey: function (key) { + return typeof key === "string" && this._xref.hasOwnProperty(key) ? + this._xref[key] : -1; + }, + + /** + * Return a comma-separated string of all selected keys + * @return {string} CSV of all keys that are currently selected + */ + + getSelected: function () { + var result = ''; + $.each(this.data, function (i,e) { + if (e.isSelected()) { + result += (result ? ',' : '') + this.key; + } + }); + return result; + }, + + /** + * Get an array of MapAreas associated with a specific AREA based on the keys for that area + * @param {Element} area An HTML AREA + * @param {number} atMost A number limiting the number of areas to be returned (typically 1 or 0 for no limit) + * @return {MapArea[]} Array of MapArea objects + */ + + getAllDataForArea:function (area,atMost) { + var i,ar, result, + me=this, + key = $(area).filter('area').attr(me.options.mapKey); + + if (key) { + result=[]; + key = u.split(key); + + for (i=0;i<(atMost || key.length);i++) { + ar = me.data[me._idFromKey(key[i])]; + ar.area=area.length ? area[0]:area; + // set the actual area moused over/selected + // TODO: this is a brittle model for capturing which specific area - if this method was not used, + // ar.area could have old data. fix this. + result.push(ar); + } + } + + return result; + }, + getDataForArea: function(area) { + var ar=this.getAllDataForArea(area,1); + return ar ? 
ar[0] || null : null; + }, + getDataForKey: function (key) { + return this.data[this._idFromKey(key)]; + }, + + /** + * Get the primary keys associated with an area group. + * If this is a primary key, it will be returned. + * + * @param {string key An area key + * @return {string} A CSV of area keys + */ + + getKeysForGroup: function(key) { + var ar=this.getDataForKey(key); + + return !ar ? '': + ar.isPrimary ? + ar.key : + this.getPrimaryKeysForMapAreas(ar.areas()).join(','); + }, + + /** + * given an array of MapArea object, return an array of its unique primary keys + * @param {MapArea[]} areas The areas to analyze + * @return {string[]} An array of unique primary keys + */ + + getPrimaryKeysForMapAreas: function(areas) + { + var keys=[]; + $.each(areas,function(i,e) { + if ($.inArray(e.keys[0],keys)<0) { + keys.push(e.keys[0]); + } + }); + return keys; + }, + getData: function (obj) { + if (typeof obj === 'string') { + return this.getDataForKey(obj); + } else if (obj && obj.mapster || u.isElement(obj)) { + return this.getDataForArea(obj); + } else { + return null; + } + }, + // remove highlight if present, raise event + ensureNoHighlight: function () { + var ar; + if (this.highlightId >= 0) { + this.graphics.clearHighlight(); + ar = this.data[this.highlightId]; + ar.changeState('highlight', false); + this.setHighlightId(-1); + } + }, + setHighlightId: function(id) { + this.highlightId = id; + }, + + /** + * Clear all active selections on this map + */ + + clearSelections: function () { + $.each(this.data, function (i,e) { + if (e.selected) { + e.deselect(true); + } + }); + this.removeSelectionFinish(); + + }, + + /** + * Set area options from an array of option data. + * + * @param {object[]} areas An array of objects containing area-specific options + */ + + setAreaOptions: function (areas) { + var i, area_options, ar; + areas = areas || []; + + // refer by: map_data.options[map_data.data[x].area_option_id] + + for (i = areas.length - 1; i >= 0; i--) { + area_options = areas[i]; + if (area_options) { + ar = this.getDataForKey(area_options.key); + if (ar) { + u.updateProps(ar.options, area_options); + + // TODO: will not deselect areas that were previously selected, so this only works + // for an initial bind. + + if (u.isBool(area_options.selected)) { + ar.selected = area_options.selected; + } + } + } + } + }, + // keys: a comma-separated list + drawSelections: function (keys) { + var i, key_arr = u.asArray(keys); + + for (i = key_arr.length - 1; i >= 0; i--) { + this.data[key_arr[i]].drawSelection(); + } + }, + redrawSelections: function () { + $.each(this.data, function (i, e) { + if (e.isSelectedOrStatic()) { + e.drawSelection(); + } + }); + + }, + ///called when images are done loading + initialize: function () { + var imgCopy, base_canvas, overlay_canvas, wrap, parentId, css, i,size, + img,sort_func, sorted_list, scale, + me = this, + opts = me.options; + + if (me.complete) { + return; + } + + img = $(me.image); + + parentId = img.parent().attr('id'); + + // create a div wrapper only if there's not already a wrapper, otherwise, own it + + if (parentId && parentId.length >= 12 && parentId.substring(0, 12) === "mapster_wrap") { + wrap = img.parent(); + wrap.attr('id', me.wrapId()); + } else { + wrap = $('
'); + + if (opts.wrapClass) { + if (opts.wrapClass === true) { + wrap.addClass(img[0].className); + } + else { + wrap.addClass(opts.wrapClass); + } + } + } + me.wrapper = wrap; + + // me.images[1] is the copy of the original image. It should be loaded & at its native size now so we can obtain the true + // width & height. This is needed to scale the imagemap if not being shown at its native size. It is also needed purely + // to finish binding in case the original image was not visible. It can be impossible in some browsers to obtain the + // native size of a hidden image. + + me.scaleInfo = scale = u.scaleMap(me.images[0],me.images[1], opts.scaleMap); + + me.base_canvas = base_canvas = me.graphics.createVisibleCanvas(me); + me.overlay_canvas = overlay_canvas = me.graphics.createVisibleCanvas(me); + + // Now we got what we needed from the copy -clone from the original image again to make sure any other attributes are copied + imgCopy = $(me.images[1]) + .addClass('mapster_el '+ me.images[0].className) + .attr({id:null, usemap: null}); + + size=u.size(me.images[0]); + + if (size.complete) { + imgCopy.css({ + width: size.width, + height: size.height + }); + } + + me.buildDataset(); + + // now that we have processed all the areas, set css for wrapper, scale map if needed + + css = { + display: 'block', + position: 'relative', + padding: 0, + width: scale.width, + height: scale.height + }; + + if (opts.wrapCss) { + $.extend(css, opts.wrapCss); + } + // if we were rebinding with an existing wrapper, the image will aready be in it + if (img.parent()[0] !== me.wrapper[0]) { + + img.before(me.wrapper); + } + + wrap.css(css); + + // move all generated images into the wrapper for easy removal later + + $(me.images.slice(2)).hide(); + for (i = 1; i < me.images.length; i++) { + wrap.append(me.images[i]); + } + + //me.images[1].style.cssText = me.image.style.cssText; + + wrap.append(base_canvas) + .append(overlay_canvas) + .append(img.css(m.canvas_style)); + + // images[0] is the original image with map, images[1] is the copy/background that is visible + + u.setOpacity(me.images[0], 0); + $(me.images[1]).show(); + + u.setOpacity(me.images[1],1); + + if (opts.isSelectable && opts.onGetList) { + sorted_list = me.data.slice(0); + if (opts.sortList) { + if (opts.sortList === "desc") { + sort_func = function (a, b) { + return a === b ? 0 : (a > b ? -1 : 1); + }; + } + else { + sort_func = function (a, b) { + return a === b ? 0 : (a < b ? -1 : 1); + }; + } + + sorted_list.sort(function (a, b) { + a = a.value; + b = b.value; + return sort_func(a, b); + }); + } + + me.options.boundList = opts.onGetList.call(me.image, sorted_list); + } + + me.complete=true; + me.processCommandQueue(); + + if (opts.onConfigured && typeof opts.onConfigured === 'function') { + opts.onConfigured.call(img, true); + } + }, + + // when rebind is true, the MapArea data will not be rebuilt. + buildDataset: function(rebind) { + var sel,areas,j,area_id,$area,area,curKey,mapArea,key,keys,mapAreaId,group_value,dataItem,href, + me=this, + opts=me.options, + default_group; + + function addAreaData(key, value) { + var dataItem = new m.AreaData(me, key, value); + dataItem.areaId = me._xref[key] = me.data.push(dataItem) - 1; + return dataItem.areaId; + } + + me._xref = {}; + me.data = []; + if (!rebind) { + me.mapAreas=[]; + } + + default_group = !opts.mapKey; + if (default_group) { + opts.mapKey = 'data-mapster-key'; + } + sel = ($.browser.msie && $.browser.version <= 7) ? 'area' : + (default_group ? 
'area[coords]' : 'area[' + opts.mapKey + ']'); + areas = $(me.map).find(sel).unbind('.mapster'); + + for (mapAreaId = 0;mapAreaId= 0; j--) { + key = keys[j]; + + if (opts.mapValue) { + group_value = $area.attr(opts.mapValue); + } + if (default_group) { + // set an attribute so we can refer to the area by index from the DOM object if no key + area_id = addAreaData(me.data.length, group_value); + dataItem = me.data[area_id]; + dataItem.key = key = area_id.toString(); + } + else { + area_id = me._xref[key]; + if (area_id >= 0) { + dataItem = me.data[area_id]; + if (group_value && !me.data[area_id].value) { + dataItem.value = group_value; + } + } + else { + area_id = addAreaData(key, group_value); + dataItem = me.data[area_id]; + dataItem.isPrimary=j===0; + } + } + mapArea.areaDataXref.push(area_id); + dataItem.areasXref.push(mapAreaId); + } + + href=$area.attr('href'); + if (href && href!=='#' && !dataItem.href) + { + dataItem.href=href; + } + + if (!mapArea.nohref) { + $area.bind('click.mapster', me.click); + + if (!m.isTouch) { + $area.bind('mouseover.mapster', me.mouseover) + .bind('mouseout.mapster', me.mouseout) + .bind('mousedown.mapster', me.mousedown); + + } + + } + + // store an ID with each area. + $area.data("mapster", mapAreaId+1); + } + + // TODO listenToList + // if (opts.listenToList && opts.nitG) { + // opts.nitG.bind('click.mapster', event_hooks[map_data.hooks_index].listclick_hook); + // } + + // populate areas from config options + me.setAreaOptions(opts.areas); + me.redrawSelections(); + + }, + processCommandQueue: function() { + + var cur,me=this; + while (!me.currentAction && me.commands.length) { + cur = me.commands[0]; + me.commands.splice(0,1); + m.impl[cur.command].apply(cur.that, cur.args); + } + }, + clearEvents: function () { + $(this.map).find('area') + .unbind('.mapster'); + $(this.images) + .unbind('.mapster'); + }, + _clearCanvases: function (preserveState) { + // remove the canvas elements created + if (!preserveState) { + $(this.base_canvas).remove(); + } + $(this.overlay_canvas).remove(); + }, + clearMapData: function (preserveState) { + var me = this; + this._clearCanvases(preserveState); + + // release refs to DOM elements + $.each(this.data, function (i, e) { + e.reset(); + }); + this.data = null; + if (!preserveState) { + // get rid of everything except the original image + this.image.style.cssText = this.imgCssText; + $(this.wrapper).before(this.image).remove(); + } + + me.images.clear(); + + this.image = null; + u.ifFunction(this.clearTooltip, this); + }, + + // Compelete cleanup process for deslecting items. Called after a batch operation, or by AreaData for single + // operations not flagged as "partial" + + removeSelectionFinish: function () { + var g = this.graphics; + + g.refreshSelections(); + // do not call ensure_no_highlight- we don't really want to unhilight it, just remove the effect + g.clearHighlight(); + } + }; +} (jQuery)); +/* areadata.js + AreaData and MapArea protoypes +*/ + +(function ($) { + var m = $.mapster, u = m.utils; + + /** + * Select this area + * + * @param {AreaData} me AreaData context + * @param {object} options Options for rendering the selection + */ + function select(options) { + // need to add the new one first so that the double-opacity effect leaves the current one highlighted for singleSelect + + var me=this, o = me.owner; + if (o.options.singleSelect) { + o.clearSelections(); + } + + // because areas can overlap - we can't depend on the selection state to tell us anything about the inner areas. 
+ // don't check if it's already selected + if (!me.isSelected()) { + if (options) { + + // cache the current options, and map the altImageId if an altimage + // was passed + + me.optsCache = $.extend(me.effectiveRenderOptions('select'), + options, + { + altImageId: o.images.add(options.altImage) + }); + } + + me.drawSelection(); + + me.selected = true; + me.changeState('select', true); + } + + if (o.options.singleSelect) { + o.graphics.refreshSelections(); + } + } + + /** + * Deselect this area, optionally deferring finalization so additional areas can be deselected + * in a single operation + * + * @param {boolean} partial when true, the caller must invoke "finishRemoveSelection" to render + */ + + function deselect(partial) { + var me=this; + me.selected = false; + me.changeState('select', false); + + // release information about last area options when deselecting. + + me.optsCache=null; + me.owner.graphics.removeSelections(me.areaId); + + // Complete selection removal process. This is separated because it's very inefficient to perform the whole + // process for multiple removals, as the canvas must be totally redrawn at the end of the process.ar.remove + + if (!partial) { + me.owner.removeSelectionFinish(); + } + } + + /** + * Toggle the selection state of this area + * @param {object} options Rendering options, if toggling on + * @return {bool} The new selection state + */ + function toggle(options) { + var me=this; + if (!me.isSelected()) { + me.select(options); + } + else { + me.deselect(); + } + return me.isSelected(); + } + + /** + * An AreaData object; represents a conceptual area that can be composed of + * one or more MapArea objects + * + * @param {MapData} owner The MapData object to which this belongs + * @param {string} key The key for this area + * @param {string} value The mapValue string for this area + */ + + m.AreaData = function (owner, key, value) { + $.extend(this,{ + owner: owner, + key: key || '', + // means this represents the first key in a list of keys (it's the area group that gets highlighted on mouseover) + isPrimary: true, + areaId: -1, + href: '', + value: value || '', + options:{}, + // "null" means unchanged. Use "isSelected" method to just test true/false + selected: null, + // xref to MapArea objects + areasXref: [], + // (temporary storage) - the actual area moused over + area: null, + // the last options used to render this. Cache so when re-drawing after a remove, changes in options won't + // break already selected things. 
+ optsCache: null + }); + }; + + /** + * The public API for AreaData object + */ + + m.AreaData.prototype = { + constuctor: m.AreaData, + select: select, + deselect: deselect, + toggle: toggle, + areas: function() { + var i,result=[]; + for (i=0;i= 0; j -= 2) { + curX = coords[j]; + curY = coords[j + 1]; + + if (curX < minX) { + minX = curX; + bestMaxY = curY; + } + if (curX > maxX) { + maxX = curX; + bestMinY = curY; + } + if (curY < minY) { + minY = curY; + bestMaxX = curX; + } + if (curY > maxY) { + maxY = curY; + bestMinX = curX; + } + + } + + // try to figure out the best place for the tooltip + + if (width && height) { + found=false; + $.each([[bestMaxX - width, minY - height], [bestMinX, minY - height], + [minX - width, bestMaxY - height], [minX - width, bestMinY], + [maxX,bestMaxY - height], [ maxX,bestMinY], + [bestMaxX - width, maxY], [bestMinX, maxY] + ],function (i, e) { + if (!found && (e[0] > rootx && e[1] > rooty)) { + nest = e; + found=true; + return false; + } + }); + + // default to lower-right corner if nothing fit inside the boundaries of the image + + if (!found) { + nest=[maxX,maxY]; + } + } + return nest; + }; +} (jQuery)); +/* scale.js: resize and zoom functionality + requires areacorners.js, when.js +*/ + + +(function ($) { + var m = $.mapster, u = m.utils, p = m.MapArea.prototype; + + m.utils.getScaleInfo = function (eff, actual) { + var pct; + if (!actual) { + pct = 1; + actual=eff; + } else { + pct = eff.width / actual.width || eff.height / actual.height; + // make sure a float error doesn't muck us up + if (pct > 0.98 && pct < 1.02) { pct = 1; } + } + return { + scale: (pct !== 1), + scalePct: pct, + realWidth: actual.width, + realHeight: actual.height, + width: eff.width, + height: eff.height, + ratio: eff.width / eff.height + }; + }; + // Scale a set of AREAs, return old data as an array of objects + m.utils.scaleMap = function (image, imageRaw, scale) { + + // stunningly, jQuery width can return zero even as width does not, seems to happen only + // with adBlock or maybe other plugins. These must interfere with onload events somehow. + + + var vis=u.size(image), + raw=u.size(imageRaw,true); + + if (!raw.complete()) { + throw("Another script, such as an extension, appears to be interfering with image loading. Please let us know about this."); + } + if (!vis.complete()) { + vis=raw; + } + return this.getScaleInfo(vis, scale ? raw : null); + }; + + /** + * Resize the image map. 
Only one of newWidth and newHeight should be passed to preserve scale + * + * @param {int} width The new width OR an object containing named parameters matching this function sig + * @param {int} height The new height + * @param {int} effectDuration Time in ms for the resize animation, or zero for no animation + * @param {function} callback A function to invoke when the operation finishes + * @return {promise} NOT YET IMPLEMENTED + */ + + m.MapData.prototype.resize = function (width, height, duration, callback) { + var p,promises,newsize,els, highlightId, ratio, + me = this; + + // allow omitting duration + callback = callback || duration; + + function sizeCanvas(canvas, w, h) { + if ($.mapster.hasCanvas) { + canvas.width = w; + canvas.height = h; + } else { + $(canvas).width(w); + $(canvas).height(h); + } + } + + // Finalize resize action, do callback, pass control to command queue + + function cleanupAndNotify() { + + me.currentAction = ''; + + if ($.isFunction(callback)) { + callback(); + } + + me.processCommandQueue(); + } + + // handle cleanup after the inner elements are resized + + function finishResize() { + sizeCanvas(me.overlay_canvas, width, height); + + // restore highlight state if it was highlighted before + if (highlightId >= 0) { + var areaData = me.data[highlightId]; + areaData.tempOptions = { fade: false }; + me.getDataForKey(areaData.key).highlight(); + areaData.tempOptions = null; + } + sizeCanvas(me.base_canvas, width, height); + me.redrawSelections(); + cleanupAndNotify(); + } + + function resizeMapData() { + $(me.image).css(newsize); + // start calculation at the same time as effect + me.scaleInfo = u.getScaleInfo({ + width: width, + height: height + }, + { + width: me.scaleInfo.realWidth, + height: me.scaleInfo.realHeight + }); + $.each(me.data, function (i, e) { + $.each(e.areas(), function (i, e) { + e.resize(); + }); + }); + } + + if (me.scaleInfo.width === width && me.scaleInfo.height === height) { + return; + } + + highlightId = me.highlightId; + + + if (!width) { + ratio = height / me.scaleInfo.realHeight; + width = Math.round(me.scaleInfo.realWidth * ratio); + } + if (!height) { + ratio = width / me.scaleInfo.realWidth; + height = Math.round(me.scaleInfo.realHeight * ratio); + } + + newsize = { 'width': String(width) + 'px', 'height': String(height) + 'px' }; + if (!$.mapster.hasCanvas) { + $(me.base_canvas).children().remove(); + } + + // resize all the elements that are part of the map except the image itself (which is not visible) + // but including the div wrapper + els = $(me.wrapper).find('.mapster_el').add(me.wrapper); + + if (duration) { + promises = []; + me.currentAction = 'resizing'; + els.each(function (i, e) { + p = u.defer(); + promises.push(p); + + $(e).animate(newsize, { + duration: duration, + complete: p.resolve, + easing: "linear" + }); + }); + + p = u.defer(); + promises.push(p); + + // though resizeMapData is not async, it needs to be finished just the same as the animations, + // so add it to the "to do" list. 
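+            // In outline: each animated element contributes one deferred, plus one
+            // more for the synchronous map-data rescale below; finishResize fires
+            // only after u.when.all sees every deferred resolve.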
+ + u.when.all(promises).then(finishResize); + resizeMapData(); + p.resolve(); + } else { + els.css(newsize); + resizeMapData(); + finishResize(); + + } + }; + + + m.MapArea = u.subclass(m.MapArea, function () { + //change the area tag data if needed + this.base.init(); + if (this.owner.scaleInfo.scale) { + this.resize(); + } + }); + + p.coords = function (percent, coordOffset) { + var j, newCoords = [], + pct = percent || this.owner.scaleInfo.scalePct, + offset = coordOffset || 0; + + if (pct === 1 && coordOffset === 0) { + return this.originalCoords; + } + + for (j = 0; j < this.length; j++) { + //amount = j % 2 === 0 ? xPct : yPct; + newCoords.push(Math.round(this.originalCoords[j] * pct) + offset); + } + return newCoords; + }; + p.resize = function () { + this.area.coords = this.coords().join(','); + }; + + p.reset = function () { + this.area.coords = this.coords(1).join(','); + }; + + m.impl.resize = function (width, height, duration, callback) { + if (!width && !height) { + return false; + } + var x= (new m.Method(this, + function () { + this.resize(width, height, duration, callback); + }, + null, + { + name: 'resize', + args: arguments + } + )).go(); + return x; + }; + +/* + m.impl.zoom = function (key, opts) { + var options = opts || {}; + + function zoom(areaData) { + // this will be MapData object returned by Method + + var scroll, corners, height, width, ratio, + diffX, diffY, ratioX, ratioY, offsetX, offsetY, newWidth, newHeight, scrollLeft, scrollTop, + padding = options.padding || 0, + scrollBarSize = areaData ? 20 : 0, + me = this, + zoomOut = false; + + if (areaData) { + // save original state on first zoom operation + if (!me.zoomed) { + me.zoomed = true; + me.preZoomWidth = me.scaleInfo.width; + me.preZoomHeight = me.scaleInfo.height; + me.zoomedArea = areaData; + if (options.scroll) { + me.wrapper.css({ overflow: 'auto' }); + } + } + corners = $.mapster.utils.areaCorners(areaData.coords(1, 0)); + width = me.wrapper.innerWidth() - scrollBarSize - padding * 2; + height = me.wrapper.innerHeight() - scrollBarSize - padding * 2; + diffX = corners.maxX - corners.minX; + diffY = corners.maxY - corners.minY; + ratioX = width / diffX; + ratioY = height / diffY; + ratio = Math.min(ratioX, ratioY); + offsetX = (width - diffX * ratio) / 2; + offsetY = (height - diffY * ratio) / 2; + + newWidth = me.scaleInfo.realWidth * ratio; + newHeight = me.scaleInfo.realHeight * ratio; + scrollLeft = (corners.minX) * ratio - padding - offsetX; + scrollTop = (corners.minY) * ratio - padding - offsetY; + } else { + if (!me.zoomed) { + return; + } + zoomOut = true; + newWidth = me.preZoomWidth; + newHeight = me.preZoomHeight; + scrollLeft = null; + scrollTop = null; + } + + this.resize({ + width: newWidth, + height: newHeight, + duration: options.duration, + scroll: scroll, + scrollLeft: scrollLeft, + scrollTop: scrollTop, + // closure so we can be sure values are correct + callback: (function () { + var isZoomOut = zoomOut, + scroll = options.scroll, + areaD = areaData; + return function () { + if (isZoomOut) { + me.preZoomWidth = null; + me.preZoomHeight = null; + me.zoomed = false; + me.zoomedArea = false; + if (scroll) { + me.wrapper.css({ overflow: 'inherit' }); + } + } else { + // just to be sure it wasn't canceled & restarted + me.zoomedArea = areaD; + } + }; + } ()) + }); + } + return (new m.Method(this, + function (opts) { + zoom.call(this); + }, + function () { + zoom.call(this.owner, this); + }, + { + name: 'zoom', + args: arguments, + first: true, + key: key + } + )).go(); + + + }; + */ 
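+    /* A hedged usage sketch for the resize method above (selector and sizes are
+       illustrative). Per the implementation, a falsy height is derived from the
+       image's aspect ratio, and the callback runs after selections are redrawn:
+
+         $('img.usa').mapster('resize', 720, 0, 250, function () {
+             // wrapper, canvases, and area coordinates are now at the new scale
+         });
+    */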
+} (jQuery));
+/* tooltip.js - tooltip functionality
+   requires areacorners.js
+*/
+
+(function ($) {
+
+    var m = $.mapster, u = m.utils;
+
+    $.extend(m.defaults, {
+        toolTipContainer: '<div style="border: 2px solid black; background: #EEEEEE; position:absolute; width:160px; padding:4px; margin: 4px; -moz-border-radius: 6px 6px 6px 6px; -webkit-border-radius: 6px; border-radius: 6px 6px 6px 6px;"></div>
', + showToolTip: false, + toolTipFade: true, + toolTipClose: ['area-mouseout','image-mouseout'], + onShowToolTip: null, + onHideToolTip: null + }); + + $.extend(m.area_defaults, { + toolTip: null, + toolTipClose: null + }); + + + /** + * Show a tooltip positioned near this area. + * + * @param {string|jquery} html A string of html or a jQuery object containing the tooltip content. + * @param {string|jquery} [template] The html template in which to wrap the content + * @param {string|object} [css] CSS to apply to the outermost element of the tooltip + * @return {jquery} The tooltip that was created + */ + + function createToolTip(html, template, css) { + var tooltip; + + // wrap the template in a jQuery object, or clone the template if it's already one. + // This assumes that anything other than a string is a jQuery object; if it's not jQuery will + // probably throw an error. + + if (template) { + tooltip = typeof template === 'string' ? + $(template) : + $(template).clone(); + + tooltip.append(html); + } else { + tooltip=$(html); + } + + // always set display to block, or the positioning css won't work if the end user happened to + // use a non-block type element. + + tooltip.css($.extend((css || {}),{ + display:"block", + position:"absolute" + })).hide(); + + $('body').append(tooltip); + + // we must actually add the tooltip to the DOM and "show" it in order to figure out how much space it + // consumes, and then reposition it with that knowledge. + // We also cache the actual opacity setting to restore finally. + + tooltip.attr("data-opacity",tooltip.css("opacity")) + .css("opacity",0); + + // doesn't really show it because opacity=0 + + return tooltip.show(); + } + + + /** + * Show a tooltip positioned near this area. + * + * @param {jquery} tooltip The tooltip + * @param {object} [options] options for displaying the tooltip. + * @config {int} [left] The 0-based absolute x position for the tooltip + * @config {int} [top] The 0-based absolute y position for the tooltip + * @config {string|object} [css] CSS to apply to the outermost element of the tooltip + * @config {bool} [fadeDuration] When non-zero, the duration in milliseconds of a fade-in effect for the tooltip. + */ + + function showToolTipImpl(tooltip,options) + { + var tooltipCss = { + "left": options.left + "px", + "top": options.top + "px" + }, + actalOpacity=tooltip.attr("data-opacity") || 0, + zindex = tooltip.css("z-index"); + + if (parseInt(zindex,10)===0 + || zindex === "auto") { + tooltipCss["z-index"] = 9999; + } + + tooltip.css(tooltipCss) + .addClass('mapster_tooltip'); + + + if (options.fadeDuration && options.fadeDuration>0) { + u.fader(tooltip[0], 0, actalOpacity, options.fadeDuration); + } else { + u.setOpacity(tooltip[0], actalOpacity); + } + } + + /** + * Hide and remove active tooltips + * + * @param {MapData} this The mapdata object to which the tooltips belong + */ + + m.MapData.prototype.clearToolTip = function() { + if (this.activeToolTip) { + this.activeToolTip.stop().remove(); + this.activeToolTip = null; + this.activeToolTipID = null; + u.ifFunction(this.options.onHideToolTip, this); + } + }; + + /** + * Configure the binding between a named tooltip closing option, and a mouse event. + * + * If a callback is passed, it will be called when the activating event occurs, and the tooltip will + * only closed if it returns true. + * + * @param {MapData} [this] The MapData object to which this tooltip belongs. 
+     * @param {String[]} options The tooltip closing options currently in effect
+     * @param {String} bindOption The name of the tooltip closing option to bind
+     * @param {String} event UI event to bind to this option
+     * @param {Element} target The DOM element that is the target of the event
+     * @param {Function} [beforeClose] Callback invoked before closing; the tooltip is closed only if it returns true
+     * @param {Function} [onClose] Callback invoked when the tooltip is closed
+     */
+    function bindToolTipClose(options, bindOption, event, target, beforeClose, onClose) {
+        var event_name = event + '.mapster-tooltip';
+
+        if ($.inArray(bindOption, options) >= 0) {
+            target.unbind(event_name)
+                .bind(event_name, function (e) {
+                    if (!beforeClose || beforeClose.call(this,e)) {
+                        target.unbind('.mapster-tooltip');
+                        if (onClose) {
+                            onClose.call(this);
+                        }
+                    }
+                });
+
+            return {
+                object: target,
+                event: event_name
+            };
+        }
+    }
+
+    /**
+     * Show a tooltip.
+     *
+     * @param {string|jquery} [tooltip] A string of html or a jQuery object containing the tooltip content.
+     *
+     * @param {string|jquery} [target] The target of the tooltip, to be used to determine positioning. If null,
+     *                                 absolute position values must be passed with left and top.
+     *
+     * @param {string|jquery} [image] If target is an [area], the image that owns it
+     *
+     * @param {string|jquery} [container] An element within which the tooltip must be bounded
+     *
+     * @param {object|string|jQuery} [options] options to apply when creating this tooltip - OR -
+     *                                         The markup, or a jquery object, containing the data for the tooltip
+     * @config {string} [closeEvents] A string with one or more comma-separated values that determine when the tooltip
+     *                                closes: 'area-click','tooltip-click','image-mouseout' are valid values
+     * @config {int} [offsetx] the horizontal amount to offset the tooltip
+     * @config {int} [offsety] the vertical amount to offset the tooltip
+     * @config {string|object} [css] CSS to apply to the outermost element of the tooltip
+     */
+
+    function showToolTip(tooltip,target,image,container,options) {
+        var corners,
+            ttopts = {};
+
+        options = options || {};
+
+        if (target) {
+
+            corners = u.areaCorners(target,image,container,
+                                    tooltip.outerWidth(true),
+                                    tooltip.outerHeight(true));
+
+            // Try to upper-left align it first, if that doesn't work, change the parameters
+
+            ttopts.left = corners[0];
+            ttopts.top = corners[1];
+
+        } else {
+
+            ttopts.left = options.left;
+            ttopts.top = options.top;
+        }
+
+        ttopts.left += (options.offsetx || 0);
+        ttopts.top += (options.offsety || 0);
+
+        ttopts.css = options.css;
+        ttopts.fadeDuration = options.fadeDuration;
+
+        showToolTipImpl(tooltip,ttopts);
+
+        return tooltip;
+    }
+
+    /**
+     * Show a tooltip positioned near this area.
+     *
+     * @param {string|jquery} [content] A string of html or a jQuery object containing the tooltip content.
+     *
+     * @param {object|string|jQuery} [options] options to apply when creating this tooltip - OR -
+     *                                         The markup, or a jquery object, containing the data for the tooltip
+     * @config {string|jquery} [container] An element within which the tooltip must be bounded
+     * @config {bool} [template] a template to use instead of the default. If this property exists and is null,
+     *                           then no template will be used.
+     * @config {string} [closeEvents] A string with one or more comma-separated values that determine when the tooltip
+     *                                closes: 'area-click','tooltip-click','image-mouseout' are valid values
+     * @config {int} [offsetx] the horizontal amount to offset the tooltip
+     * @config {int} [offsety] the vertical amount to offset the tooltip
+     * @config {string|object} [css] CSS to apply to the outermost element of the tooltip
+     */
+    m.AreaData.prototype.showToolTip= function(content,options) {
+        var tooltip, closeOpts, target, tipClosed, template,
+            ttopts = {},
+            ad=this,
+            md=ad.owner,
+            areaOpts = ad.effectiveOptions();
+
+        // copy the options object so we can update it
+        options = options ? $.extend({},options) : {};
+
+        content = content || areaOpts.toolTip;
+        closeOpts = options.closeEvents || areaOpts.toolTipClose || md.options.toolTipClose || 'tooltip-click';
+
+        template = typeof options.template !== 'undefined' ?
+                    options.template :
+                    md.options.toolTipContainer;
+
+        options.closeEvents = typeof closeOpts === 'string' ?
+                    closeOpts = u.split(closeOpts) :
+                    closeOpts;
+
+        options.fadeDuration = options.fadeDuration ||
+                    (md.options.toolTipFade ?
+                        (md.options.fadeDuration || areaOpts.fadeDuration) : 0);
+
+        target = ad.area ?
+                    ad.area :
+                    $.map(ad.areas(),
+                        function(e) {
+                            return e.area;
+                        });
+
+        if (md.activeToolTipID===ad.areaId) {
+            return;
+        }
+
+        md.clearToolTip();
+
+        md.activeToolTip = tooltip = createToolTip(content,
+            template,
+            options.css);
+
+        md.activeToolTipID = ad.areaId;
+
+        tipClosed = function() {
+            md.clearToolTip();
+        };
+
+        bindToolTipClose(closeOpts,'area-click', 'click', $(md.map), null, tipClosed);
+        bindToolTipClose(closeOpts,'tooltip-click', 'click', tooltip,null, tipClosed);
+        bindToolTipClose(closeOpts,'image-mouseout', 'mouseout', $(md.image), function(e) {
+            return (e.relatedTarget && e.relatedTarget.nodeName!=='AREA' && e.relatedTarget!==ad.area);
+        }, tipClosed);
+
+        // showToolTip takes (tooltip, target, image, container, options); the
+        // template has already been applied by createToolTip above, so it must
+        // not be passed here as an extra argument.
+        showToolTip(tooltip,
+            target,
+            md.image,
+            options.container,
+            options);
+
+        u.ifFunction(md.options.onShowToolTip, ad.area,
+            {
+                toolTip: tooltip,
+                options: ttopts,
+                areaOptions: areaOpts,
+                key: ad.key,
+                selected: ad.isSelected()
+            });
+
+        return tooltip;
+    };
+
+
+    /**
+     * Parse an object that could be a string, a jquery object, or an object with a "contents" property
+     * containing html or a jQuery object.
+     *
+     * @param {object|string|jQuery} options The parameter to parse
+     * @return {string|jquery} A string or jquery object
+     */
+    function getHtmlFromOptions(options) {
+
+        // see if any html was passed as either the options object itself, or the content property
+
+        return (options ?
+            ((typeof options === 'string' || options.jquery) ?
+                options :
+                options.content) :
+            null);
+    }
+
+    /**
+     * Activate or remove a tooltip for an area. When this method is called on an area, the
+     * key parameter doesn't apply and "options" is the first parameter.
+     *
+     * When called with no parameters, or "key" is a falsy value, any active tooltip is cleared.
+     *
+     * When only a key is provided, the default tooltip for the area is used.
+     *
+     * When html is provided, this is used instead of the default tooltip.
+     *
+     * When "noTemplate" is true, the default tooltip template will not be used either, meaning only
+     * the actual html passed will be used.
+     *
+     * @param {string|AreaElement} key The area for which to activate a tooltip, or a DOM element.
+ * + * @param {object|string|jquery} [options] options to apply when creating this tooltip - OR - + * The markup, or a jquery object, containing the data for the tooltip + * @config {string|jQuery} [content] the inner content of the tooltip; the tooltip text or HTML + * @config {Element|jQuery} [container] the inner content of the tooltip; the tooltip text or HTML + * @config {bool} [template] a template to use instead of the default. If this property exists and is null, + * then no template will be used. + * @config {int} [offsetx] the horizontal amount to offset the tooltip. + * @config {int} [offsety] the vertical amount to offset the tooltip. + * @config {string|object} [css] CSS to apply to the outermost element of the tooltip + * @config {string|object} [css] CSS to apply to the outermost element of the tooltip + * @config {bool} [fadeDuration] When non-zero, the duration in milliseconds of a fade-in effect for the tooltip. + * @return {jQuery} The jQuery object + */ + + m.impl.tooltip = function (key,options) { + return (new m.Method(this, + function mapData() { + var tooltip, target, md=this; + if (!key) { + md.clearToolTip(); + } else { + target=$(key); + if (md.activeToolTipID ===target[0]) { + return; + } + md.clearToolTip(); + + md.activeToolTip = tooltip = createToolTip(getHtmlFromOptions(options), + options.template || md.options.toolTipContainer, + options.css); + md.activeToolTipID = target[0]; + + bindToolTipClose(['tooltip-click'],'tooltip-click', 'click', tooltip, null, function() { + md.clearToolTip(); + }); + + md.activeToolTip = tooltip = showToolTip(tooltip, + target, + md.image, + options.container, + options); + } + }, + function areaData() { + if ($.isPlainObject(key) && !options) { + options = key; + } + + this.showToolTip(getHtmlFromOptions(options),options); + }, + { + name: 'tooltip', + args: arguments, + key: key + } + )).go(); + }; +} (jQuery)); diff --git a/preview-fall2024-info/materials-science.html b/preview-fall2024-info/materials-science.html new file mode 100644 index 000000000..1f8d76ee2 --- /dev/null +++ b/preview-fall2024-info/materials-science.html @@ -0,0 +1,362 @@ + + + + + + +Empowering Computational Materials Science Research using HTC + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Empowering Computational Materials Science Research using HTC +

+

Ajay Annamareddy, a research scientist at the University of Wisconsin-Madison, describes how he utilizes high-throughput computing in computational materials science.

+ +
+ Computer screen with lines of code. Uploaded by AltumCode on Unsplash (https://unsplash.com/photos/oZ61KFUQsus).
+
Computer screen with lines of code. Uploaded by AltumCode on Unsplash
+
+ +

 Groundbreaking research is in the works for the Computational Materials Group (CMG) at the University of Wisconsin-Madison (UW-Madison). Ajay Annamareddy, a research scientist within CMG, has been a leading user of GPU hours with the Center for High Throughput Computing (CHTC). He utilizes this capacity to run machine learning (ML) simulations applied to materials science problems, an area that has gained tremendous interest in the past decade. CHTC resources have allowed him to study heavily data-driven problems that would be practically impossible to tackle with conventional resources.

+ +

 Before coming to UW-Madison, Annamareddy received his Ph.D. in Nuclear Engineering from North Carolina State University. He was introduced to modeling and simulation work there, but he started using high-throughput computing (HTC) and CHTC services when he came to UW-Madison to work as a postdoc with Prof. Dane Morgan in the Materials Science and Engineering department. He now works for CMG as a research scientist, where he’s been racking up GPU hours for over a year.

+ +

 Working in the field of computational materials, Annamareddy and his group use computers to determine the properties of materials. Rather than preparing a material and measuring it in experiments, they use a computer, which is less expensive and more time-efficient. Annamareddy studies metallic glasses, materials that have many valuable properties and applications but are not easy to make. Instead, he uses computer simulations of these materials to analyze and understand their fundamental properties.

+ +

Annamareddy’s group utilizes HTC and high-performance computing (HPC) for their work, so his project lead asked him to contact CHTC and set up an account. Christina Koch, the lead research computing facilitator, responded. “She helped me set up the account and determine how many resources we needed,” Annamareddy explained. “She was very generous in that whenever I exceeded my limits, she would increase them a bit more!”

+ +

CHTC resources have become critical for Annamareddy’s work. One of the projects involves running ML simulations, which he notes would be “difficult to complete” without the support of CHTC. Annamareddy uses graph neural networks (GNN), a powerful yet slightly inefficient deep learning technique. The upside to using GNN is that as long as there is some physics component in the underlying research problem, this technique can analyze just about anything. “The caveat is you need to provide lots of data for this technique to figure out a solution.”

+ +

 To meet this data challenge, Annamareddy places the input data he generates using high-performance computing (HPC) in the HTC staging location, from which it is transferred to the local execute machine before the ML job starts running. “I use close to twenty gigabytes of data for my simulation, so this would be extremely inefficient to run without staging,” he explains. The CHTC provides Annamareddy with the storage and organization he needs to perform these potentially groundbreaking ML simulations.

+ +

 Researchers often study materials in traditional atomistic simulations at different time scales, ranging from picoseconds to microseconds. Annamareddy’s goal is to extend the time scales of these conventional simulations by using ML, an approach he has found to be well supported by HTC resources. “We have yet to reach it, but we hope we can use ML to extend the time scale of atomistic simulations by a few orders of magnitude. This would be extremely valuable when modeling systems like glass-forming materials where we should be able to obtain properties, like density and diffusion coefficients, much closer to experiments than currently possible with atomistic simulations,” Annamareddy elaborates. This is something that has never been done before in the field.

+ +

This project can potentially extend the time scales possible for conventional molecular dynamic simulations, allowing researchers in this field to predict how materials will behave over more extended periods of time. “It’s ambitious – but I’ve been working on it for more than a year, and we’ve made a lot of progress…I enjoy the challenge immensely and am happy I’m working on this problem!”

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/neuroscientist.html b/preview-fall2024-info/neuroscientist.html new file mode 100644 index 000000000..d728d2ef5 --- /dev/null +++ b/preview-fall2024-info/neuroscientist.html @@ -0,0 +1,381 @@ + + + + + + +For neuroscientist Chris Cox, the OSG helps process mountains of data + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ For neuroscientist Chris Cox, the OSG helps process mountains of data +

+

Whether exploring how the brain is fooled by fake news or explaining the decline of knowledge in dementia, cognitive neuroscientists like Chris Cox are relying more on high-throughput computing resources like the Open Science Pool to understand how the brain makes sense of information.

+ +

 Cognitive neuroscientist Chris Cox recently defended his dissertation at the University of Wisconsin–Madison (UW-Madison). Unlike the molecular or cellular branches of neuroscience, cognitive neuroscience seeks a larger view of neural systems—of “how the brain supports cognition,” said Cox.

+ +

Cox and other neuroscience researchers seek to understand which parts of the brain support memory and decision making, and answer more nuanced questions like how objects are represented in the brain. For Cox, this has involved developing new techniques for studying the brain that rely heavily on high-throughput computing.

+ +

“Our research gets into the transformations that take place in the brain. We ask questions like ‘how is information from our senses combined to support abstract knowledge that seems to transcend our senses,’” said Cox. “For example, we can recognize a single object from different perspectives and scales as being the same thing, and when we read a word we can call to mind all kinds of meaning that have little if anything to do with the letters on the page.”

+ +

 The brain is highly complex, so neural imaging methods like functional MRI yield thousands of individual data points for every two seconds of imaging. Cox first turned to high-performance computing and finally to the Open Science Pool for high-throughput computing (HTC) to deal with the massive amounts of data. Because computing support at UW-Madison is so seamless, when he first started out on HTC, Cox wasn’t even aware that the OSG was powering the vast improvement in his research.

+ +

“The OSG at UW-Madison is like flipping a switch,” said Cox. “It cut my computing time in half and was totally painless. Our research was a good candidate for the OSG and the advantages of HTC. The OSG and the Center for High Throughput Computing at UW-Madison have empowered us to get results quickly that inform our next steps. This would be impossible without the extensive and robust HTC infrastructure provided by the OSG.”

+ +

 A 45-minute experiment, run across many participants, produces enormous amounts of data. “From that, we can make inferences that generalize to humanity at large about how our brains work,” said Cox. “Our previous approach was to only look for activation that is located in the same place in the brain in different people and look for anatomical landmarks that we can line up across people. Then we ask whether they respond the same way (across people).”

+ +

“But now, we have expanded beyond that approach and look at how multiple parts of the brain are working together,” said Cox. “Even in one region of the brain, not every subcomponent might be working the same way, so when we start adding in all this extra diversity of the activation profile, we get very complicated models that have to be tuned to the data set.”

+ +

Cox’s major parameters now are how many data points to include when it’s time to build a model. “For cross-validation, that then increases the need for computing by an order of magnitude,” said Cox.

+ +

Each model can take 30 minutes to an hour to compute. Cox then runs hundreds of thousands of them to narrow in on the appropriate parameter values.

+ +

Further increasing the computational burden, this whole procedure has to be done multiple times, each time holding out a portion of the data for cross-validation. “By cross-validating and running simulations to determine what random performance looks like, we can test whether the models are revealing something meaningful about the brain,” said Cox.

+ +

Cox gains a particular advantage from high-throughput computing on the OSG by creating novel optimization procedures to probe MRI data that is more connected with cognitive theory.

+ +

“Saving a minute or two on each individual job is not important,” said Cox. “Our main priority can focus on the most conceptually sound algorithms and we can get to real work more quickly. We don’t need to optimize for a HPC cluster, we can just use the scale of HTC.”

+ +

 Cox’s research is beginning to explore the neural dynamics involved when calling to mind a concept, with millisecond resolution. This requires looking at data collected with other methods like electroencephalography (EEG) and electrocorticography (ECoG). Cox said that it takes about two full seconds for MRI to collect a single sample.

+ +

“The problem is that lots of cognitive activity is going on in those two seconds that is being missed,” said Cox. “When you gain resolution in the time domain you have a chance to notice qualitative shifts that may delimit different neural processes. Identifying when they occur has a lot of theoretical relevance, but also practical relevance in understanding when information is available to the person.”

+ +

“People think of the brain as a library—adding books to the stack and looking in a card catalog,” said Cox. “We are seeing knowledge more like Lego blocks than a library—no single block has meaning, but a collection can express meaning when properly composed. The brain puts those blocks together to give meaning. My research so far supports the Lego perspective over the library perspective.”

+ +

 Cognitive neuroscience may offer clues to cognitive decline, which in turn could inform how we think about learning, instruction, and training. Understanding the patterns of decline in the brain behind challenges like dementia can lead to better, more effective therapies.

+ +

“Also, having a more accurate understanding of what it means to ‘know’ something can also help us understand how fake news and misinformation take hold in individuals and spread through social networks,” said Cox. “At the core of these issues are fundamental questions about how we process and assimilate information.

+ +

“We know it is hard to get someone to change their mind, so the question is what is happening in the brain. The answers depend on a better understanding of what knowledge is and how we acquire it. Our research is pointed to these higher level questions.”

+ +

“Once we had access to the computational resources of the OSG, we saw a paradigm shift in the way we think about research,” said Cox. “Previously, we might have jobs running for months. With HTC on the OSG, that job length became just a few days. It gave new legs to the whole research program and pushed us forward on new optimization techniques that we never would have tried otherwise.”

+ +

– Greg Moore

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/news-navigation.html b/preview-fall2024-info/news-navigation.html new file mode 100644 index 000000000..0b632a710 --- /dev/null +++ b/preview-fall2024-info/news-navigation.html @@ -0,0 +1,357 @@ + + + + + + +News + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+

+ News +

+ +
+
+
+ +
+
+
+ + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/news.html b/preview-fall2024-info/news.html new file mode 100644 index 000000000..1b4b0a360 --- /dev/null +++ b/preview-fall2024-info/news.html @@ -0,0 +1,2575 @@ + + + + + + +News: How CHTC is Making An Impact + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+

+ News: How CHTC is Making An Impact +

+ +

 + CHTC’s pioneering computing continues to advance science and society in new ways. Located at the heart of
 + UW-Madison’s School of Computer, Data & Information Sciences (CDIS), CHTC offers
 + exceptional computing capabilities and experienced facilitation support to campus researchers and
 + international scientists alike. Working in collaboration with projects across all areas of study,
 + CHTC helps innovate solutions that otherwise might not have been possible, while at
 + the same time evolving the field of distributed computing.

+
+
+
+
+
+
+
+

Featured News

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + + + + + + + +
+ +
+
+ + + Emile working in the server room + + +
+ +

The Pelican Project: Building a universal plug for scientific data-sharing

+
+
+ +
+ By: Brian Mattmiller +
+ +
+ Nov 16, 2023 +
+
+ +
+
+
+ +
+ + + + + + + + + + + + + + +
+ +
+
+ + + HIRISE camera image of Mars + + +
+ +

USGS uses HTCondor to advance Mars research

+
+
+ +
+ By: USGS Communications +
+ +
+ Jun 12, 2023 +
+
+ +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ + + Research Computing Facilitators Christina Koch (left) and Rachel Lombardi (right). + + +
+ +

CHTC Facilitation Innovations for Research Computing

+
+
+ +
+ By: Hannah Cheren +
+ +
+ Dec 14, 2022 +
+
+ +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+

Read about some of CHTC’s latest news and projects:

+
+ + + + + + + + +
+ +
+
+ + + HTC Week 2024 Photos + + +
+ +

High Throughput Community Builds Stronger Ties at HTC24 Week

+
+
+ +
+ By: Jordan Sklar and Cristina Encarnacion +
+ +
+ Jul 17, 2024 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + NOAA Sonar Banner + + +
+ +

CHTC Launches First Fellow Program

+
+
+ +
+ By: Cristina Encarnacion +
+ +
+ Jun 26, 2024 +
+
+ +
+
+
+ +
+ + + +
+ + + +
+ + + +
+ +
+
+ + + PATh Facility hardware + + + +
+
+ +
+ + + +
+ +
+
+ + + HTCondor logo + + +
+ +

Addressing the challenges of transferring large datasets with the OSDF

+
+
+ +
+ By: Sarah Matysiak +
+ +
+ May 14, 2024 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + EHT's first black hole image of Sgr A* utilizing polarized light. + + +
+ +

Junior researchers advance black hole research with OSPool open capacity

+
+
+ +
+ By: Malia Bicoy +
+ +
+ Apr 29, 2024 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Map of institutions contributing to the Open Science Pool (OSPool). + + + +
+
+ +
+ + + +
+ + + +
+ + + + + + + +
+ +
+
+ + + Photo of California wildfires, 2021 + + +
+ +

Ecologists utilizing HTC to examine the effects of megafires on wildlife

+
+
+ +
+ By: Bryna Goeking +
+ +
+ Feb 26, 2024 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + 1937 aerial photo of central UW Madison campus + + +
+ +

Preserving historic Wisconsin aerial photos with a little help from CHTC

+
+
+ +
+ By: Malia Bicoy +
+ +
+ Feb 06, 2024 +
+
+ +
+
+
+ +
+ + + + + + + + +
+ +
+
+ + + The OSG School 2023 attendees + + +
+ +

OSG School mission: Don’t let computing be a barrier to research

+
+
+ +
+ By: Malia Bicoy +
+ +
+ Dec 20, 2023 +
+
+ +
+
+
+ +
+ + + +
+ + + +
+ + + +
+ +
+
+ + + From left to right, Senior Bioinformaticist of the Institute for Comparative Genomics Apurva Narechania, Research Computing Facilitator Rachel Lombardi, and Bioinformatics Specialist Dean Bobo at the AMNH. + + +
+ +

The American Museum of Natural History Ramps Up Education on Research Computing

+
+
+ +
+ By: Sarah Matysiak +
+ +
+ Dec 15, 2023 +
+
+ +
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +
+ +
+
+ + + Group photo of members of the Hanna Lab + + +
+ +

Training a dog and training a robot aren’t so different

+
+
+ +
+ By: Sarah Matysiak +
+ +
+ Nov 17, 2023 +
+
+ +
+
+
+ +
+ + + + + + + + +
+ +
+
+ + + Members of the Spalding Research Lab + + + +
+
+ +
+ + + +
+ +
+
+ + + facilitation team introducing htc to students + + +
+ +

CHTC Launches First Introductory Workshop on HTC and HPC

+
+
+ +
+ By: Malia Bicoy +
+ +
+ Nov 10, 2023 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Group photo of those involved with the 2023 HTCondor European Workshop + + +
+ +

HTCondor European Workshop returns for ninth year in Orsay, France

+
+
+ +
+ By: Sarah Matysiak +
+ +
+ Nov 01, 2023 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + González (left) and Tripathee (right) pictured with their awards. Photo provided by Jimena González. + + +
+ +

OSG David Swanson Awardees Honored at HTC23

+
+
+ +
+ By: Sarah Matysiak +
+ +
+ Oct 30, 2023 +
+
+ +
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + + + +
+ +
+
+ + + HIRISE camera image of Mars + + +
+ +

USGS uses HTCondor to advance Mars research

+
+
+ +
+ By: USGS Communications +
+ +
+ Jun 12, 2023 +
+
+ +
+
+
+ +
+ + + + +
+ +
+
+ + + CDIS Building Render + + +
+ +

Construction Commences on CHTC's Future Home in New CDIS Building

+
+
+ +
+ By: Shirley Obih +
+ +
+ Apr 27, 2023 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Microscope beside computer by Tima Miroshnichenko from Pexels. + + +
+ +

OSPool As a Tool for Advancing Research in Computational Chemistry

+
+
+ +
+ By: Shirley Obih +
+ +
+ Apr 25, 2023 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Headshot of Hannah Cheren + + +
+ +

Get To Know Student Communications Specialist Hannah Cheren

+
+
+ +
+ By: Shirley Obih +
+ +
+ Apr 24, 2023 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Image of a server room by Elias from Pixabay. + + +
+ +

The CHTC Philosophy of High Throughput Computing – A Talk by Greg Thain

+
+
+ +
+ By: Hannah Cheren +
+ +
+ Apr 24, 2023 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Image obtained from the official ASP2022 page on the African School of Physics website. + + +
+ +

Distributed Computing at the African School of Physics 2022 Workshop

+
+
+ +
+ By: Hannah Cheren +
+ +
+ Apr 24, 2023 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ +
+ +

Fire up the GPUs: UW-Madison, Morgridge project sparks next-level computing

+
+
+ +
+ By: Brian Mattmiller +
+ +
+ Mar 28, 2023 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Quantum AI Logo. Image from Quantum AI Product Manager Catherine Vollgraff Heidweiller’s research blog post. + + +
+ +

Google Quantum Computing Utilizing HTCondor

+
+
+ +
+ By: Hannah Cheren +
+ +
+ Mar 01, 2023 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Image of Todd T taking a selfie with a tropical beach in the background. + + +
+ +

Get To Know Todd Tannenbaum

+
+
+ +
+ By: Shirley Obih +
+ +
+ Jan 23, 2023 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Computer screen with lines of code. Uploaded by AltumCode on Unsplash. + + +
+ +

Empowering Computational Materials Science Research using HTC

+
+
+ +
+ By: Hannah Cheren +
+ +
+ Jan 20, 2023 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Christina Koch presenting to Kaiping Chen's class. + + +
+ +

CHTC Leads High Throughput Computing Demonstrations

+
+
+ +
+ By: Shirley Obih +
+ +
+ Jan 20, 2023 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + A broad lens image of some students present at the demo. + + +
+ +

CHTC Hosts Machine Learning Demo and Q+A session

+
+
+ +
+ By: Shirley Obih +
+ +
+ Dec 19, 2022 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Staff and researchers from the OSG User School 2022. + + +
+ +

OSG User School 2022 Researchers Present Inspirational Lightning Talks

+
+
+ +
+ By: Hannah Cheren +
+ +
+ Dec 19, 2022 +
+
+ +
+
+
+ +
+ + + + + + + + +
+ +
+
+ + + Conference Room + + +
+ +

High-throughput computing: Fostering data science without limits

+
+
+ +
+ By: Brian Mattmiller +
+ +
+ Dec 06, 2022 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + HPCwire 2022 Readers' Choice Awards - Best use of HPC in the Cloud ( Use Case ) + + +
+ +

UW–Madison's Icecube Neutrino Observatory Wins HPCwire Award

+
+
+ +
+ By: Anna Hildebrandt +
+ +
+ Nov 16, 2022 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Pool Record Banner + + +
+ +

Over 240,000 CHTC Jobs Hit Record Daily Capacity Consumption

+
+
+ +
+ By: Shirley Obih +
+ +
+ Nov 09, 2022 +
+
+ +
+
+
+ +
+ + + + + + + +
+ +
+
+ +
+ +

Meet Joe B. from the CHTC

+
+
+ +
+ By: Hannah Cheren +
+ +
+ Oct 03, 2022 +
+
+ +
+
+
+ +
+ + + + + + + +
+ +
+
+ + + Image of Servers + + +
+ +

Technology Refresh

+
+
+ +
+ By: Christina Koch +
+ +
+ Aug 31, 2022 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Image of two black holes from Cody Messick’s presentation slides. + + +
+ +

LIGO's Search for Gravitational Waves Signals Using HTCondor

+
+
+ +
+ By: Hannah Cheren +
+ +
+ Jul 21, 2022 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Image of the black hole in the center of our Milky Way galaxy. + + +
+ +

The Future of Radio Astronomy Using High Throughput Computing

+
+
+ +
+ By: Hannah Cheren +
+ +
+ Jul 12, 2022 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Photo by Dan Myers on Unsplash + + +
+ +

Expediting Nuclear Forensics and Security Using High Throughput Computing

+
+
+ +
+ By: Hannah Cheren +
+ +
+ Jul 06, 2022 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Justin Hiemstra, a Machine Learning Application Specialist for CHTC’s GPU Lab, discusses the testing suite developed to test CHTC's support for GPU and ML framework compatibility. + + +
+ +

Testing GPU/ML Framework Compatibility

+
+
+ +
+ By: Hannah Cheren +
+ +
+ Jul 06, 2022 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + For the first time, UW Statistics undergraduates could participate in a course teaching high throughput computing (HTC). John Gillett, lecturer of Statistics at the University of Wisconsin-Madison, designed and taught the course with the support of the Center for High Throughput Computing (CHTC). + + +
+ +

UW Statistics Course using HTC

+
+
+ +
+ By: Hannah Cheren +
+ +
+ Jul 06, 2022 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Matthew Garcia, a Postdoctoral Research Associate in the Department of Forest & Wildlife Ecology at the University of Wisconsin–Madison, discusses how he used the HTCondor Software Suite to combine HTC and HPC capacity to perform simulations that modeled the dispersal of budworm moths. + + +
+ +

Using HTC and HPC Applications to Track the Dispersal of Spruce Budworm Moths

+
+
+ +
+ By: Hannah Cheren +
+ +
+ Jul 06, 2022 +
+
+ +
+
+
+ +
+ + + +
+ + + +
+ + + +
+ +
+
+ + + PATh Facility hardware + + +
+ +

Introducing the PATh Facility: A Unique Distributed High Throughput Computing Service

+
+
+ +
+ By: Josephine Watkins +
+ +
+ Jun 01, 2022 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Computer rendering of DNA. Image credit: Sangharsh Lohakare (@sangharsh_l) on Unsplash. + + +
+ +

The role of HTC in advancing population genetics research

+
+
+ +
+ By: Hannah Cheren +
+ +
+ Jun 01, 2022 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Collage of photos from HTCondor Week + + +
+ +

A Long-Awaited Reunion: HTCondor Week 2022 in Photos

+
+
+ +
+ By: Josephine Watkins +
+ +
+ Jun 01, 2022 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Simulated image of Sagittarius A* black hole. Image library credit: EHT Theory Working Group, CK Chan. + + +
+ +

High-throughput computing as an enabler of black hole science

+
+
+ +
+ By: Brian Mattmiller +
+ +
+ May 12, 2022 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Map of Africa; Mali and Uganda are highlighted where their respective flags point. Image credit: © 2010 Roland Urbanek. Flags are edited in and overlayed on the image. + + +
+ +

NIAID/ACE - OSG collaboration leads to a successful virtual training session

+
+
+ +
+ By: Hannah Cheren +
+ +
+ May 02, 2022 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Cows Feeding with machine Learning overlay + + +
+ +

Machine Learning and Image Analyses for Livestock Data

+
+
+ +
+ By: Hannah Cheren +
+ +
+ Feb 22, 2022 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Gaylen Fronk Headshot + + + +
+
+ +
+ + + +
+ +
+
+ + + Satellite image collage graphic + + +
+ +

Protecting ecosystems with HTC

+
+
+ +
+ By: Josephine Watkins +
+ +
+ Nov 09, 2021 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Newspaper Spread + + +
+ +

Centuries of newspapers are now easily searchable thanks to HTCSS

+
+
+ +
+ By: Josephine Watkins +
+ +
+ Oct 26, 2021 +
+
+ +
+
+
+ +
+ + + +
+ +
+
+ + + Hero Image for Morgridge Article Courtesy of Morgridge + + +
+ +

Resilience: How COVID-19 challenged the scientific world

+
+
+ +
+ By: Josephine Watkins +
+ +
+ Sep 23, 2021 +
+
+ +
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +
+ +
+
+ + + Proton-proton collision + + +
+ +

Antimatter: Using HTC to study very rare processes

+
+
+ +
+ By: Josephine Watkins +
+ +
+ Aug 19, 2021 +
+
+ +
+
+
+ +
+ + +
+
+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/news/PULL_REQUEST_TEMPLATE.md b/preview-fall2024-info/news/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..cde45773f --- /dev/null +++ b/preview-fall2024-info/news/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,7 @@ +Article PR Template + +Article previews can be found [here](https://chtc.github.io/article-preview/) + +* [ ] Linked the preview url `https://chtc.github.io/article-preview/` +* [ ] Looked at the link in https://socialsharepreview.com/ to verify socials +* [ ] Requested reviews from the correct parties diff --git a/preview-fall2024-info/news/images/Ananya-headshot.jpeg b/preview-fall2024-info/news/images/Ananya-headshot.jpeg new file mode 100644 index 000000000..80567facc Binary files /dev/null and b/preview-fall2024-info/news/images/Ananya-headshot.jpeg differ diff --git a/preview-fall2024-info/news/images/Anirvan-Showcase-1.png b/preview-fall2024-info/news/images/Anirvan-Showcase-1.png new file mode 100644 index 000000000..6c5d44c6a Binary files /dev/null and b/preview-fall2024-info/news/images/Anirvan-Showcase-1.png differ diff --git a/preview-fall2024-info/news/images/Anirvan-Showcase-2.png b/preview-fall2024-info/news/images/Anirvan-Showcase-2.png new file mode 100644 index 000000000..1bd6ba5b6 Binary files /dev/null and b/preview-fall2024-info/news/images/Anirvan-Showcase-2.png differ diff --git a/preview-fall2024-info/news/images/Anirvan-Showcase-banner.png b/preview-fall2024-info/news/images/Anirvan-Showcase-banner.png new file mode 100644 index 000000000..3f1a5cc95 Binary files /dev/null and b/preview-fall2024-info/news/images/Anirvan-Showcase-banner.png differ diff --git a/preview-fall2024-info/news/images/Banner-Image.png b/preview-fall2024-info/news/images/Banner-Image.png new file mode 100644 index 000000000..104a73aa8 Binary files /dev/null and b/preview-fall2024-info/news/images/Banner-Image.png differ diff --git a/preview-fall2024-info/news/images/Bayly-work.png b/preview-fall2024-info/news/images/Bayly-work.png new file mode 100644 index 000000000..7946597bc Binary files /dev/null and b/preview-fall2024-info/news/images/Bayly-work.png differ diff --git a/preview-fall2024-info/news/images/Black-hole-banner.jpg b/preview-fall2024-info/news/images/Black-hole-banner.jpg new file mode 100644 index 000000000..478276c57 Binary files /dev/null and b/preview-fall2024-info/news/images/Black-hole-banner.jpg differ diff --git a/preview-fall2024-info/news/images/BrianBockelman.jpeg b/preview-fall2024-info/news/images/BrianBockelman.jpeg new file mode 100644 index 000000000..6890d9043 Binary files /dev/null and b/preview-fall2024-info/news/images/BrianBockelman.jpeg differ diff --git a/preview-fall2024-info/news/images/CDIS_render.jpg b/preview-fall2024-info/news/images/CDIS_render.jpg new file mode 100644 index 000000000..7f66984f2 Binary files /dev/null and b/preview-fall2024-info/news/images/CDIS_render.jpg differ diff --git a/preview-fall2024-info/news/images/CHTC_Workshop.jpg b/preview-fall2024-info/news/images/CHTC_Workshop.jpg new file mode 100644 index 000000000..6fca7382d Binary files /dev/null and b/preview-fall2024-info/news/images/CHTC_Workshop.jpg differ diff --git a/preview-fall2024-info/news/images/CLAS-Collaboration.jpg b/preview-fall2024-info/news/images/CLAS-Collaboration.jpg new file mode 100644 index 000000000..059ab8694 Binary files /dev/null and b/preview-fall2024-info/news/images/CLAS-Collaboration.jpg differ diff --git a/preview-fall2024-info/news/images/CLAS-Detector.jpg 
b/preview-fall2024-info/news/images/CLAS-Detector.jpg new file mode 100644 index 000000000..1c7957786 Binary files /dev/null and b/preview-fall2024-info/news/images/CLAS-Detector.jpg differ diff --git a/preview-fall2024-info/news/images/CLAS12-Banner.jpg b/preview-fall2024-info/news/images/CLAS12-Banner.jpg new file mode 100644 index 000000000..45361cb26 Binary files /dev/null and b/preview-fall2024-info/news/images/CLAS12-Banner.jpg differ diff --git a/preview-fall2024-info/news/images/California_wildfire.jpg b/preview-fall2024-info/news/images/California_wildfire.jpg new file mode 100644 index 000000000..f3b588cea Binary files /dev/null and b/preview-fall2024-info/news/images/California_wildfire.jpg differ diff --git a/preview-fall2024-info/news/images/Central-Campus-Madison.jpg b/preview-fall2024-info/news/images/Central-Campus-Madison.jpg new file mode 100644 index 000000000..c51e972a1 Binary files /dev/null and b/preview-fall2024-info/news/images/Central-Campus-Madison.jpg differ diff --git a/preview-fall2024-info/news/images/Chan.jpg b/preview-fall2024-info/news/images/Chan.jpg new file mode 100644 index 000000000..6e7e164fa Binary files /dev/null and b/preview-fall2024-info/news/images/Chan.jpg differ diff --git a/preview-fall2024-info/news/images/Connor-Natzke-Square-smaller.jpg b/preview-fall2024-info/news/images/Connor-Natzke-Square-smaller.jpg new file mode 100644 index 000000000..55c179176 Binary files /dev/null and b/preview-fall2024-info/news/images/Connor-Natzke-Square-smaller.jpg differ diff --git a/preview-fall2024-info/news/images/Dark-Matter-Map-II-e1521045294690.jpg b/preview-fall2024-info/news/images/Dark-Matter-Map-II-e1521045294690.jpg new file mode 100644 index 000000000..393579eb2 Binary files /dev/null and b/preview-fall2024-info/news/images/Dark-Matter-Map-II-e1521045294690.jpg differ diff --git a/preview-fall2024-info/news/images/DavidSwanson.png b/preview-fall2024-info/news/images/DavidSwanson.png new file mode 100644 index 000000000..7f70da651 Binary files /dev/null and b/preview-fall2024-info/news/images/DavidSwanson.png differ diff --git a/preview-fall2024-info/news/images/Devin-headshot.jpeg b/preview-fall2024-info/news/images/Devin-headshot.jpeg new file mode 100644 index 000000000..295d73329 Binary files /dev/null and b/preview-fall2024-info/news/images/Devin-headshot.jpeg differ diff --git a/preview-fall2024-info/news/images/Dorsey-headshot.jpg b/preview-fall2024-info/news/images/Dorsey-headshot.jpg new file mode 100644 index 000000000..0b69815ce Binary files /dev/null and b/preview-fall2024-info/news/images/Dorsey-headshot.jpg differ diff --git a/preview-fall2024-info/news/images/East Side-Madison.jpg b/preview-fall2024-info/news/images/East Side-Madison.jpg new file mode 100644 index 000000000..67ebd7152 Binary files /dev/null and b/preview-fall2024-info/news/images/East Side-Madison.jpg differ diff --git a/preview-fall2024-info/news/images/Facilitation-cover.jpeg b/preview-fall2024-info/news/images/Facilitation-cover.jpeg new file mode 100644 index 000000000..ff622665b Binary files /dev/null and b/preview-fall2024-info/news/images/Facilitation-cover.jpeg differ diff --git a/preview-fall2024-info/news/images/Fermion-Bag-300x300.jpg b/preview-fall2024-info/news/images/Fermion-Bag-300x300.jpg new file mode 100644 index 000000000..962ac6e13 Binary files /dev/null and b/preview-fall2024-info/news/images/Fermion-Bag-300x300.jpg differ diff --git a/preview-fall2024-info/news/images/Frank.jpg b/preview-fall2024-info/news/images/Frank.jpg new file mode 100644 
index 000000000..ae50ea2b8 Binary files /dev/null and b/preview-fall2024-info/news/images/Frank.jpg differ diff --git a/preview-fall2024-info/news/images/Fulvio-card.jpeg b/preview-fall2024-info/news/images/Fulvio-card.jpeg new file mode 100644 index 000000000..52710f382 Binary files /dev/null and b/preview-fall2024-info/news/images/Fulvio-card.jpeg differ diff --git a/preview-fall2024-info/news/images/Fulvio-headshot.jpeg b/preview-fall2024-info/news/images/Fulvio-headshot.jpeg new file mode 100644 index 000000000..bcca89ab3 Binary files /dev/null and b/preview-fall2024-info/news/images/Fulvio-headshot.jpeg differ diff --git a/preview-fall2024-info/news/images/Fulvio-research.png b/preview-fall2024-info/news/images/Fulvio-research.png new file mode 100644 index 000000000..cb1b59c05 Binary files /dev/null and b/preview-fall2024-info/news/images/Fulvio-research.png differ diff --git a/preview-fall2024-info/news/images/Garcia-card.png b/preview-fall2024-info/news/images/Garcia-card.png new file mode 100644 index 000000000..ad582e6d9 Binary files /dev/null and b/preview-fall2024-info/news/images/Garcia-card.png differ diff --git a/preview-fall2024-info/news/images/Garcia-cycle.png b/preview-fall2024-info/news/images/Garcia-cycle.png new file mode 100644 index 000000000..154da8ff1 Binary files /dev/null and b/preview-fall2024-info/news/images/Garcia-cycle.png differ diff --git a/preview-fall2024-info/news/images/Garcia-workflow.png b/preview-fall2024-info/news/images/Garcia-workflow.png new file mode 100644 index 000000000..473bdffc2 Binary files /dev/null and b/preview-fall2024-info/news/images/Garcia-workflow.png differ diff --git a/preview-fall2024-info/news/images/Gaylen-Fronk-square.jpg b/preview-fall2024-info/news/images/Gaylen-Fronk-square.jpg new file mode 100644 index 000000000..4290f44f7 Binary files /dev/null and b/preview-fall2024-info/news/images/Gaylen-Fronk-square.jpg differ diff --git a/preview-fall2024-info/news/images/HTCondorWeek2022-Bike.jpg b/preview-fall2024-info/news/images/HTCondorWeek2022-Bike.jpg new file mode 100644 index 000000000..845151635 Binary files /dev/null and b/preview-fall2024-info/news/images/HTCondorWeek2022-Bike.jpg differ diff --git a/preview-fall2024-info/news/images/HTCondorWeek2022-Brian-Question.jpg b/preview-fall2024-info/news/images/HTCondorWeek2022-Brian-Question.jpg new file mode 100644 index 000000000..95a4a0a7e Binary files /dev/null and b/preview-fall2024-info/news/images/HTCondorWeek2022-Brian-Question.jpg differ diff --git a/preview-fall2024-info/news/images/HTCondorWeek2022-Closing.jpg b/preview-fall2024-info/news/images/HTCondorWeek2022-Closing.jpg new file mode 100644 index 000000000..0047a2cc9 Binary files /dev/null and b/preview-fall2024-info/news/images/HTCondorWeek2022-Closing.jpg differ diff --git a/preview-fall2024-info/news/images/HTCondorWeek2022-Collage.jpg b/preview-fall2024-info/news/images/HTCondorWeek2022-Collage.jpg new file mode 100644 index 000000000..76f7311f5 Binary files /dev/null and b/preview-fall2024-info/news/images/HTCondorWeek2022-Collage.jpg differ diff --git a/preview-fall2024-info/news/images/HTCondorWeek2022-Conversation.jpg b/preview-fall2024-info/news/images/HTCondorWeek2022-Conversation.jpg new file mode 100644 index 000000000..e4aa7d070 Binary files /dev/null and b/preview-fall2024-info/news/images/HTCondorWeek2022-Conversation.jpg differ diff --git a/preview-fall2024-info/news/images/HTCondorWeek2022-Emile-Listening.jpg b/preview-fall2024-info/news/images/HTCondorWeek2022-Emile-Listening.jpg new file 
mode 100644 index 000000000..ed2639df3 Binary files /dev/null and b/preview-fall2024-info/news/images/HTCondorWeek2022-Emile-Listening.jpg differ diff --git a/preview-fall2024-info/news/images/HTCondorWeek2022-Justin-Presenting.jpg b/preview-fall2024-info/news/images/HTCondorWeek2022-Justin-Presenting.jpg new file mode 100644 index 000000000..278d2e92c Binary files /dev/null and b/preview-fall2024-info/news/images/HTCondorWeek2022-Justin-Presenting.jpg differ diff --git a/preview-fall2024-info/news/images/HTCondorWeek2022-Lobby.jpg b/preview-fall2024-info/news/images/HTCondorWeek2022-Lobby.jpg new file mode 100644 index 000000000..dacf03b49 Binary files /dev/null and b/preview-fall2024-info/news/images/HTCondorWeek2022-Lobby.jpg differ diff --git a/preview-fall2024-info/news/images/HTCondorWeek2022-Outside.jpg b/preview-fall2024-info/news/images/HTCondorWeek2022-Outside.jpg new file mode 100644 index 000000000..557294690 Binary files /dev/null and b/preview-fall2024-info/news/images/HTCondorWeek2022-Outside.jpg differ diff --git a/preview-fall2024-info/news/images/HTCondorWeek2022-Terrace.jpg b/preview-fall2024-info/news/images/HTCondorWeek2022-Terrace.jpg new file mode 100644 index 000000000..5184a8800 Binary files /dev/null and b/preview-fall2024-info/news/images/HTCondorWeek2022-Terrace.jpg differ diff --git a/preview-fall2024-info/news/images/HTCondorWeek2022-Welcome.jpg b/preview-fall2024-info/news/images/HTCondorWeek2022-Welcome.jpg new file mode 100644 index 000000000..f181610e5 Binary files /dev/null and b/preview-fall2024-info/news/images/HTCondorWeek2022-Welcome.jpg differ diff --git a/preview-fall2024-info/news/images/HTCondorWeek2022-Wilcots.jpg b/preview-fall2024-info/news/images/HTCondorWeek2022-Wilcots.jpg new file mode 100644 index 000000000..06d80a147 Binary files /dev/null and b/preview-fall2024-info/news/images/HTCondorWeek2022-Wilcots.jpg differ diff --git a/preview-fall2024-info/news/images/HTCondorWeek2022-Yudhajit.jpg b/preview-fall2024-info/news/images/HTCondorWeek2022-Yudhajit.jpg new file mode 100644 index 000000000..715140a42 Binary files /dev/null and b/preview-fall2024-info/news/images/HTCondorWeek2022-Yudhajit.jpg differ diff --git a/preview-fall2024-info/news/images/HTCondor_Banner.jpeg b/preview-fall2024-info/news/images/HTCondor_Banner.jpeg new file mode 100644 index 000000000..c6eca8bee Binary files /dev/null and b/preview-fall2024-info/news/images/HTCondor_Banner.jpeg differ diff --git a/preview-fall2024-info/news/images/HTCondor_Bird.png b/preview-fall2024-info/news/images/HTCondor_Bird.png new file mode 100644 index 000000000..2d19edf9a Binary files /dev/null and b/preview-fall2024-info/news/images/HTCondor_Bird.png differ diff --git a/preview-fall2024-info/news/images/HTCondor_red_blk.png b/preview-fall2024-info/news/images/HTCondor_red_blk.png new file mode 100644 index 000000000..667a492e8 Binary files /dev/null and b/preview-fall2024-info/news/images/HTCondor_red_blk.png differ diff --git a/preview-fall2024-info/news/images/HTCondor_red_blk_notag.png b/preview-fall2024-info/news/images/HTCondor_red_blk_notag.png new file mode 100644 index 000000000..f7d74b8c4 Binary files /dev/null and b/preview-fall2024-info/news/images/HTCondor_red_blk_notag.png differ diff --git a/preview-fall2024-info/news/images/Hannah-Showcase-Banner.jpg b/preview-fall2024-info/news/images/Hannah-Showcase-Banner.jpg new file mode 100644 index 000000000..f229dd844 Binary files /dev/null and b/preview-fall2024-info/news/images/Hannah-Showcase-Banner.jpg differ diff --git 
a/preview-fall2024-info/news/images/Hannah-Showcase.jpg b/preview-fall2024-info/news/images/Hannah-Showcase.jpg new file mode 100644 index 000000000..f999b4c5b Binary files /dev/null and b/preview-fall2024-info/news/images/Hannah-Showcase.jpg differ diff --git a/preview-fall2024-info/news/images/Hiemstra-card.png b/preview-fall2024-info/news/images/Hiemstra-card.png new file mode 100644 index 000000000..72586d59e Binary files /dev/null and b/preview-fall2024-info/news/images/Hiemstra-card.png differ diff --git a/preview-fall2024-info/news/images/HimadriChakraborty2024.jpg b/preview-fall2024-info/news/images/HimadriChakraborty2024.jpg new file mode 100644 index 000000000..72d2ed1fa Binary files /dev/null and b/preview-fall2024-info/news/images/HimadriChakraborty2024.jpg differ diff --git a/preview-fall2024-info/news/images/IMG_4838.JPG b/preview-fall2024-info/news/images/IMG_4838.JPG new file mode 100644 index 000000000..1c25df2c9 Binary files /dev/null and b/preview-fall2024-info/news/images/IMG_4838.JPG differ diff --git a/preview-fall2024-info/news/images/IMG_4839.JPG b/preview-fall2024-info/news/images/IMG_4839.JPG new file mode 100644 index 000000000..abf88e3f3 Binary files /dev/null and b/preview-fall2024-info/news/images/IMG_4839.JPG differ diff --git a/preview-fall2024-info/news/images/Icecube_Wins_Award.png b/preview-fall2024-info/news/images/Icecube_Wins_Award.png new file mode 100644 index 000000000..05760b0d6 Binary files /dev/null and b/preview-fall2024-info/news/images/Icecube_Wins_Award.png differ diff --git a/preview-fall2024-info/news/images/Jefferson-Lab.jpg b/preview-fall2024-info/news/images/Jefferson-Lab.jpg new file mode 100644 index 000000000..a5becfb69 Binary files /dev/null and b/preview-fall2024-info/news/images/Jefferson-Lab.jpg differ diff --git a/preview-fall2024-info/news/images/Jem-headshot.jpeg b/preview-fall2024-info/news/images/Jem-headshot.jpeg new file mode 100644 index 000000000..148bcd8f8 Binary files /dev/null and b/preview-fall2024-info/news/images/Jem-headshot.jpeg differ diff --git a/preview-fall2024-info/news/images/Joshi.png b/preview-fall2024-info/news/images/Joshi.png new file mode 100644 index 000000000..a197e58e3 Binary files /dev/null and b/preview-fall2024-info/news/images/Joshi.png differ diff --git a/preview-fall2024-info/news/images/Lacy-portrait.jpeg b/preview-fall2024-info/news/images/Lacy-portrait.jpeg new file mode 100644 index 000000000..19218394d Binary files /dev/null and b/preview-fall2024-info/news/images/Lacy-portrait.jpeg differ diff --git a/preview-fall2024-info/news/images/Lacy-presentation-screenshot.png b/preview-fall2024-info/news/images/Lacy-presentation-screenshot.png new file mode 100644 index 000000000..1ffa9053e Binary files /dev/null and b/preview-fall2024-info/news/images/Lacy-presentation-screenshot.png differ diff --git a/preview-fall2024-info/news/images/Lightning-Talks-card.jpeg b/preview-fall2024-info/news/images/Lightning-Talks-card.jpeg new file mode 100644 index 000000000..0587fb1ef Binary files /dev/null and b/preview-fall2024-info/news/images/Lightning-Talks-card.jpeg differ diff --git a/preview-fall2024-info/news/images/ML_1.jpeg b/preview-fall2024-info/news/images/ML_1.jpeg new file mode 100644 index 000000000..bccd59d57 Binary files /dev/null and b/preview-fall2024-info/news/images/ML_1.jpeg differ diff --git a/preview-fall2024-info/news/images/Matthew-headshot.jpeg b/preview-fall2024-info/news/images/Matthew-headshot.jpeg new file mode 100644 index 000000000..dd1b2270c Binary files /dev/null and 
b/preview-fall2024-info/news/images/Matthew-headshot.jpeg differ diff --git a/preview-fall2024-info/news/images/Maurizio.jpg b/preview-fall2024-info/news/images/Maurizio.jpg new file mode 100644 index 000000000..187b3e98e Binary files /dev/null and b/preview-fall2024-info/news/images/Maurizio.jpg differ diff --git a/preview-fall2024-info/news/images/Max-headshot.jpeg b/preview-fall2024-info/news/images/Max-headshot.jpeg new file mode 100644 index 000000000..08a87bf8c Binary files /dev/null and b/preview-fall2024-info/news/images/Max-headshot.jpeg differ diff --git a/preview-fall2024-info/news/images/Meng-headshot.jpeg b/preview-fall2024-info/news/images/Meng-headshot.jpeg new file mode 100644 index 000000000..13fa73d1c Binary files /dev/null and b/preview-fall2024-info/news/images/Meng-headshot.jpeg differ diff --git a/preview-fall2024-info/news/images/Messick-card.png b/preview-fall2024-info/news/images/Messick-card.png new file mode 100644 index 000000000..1062d3780 Binary files /dev/null and b/preview-fall2024-info/news/images/Messick-card.png differ diff --git a/preview-fall2024-info/news/images/Messick-workflow.png b/preview-fall2024-info/news/images/Messick-workflow.png new file mode 100644 index 000000000..615c50bfb Binary files /dev/null and b/preview-fall2024-info/news/images/Messick-workflow.png differ diff --git a/preview-fall2024-info/news/images/Mike-headshot.jpeg b/preview-fall2024-info/news/images/Mike-headshot.jpeg new file mode 100644 index 000000000..02b65b992 Binary files /dev/null and b/preview-fall2024-info/news/images/Mike-headshot.jpeg differ diff --git a/preview-fall2024-info/news/images/Miron.jpg b/preview-fall2024-info/news/images/Miron.jpg new file mode 100644 index 000000000..47605b945 Binary files /dev/null and b/preview-fall2024-info/news/images/Miron.jpg differ diff --git a/preview-fall2024-info/news/images/NIAID-banner.jpeg b/preview-fall2024-info/news/images/NIAID-banner.jpeg new file mode 100644 index 000000000..4b8ab10f2 Binary files /dev/null and b/preview-fall2024-info/news/images/NIAID-banner.jpeg differ diff --git a/preview-fall2024-info/news/images/NIAID-card.jpeg b/preview-fall2024-info/news/images/NIAID-card.jpeg new file mode 100644 index 000000000..29755f70d Binary files /dev/null and b/preview-fall2024-info/news/images/NIAID-card.jpeg differ diff --git a/preview-fall2024-info/news/images/NIAID_students.jpeg b/preview-fall2024-info/news/images/NIAID_students.jpeg new file mode 100644 index 000000000..f741f8462 Binary files /dev/null and b/preview-fall2024-info/news/images/NIAID_students.jpeg differ diff --git a/preview-fall2024-info/news/images/Nathan.jpg b/preview-fall2024-info/news/images/Nathan.jpg new file mode 100644 index 000000000..9d735ea50 Binary files /dev/null and b/preview-fall2024-info/news/images/Nathan.jpg differ diff --git a/preview-fall2024-info/news/images/OSG-User-School.jpg b/preview-fall2024-info/news/images/OSG-User-School.jpg new file mode 100644 index 000000000..4f795bae6 Binary files /dev/null and b/preview-fall2024-info/news/images/OSG-User-School.jpg differ diff --git a/preview-fall2024-info/news/images/OSGVS21-Logo.png b/preview-fall2024-info/news/images/OSGVS21-Logo.png new file mode 100644 index 000000000..888101cc5 Binary files /dev/null and b/preview-fall2024-info/news/images/OSGVS21-Logo.png differ diff --git a/preview-fall2024-info/news/images/OSPool_Contributors.png b/preview-fall2024-info/news/images/OSPool_Contributors.png new file mode 100644 index 000000000..07ccfb3ad Binary files /dev/null and 
b/preview-fall2024-info/news/images/OSPool_Contributors.png differ diff --git a/preview-fall2024-info/news/images/Olaitan-headshot.jpeg b/preview-fall2024-info/news/images/Olaitan-headshot.jpeg new file mode 100644 index 000000000..f02d1fcd4 Binary files /dev/null and b/preview-fall2024-info/news/images/Olaitan-headshot.jpeg differ diff --git a/preview-fall2024-info/news/images/Opotowsky-card.jpeg b/preview-fall2024-info/news/images/Opotowsky-card.jpeg new file mode 100644 index 000000000..15f710fda Binary files /dev/null and b/preview-fall2024-info/news/images/Opotowsky-card.jpeg differ diff --git a/preview-fall2024-info/news/images/Opotowsky-headshot.png b/preview-fall2024-info/news/images/Opotowsky-headshot.png new file mode 100644 index 000000000..2082adbe5 Binary files /dev/null and b/preview-fall2024-info/news/images/Opotowsky-headshot.png differ diff --git a/preview-fall2024-info/news/images/PATh-Facility-Hardware.jpg b/preview-fall2024-info/news/images/PATh-Facility-Hardware.jpg new file mode 100644 index 000000000..fe5e6ff85 Binary files /dev/null and b/preview-fall2024-info/news/images/PATh-Facility-Hardware.jpg differ diff --git a/preview-fall2024-info/news/images/PATh-Facility-Map.jpg b/preview-fall2024-info/news/images/PATh-Facility-Map.jpg new file mode 100644 index 000000000..a7da36c8c Binary files /dev/null and b/preview-fall2024-info/news/images/PATh-Facility-Map.jpg differ diff --git a/preview-fall2024-info/news/images/PATh_Facility_Preview.jpeg b/preview-fall2024-info/news/images/PATh_Facility_Preview.jpeg new file mode 100644 index 000000000..b6a10d899 Binary files /dev/null and b/preview-fall2024-info/news/images/PATh_Facility_Preview.jpeg differ diff --git a/preview-fall2024-info/news/images/Parul-Johri-headshot.jpg b/preview-fall2024-info/news/images/Parul-Johri-headshot.jpg new file mode 100644 index 000000000..d18c7fc95 Binary files /dev/null and b/preview-fall2024-info/news/images/Parul-Johri-headshot.jpg differ diff --git a/preview-fall2024-info/news/images/Pascal.jpg b/preview-fall2024-info/news/images/Pascal.jpg new file mode 100644 index 000000000..eae5c53c6 Binary files /dev/null and b/preview-fall2024-info/news/images/Pascal.jpg differ diff --git a/preview-fall2024-info/news/images/Peder-headshot.jpeg b/preview-fall2024-info/news/images/Peder-headshot.jpeg new file mode 100644 index 000000000..9f6ffb1ff Binary files /dev/null and b/preview-fall2024-info/news/images/Peder-headshot.jpeg differ diff --git a/preview-fall2024-info/news/images/Pool-Record-Image.jpg b/preview-fall2024-info/news/images/Pool-Record-Image.jpg new file mode 100644 index 000000000..943b7c885 Binary files /dev/null and b/preview-fall2024-info/news/images/Pool-Record-Image.jpg differ diff --git a/preview-fall2024-info/news/images/RNAMake-Example.png b/preview-fall2024-info/news/images/RNAMake-Example.png new file mode 100644 index 000000000..5fa43e2bb Binary files /dev/null and b/preview-fall2024-info/news/images/RNAMake-Example.png differ diff --git a/preview-fall2024-info/news/images/Raffaela.jpg b/preview-fall2024-info/news/images/Raffaela.jpg new file mode 100644 index 000000000..317a1ce84 Binary files /dev/null and b/preview-fall2024-info/news/images/Raffaela.jpg differ diff --git a/preview-fall2024-info/news/images/Ricarte.jpg b/preview-fall2024-info/news/images/Ricarte.jpg new file mode 100644 index 000000000..4432ccfa1 Binary files /dev/null and b/preview-fall2024-info/news/images/Ricarte.jpg differ diff --git a/preview-fall2024-info/news/images/SagA-black-hole.jpg 
b/preview-fall2024-info/news/images/SagA-black-hole.jpg new file mode 100644 index 000000000..c44b0658a Binary files /dev/null and b/preview-fall2024-info/news/images/SagA-black-hole.jpg differ diff --git a/preview-fall2024-info/news/images/Science-Gateway-Students.jpeg b/preview-fall2024-info/news/images/Science-Gateway-Students.jpeg new file mode 100644 index 000000000..748a2f3ba Binary files /dev/null and b/preview-fall2024-info/news/images/Science-Gateway-Students.jpeg differ diff --git a/preview-fall2024-info/news/images/Screen Shot 2022-12-19 at 12.34.31 PM.png b/preview-fall2024-info/news/images/Screen Shot 2022-12-19 at 12.34.31 PM.png new file mode 100644 index 000000000..92dd004f6 Binary files /dev/null and b/preview-fall2024-info/news/images/Screen Shot 2022-12-19 at 12.34.31 PM.png differ diff --git a/preview-fall2024-info/news/images/Shailesh-Chandrasekharan-150x150.jpg b/preview-fall2024-info/news/images/Shailesh-Chandrasekharan-150x150.jpg new file mode 100644 index 000000000..4865839fc Binary files /dev/null and b/preview-fall2024-info/news/images/Shailesh-Chandrasekharan-150x150.jpg differ diff --git a/preview-fall2024-info/news/images/Side-by-Side-Madison-Aerial-Photography.jpg b/preview-fall2024-info/news/images/Side-by-Side-Madison-Aerial-Photography.jpg new file mode 100644 index 000000000..9229b1d38 Binary files /dev/null and b/preview-fall2024-info/news/images/Side-by-Side-Madison-Aerial-Photography.jpg differ diff --git a/preview-fall2024-info/news/images/Spectrometer.jpg b/preview-fall2024-info/news/images/Spectrometer.jpg new file mode 100644 index 000000000..9069c7675 Binary files /dev/null and b/preview-fall2024-info/news/images/Spectrometer.jpg differ diff --git a/preview-fall2024-info/news/images/Spencer-Showcase-Banner.jpg b/preview-fall2024-info/news/images/Spencer-Showcase-Banner.jpg new file mode 100644 index 000000000..cf3f88729 Binary files /dev/null and b/preview-fall2024-info/news/images/Spencer-Showcase-Banner.jpg differ diff --git a/preview-fall2024-info/news/images/Spencer-Showcase.jpg b/preview-fall2024-info/news/images/Spencer-Showcase.jpg new file mode 100644 index 000000000..d75e9bc41 Binary files /dev/null and b/preview-fall2024-info/news/images/Spencer-Showcase.jpg differ diff --git a/preview-fall2024-info/news/images/Submodule_Image.png b/preview-fall2024-info/news/images/Submodule_Image.png new file mode 100644 index 000000000..b1d876d4e Binary files /dev/null and b/preview-fall2024-info/news/images/Submodule_Image.png differ diff --git a/preview-fall2024-info/news/images/USGS-collage.jpg b/preview-fall2024-info/news/images/USGS-collage.jpg new file mode 100644 index 000000000..33bc74f42 Binary files /dev/null and b/preview-fall2024-info/news/images/USGS-collage.jpg differ diff --git a/preview-fall2024-info/news/images/User_School_Collage.jpg b/preview-fall2024-info/news/images/User_School_Collage.jpg new file mode 100644 index 000000000..e5eb84963 Binary files /dev/null and b/preview-fall2024-info/news/images/User_School_Collage.jpg differ diff --git a/preview-fall2024-info/news/images/Venkitesh-150x150.jpg b/preview-fall2024-info/news/images/Venkitesh-150x150.jpg new file mode 100644 index 000000000..1acaeb2f9 Binary files /dev/null and b/preview-fall2024-info/news/images/Venkitesh-150x150.jpg differ diff --git a/preview-fall2024-info/news/images/WMS-hours-by-facility-comet.png b/preview-fall2024-info/news/images/WMS-hours-by-facility-comet.png new file mode 100644 index 000000000..1f3521f32 Binary files /dev/null and 
b/preview-fall2024-info/news/images/WMS-hours-by-facility-comet.png differ diff --git a/preview-fall2024-info/news/images/Wilcots-card.png b/preview-fall2024-info/news/images/Wilcots-card.png new file mode 100644 index 000000000..bca604889 Binary files /dev/null and b/preview-fall2024-info/news/images/Wilcots-card.png differ diff --git a/preview-fall2024-info/news/images/Wilcots-headshot.jpeg b/preview-fall2024-info/news/images/Wilcots-headshot.jpeg new file mode 100644 index 000000000..2d73f6c46 Binary files /dev/null and b/preview-fall2024-info/news/images/Wilcots-headshot.jpeg differ diff --git a/preview-fall2024-info/news/images/Wilcots-map.png b/preview-fall2024-info/news/images/Wilcots-map.png new file mode 100644 index 000000000..efda3ef85 Binary files /dev/null and b/preview-fall2024-info/news/images/Wilcots-map.png differ diff --git a/preview-fall2024-info/news/images/Wilcots-planet.png b/preview-fall2024-info/news/images/Wilcots-planet.png new file mode 100644 index 000000000..973272b67 Binary files /dev/null and b/preview-fall2024-info/news/images/Wilcots-planet.png differ diff --git a/preview-fall2024-info/news/images/Wilcots-spiral.png b/preview-fall2024-info/news/images/Wilcots-spiral.png new file mode 100644 index 000000000..d5d4898f6 Binary files /dev/null and b/preview-fall2024-info/news/images/Wilcots-spiral.png differ diff --git a/preview-fall2024-info/news/images/Zack-headshot.jpeg b/preview-fall2024-info/news/images/Zack-headshot.jpeg new file mode 100644 index 000000000..42acfd935 Binary files /dev/null and b/preview-fall2024-info/news/images/Zack-headshot.jpeg differ diff --git a/preview-fall2024-info/news/images/amnh.jpg b/preview-fall2024-info/news/images/amnh.jpg new file mode 100644 index 000000000..bfb9eb17b Binary files /dev/null and b/preview-fall2024-info/news/images/amnh.jpg differ diff --git a/preview-fall2024-info/news/images/amnhgroup.jpeg b/preview-fall2024-info/news/images/amnhgroup.jpeg new file mode 100644 index 000000000..8e83f0f03 Binary files /dev/null and b/preview-fall2024-info/news/images/amnhgroup.jpeg differ diff --git a/preview-fall2024-info/news/images/asp-banner.jpeg b/preview-fall2024-info/news/images/asp-banner.jpeg new file mode 100644 index 000000000..2de5da89c Binary files /dev/null and b/preview-fall2024-info/news/images/asp-banner.jpeg differ diff --git a/preview-fall2024-info/news/images/asp-horst.jpg b/preview-fall2024-info/news/images/asp-horst.jpg new file mode 100644 index 000000000..b66ad50e5 Binary files /dev/null and b/preview-fall2024-info/news/images/asp-horst.jpg differ diff --git a/preview-fall2024-info/news/images/asp-students.jpeg b/preview-fall2024-info/news/images/asp-students.jpeg new file mode 100644 index 000000000..a3697875c Binary files /dev/null and b/preview-fall2024-info/news/images/asp-students.jpeg differ diff --git a/preview-fall2024-info/news/images/bannerimagedemo.png b/preview-fall2024-info/news/images/bannerimagedemo.png new file mode 100644 index 000000000..ab261da4a Binary files /dev/null and b/preview-fall2024-info/news/images/bannerimagedemo.png differ diff --git a/preview-fall2024-info/news/images/banq-mw.jpg b/preview-fall2024-info/news/images/banq-mw.jpg new file mode 100644 index 000000000..77dbbadea Binary files /dev/null and b/preview-fall2024-info/news/images/banq-mw.jpg differ diff --git a/preview-fall2024-info/news/images/banq-mw.png b/preview-fall2024-info/news/images/banq-mw.png new file mode 100644 index 000000000..b2a97d94b Binary files /dev/null and 
b/preview-fall2024-info/news/images/banq-mw.png differ diff --git a/preview-fall2024-info/news/images/banq-patrie.jpg b/preview-fall2024-info/news/images/banq-patrie.jpg new file mode 100644 index 000000000..b91b1d348 Binary files /dev/null and b/preview-fall2024-info/news/images/banq-patrie.jpg differ diff --git a/preview-fall2024-info/news/images/banq-patrie.png b/preview-fall2024-info/news/images/banq-patrie.png new file mode 100644 index 000000000..76aae24e7 Binary files /dev/null and b/preview-fall2024-info/news/images/banq-patrie.png differ diff --git a/preview-fall2024-info/news/images/bat-carousel.jpg b/preview-fall2024-info/news/images/bat-carousel.jpg new file mode 100644 index 000000000..034a58c6b Binary files /dev/null and b/preview-fall2024-info/news/images/bat-carousel.jpg differ diff --git a/preview-fall2024-info/news/images/bat-genomics-Ariadna.png b/preview-fall2024-info/news/images/bat-genomics-Ariadna.png new file mode 100644 index 000000000..2c8b950e4 Binary files /dev/null and b/preview-fall2024-info/news/images/bat-genomics-Ariadna.png differ diff --git a/preview-fall2024-info/news/images/beads.jpg b/preview-fall2024-info/news/images/beads.jpg new file mode 100644 index 000000000..b7bda7b4a Binary files /dev/null and b/preview-fall2024-info/news/images/beads.jpg differ diff --git a/preview-fall2024-info/news/images/brian-speaking.png b/preview-fall2024-info/news/images/brian-speaking.png new file mode 100644 index 000000000..f92873ed2 Binary files /dev/null and b/preview-fall2024-info/news/images/brian-speaking.png differ diff --git a/preview-fall2024-info/news/images/brianbockelman.jpg b/preview-fall2024-info/news/images/brianbockelman.jpg new file mode 100644 index 000000000..1ac0e21f1 Binary files /dev/null and b/preview-fall2024-info/news/images/brianbockelman.jpg differ diff --git a/preview-fall2024-info/news/images/byregion.png b/preview-fall2024-info/news/images/byregion.png new file mode 100644 index 000000000..0f2336f7a Binary files /dev/null and b/preview-fall2024-info/news/images/byregion.png differ diff --git a/preview-fall2024-info/news/images/cartwright-headshot.jpeg b/preview-fall2024-info/news/images/cartwright-headshot.jpeg new file mode 100644 index 000000000..5a6ac61b8 Binary files /dev/null and b/preview-fall2024-info/news/images/cartwright-headshot.jpeg differ diff --git a/preview-fall2024-info/news/images/cattle.png b/preview-fall2024-info/news/images/cattle.png new file mode 100644 index 000000000..61e008495 Binary files /dev/null and b/preview-fall2024-info/news/images/cattle.png differ diff --git a/preview-fall2024-info/news/images/chenimage.png b/preview-fall2024-info/news/images/chenimage.png new file mode 100644 index 000000000..7ef14ac20 Binary files /dev/null and b/preview-fall2024-info/news/images/chenimage.png differ diff --git a/preview-fall2024-info/news/images/christina-koch-chtc-featured.webp b/preview-fall2024-info/news/images/christina-koch-chtc-featured.webp new file mode 100644 index 000000000..90914f9d8 Binary files /dev/null and b/preview-fall2024-info/news/images/christina-koch-chtc-featured.webp differ diff --git a/preview-fall2024-info/news/images/christina-koch-square.jpg b/preview-fall2024-info/news/images/christina-koch-square.jpg new file mode 100644 index 000000000..8b62fe376 Binary files /dev/null and b/preview-fall2024-info/news/images/christina-koch-square.jpg differ diff --git a/preview-fall2024-info/news/images/chtc-facilitation.jpeg b/preview-fall2024-info/news/images/chtc-facilitation.jpeg new file mode 100644 
index 000000000..c65782a6c Binary files /dev/null and b/preview-fall2024-info/news/images/chtc-facilitation.jpeg differ diff --git a/preview-fall2024-info/news/images/chtc-map.jpg b/preview-fall2024-info/news/images/chtc-map.jpg new file mode 100644 index 000000000..6183a371d Binary files /dev/null and b/preview-fall2024-info/news/images/chtc-map.jpg differ diff --git a/preview-fall2024-info/news/images/chtc-philosophy-banner.jpg b/preview-fall2024-info/news/images/chtc-philosophy-banner.jpg new file mode 100644 index 000000000..98ba34bc2 Binary files /dev/null and b/preview-fall2024-info/news/images/chtc-philosophy-banner.jpg differ diff --git a/preview-fall2024-info/news/images/chtc-philosophy-server.jpg b/preview-fall2024-info/news/images/chtc-philosophy-server.jpg new file mode 100644 index 000000000..98ba34bc2 Binary files /dev/null and b/preview-fall2024-info/news/images/chtc-philosophy-server.jpg differ diff --git a/preview-fall2024-info/news/images/clark_cropped-150x150.jpeg b/preview-fall2024-info/news/images/clark_cropped-150x150.jpeg new file mode 100644 index 000000000..476d79464 Binary files /dev/null and b/preview-fall2024-info/news/images/clark_cropped-150x150.jpeg differ diff --git a/preview-fall2024-info/news/images/classimage.png b/preview-fall2024-info/news/images/classimage.png new file mode 100644 index 000000000..6f133b0f7 Binary files /dev/null and b/preview-fall2024-info/news/images/classimage.png differ diff --git a/preview-fall2024-info/news/images/classroomimage.jpeg b/preview-fall2024-info/news/images/classroomimage.jpeg new file mode 100644 index 000000000..192c5e891 Binary files /dev/null and b/preview-fall2024-info/news/images/classroomimage.jpeg differ diff --git a/preview-fall2024-info/news/images/cole_beads.jpeg b/preview-fall2024-info/news/images/cole_beads.jpeg new file mode 100644 index 000000000..2a4816e74 Binary files /dev/null and b/preview-fall2024-info/news/images/cole_beads.jpeg differ diff --git a/preview-fall2024-info/news/images/cole_cage.jpg b/preview-fall2024-info/news/images/cole_cage.jpg new file mode 100644 index 000000000..068b017ad Binary files /dev/null and b/preview-fall2024-info/news/images/cole_cage.jpg differ diff --git a/preview-fall2024-info/news/images/cole_math.jpg b/preview-fall2024-info/news/images/cole_math.jpg new file mode 100644 index 000000000..93fc81718 Binary files /dev/null and b/preview-fall2024-info/news/images/cole_math.jpg differ diff --git a/preview-fall2024-info/news/images/cole_pose.jpeg b/preview-fall2024-info/news/images/cole_pose.jpeg new file mode 100644 index 000000000..28dadea37 Binary files /dev/null and b/preview-fall2024-info/news/images/cole_pose.jpeg differ diff --git a/preview-fall2024-info/news/images/cole_terrace.jpg b/preview-fall2024-info/news/images/cole_terrace.jpg new file mode 100644 index 000000000..aed60ab66 Binary files /dev/null and b/preview-fall2024-info/news/images/cole_terrace.jpg differ diff --git a/preview-fall2024-info/news/images/cole_terrace1.jpg b/preview-fall2024-info/news/images/cole_terrace1.jpg new file mode 100644 index 000000000..aed60ab66 Binary files /dev/null and b/preview-fall2024-info/news/images/cole_terrace1.jpg differ diff --git a/preview-fall2024-info/news/images/cole_tree.jpeg b/preview-fall2024-info/news/images/cole_tree.jpeg new file mode 100644 index 000000000..d947c5d0e Binary files /dev/null and b/preview-fall2024-info/news/images/cole_tree.jpeg differ diff --git a/preview-fall2024-info/news/images/collage-header.png 
b/preview-fall2024-info/news/images/collage-header.png new file mode 100644 index 000000000..64f267c91 Binary files /dev/null and b/preview-fall2024-info/news/images/collage-header.png differ diff --git a/preview-fall2024-info/news/images/core-comp-gpu.jpeg b/preview-fall2024-info/news/images/core-comp-gpu.jpeg new file mode 100644 index 000000000..39f266964 Binary files /dev/null and b/preview-fall2024-info/news/images/core-comp-gpu.jpeg differ diff --git a/preview-fall2024-info/news/images/costume.jpg b/preview-fall2024-info/news/images/costume.jpg new file mode 100644 index 000000000..8ff22fcdf Binary files /dev/null and b/preview-fall2024-info/news/images/costume.jpg differ diff --git a/preview-fall2024-info/news/images/couvares_cropped-150x150.jpeg b/preview-fall2024-info/news/images/couvares_cropped-150x150.jpeg new file mode 100644 index 000000000..2375b4f1f Binary files /dev/null and b/preview-fall2024-info/news/images/couvares_cropped-150x150.jpeg differ diff --git a/preview-fall2024-info/news/images/cow-ml-image-cropped-banner.jpg b/preview-fall2024-info/news/images/cow-ml-image-cropped-banner.jpg new file mode 100644 index 000000000..998532c88 Binary files /dev/null and b/preview-fall2024-info/news/images/cow-ml-image-cropped-banner.jpg differ diff --git a/preview-fall2024-info/news/images/cow-ml-image-cropped.jpg b/preview-fall2024-info/news/images/cow-ml-image-cropped.jpg new file mode 100644 index 000000000..f823a76ff Binary files /dev/null and b/preview-fall2024-info/news/images/cow-ml-image-cropped.jpg differ diff --git a/preview-fall2024-info/news/images/darrylthelen.jpg b/preview-fall2024-info/news/images/darrylthelen.jpg new file mode 100644 index 000000000..870a696a6 Binary files /dev/null and b/preview-fall2024-info/news/images/darrylthelen.jpg differ diff --git a/preview-fall2024-info/news/images/davidswanson.jpg b/preview-fall2024-info/news/images/davidswanson.jpg new file mode 100644 index 000000000..6d6a5b7d2 Binary files /dev/null and b/preview-fall2024-info/news/images/davidswanson.jpg differ diff --git a/preview-fall2024-info/news/images/deforestation.png b/preview-fall2024-info/news/images/deforestation.png new file mode 100644 index 000000000..dcaf103d9 Binary files /dev/null and b/preview-fall2024-info/news/images/deforestation.png differ diff --git a/preview-fall2024-info/news/images/demobanner.png b/preview-fall2024-info/news/images/demobanner.png new file mode 100644 index 000000000..57a133354 Binary files /dev/null and b/preview-fall2024-info/news/images/demobanner.png differ diff --git a/preview-fall2024-info/news/images/demopic.png b/preview-fall2024-info/news/images/demopic.png new file mode 100644 index 000000000..3a1086f41 Binary files /dev/null and b/preview-fall2024-info/news/images/demopic.png differ diff --git a/preview-fall2024-info/news/images/derekweitzel.jpg b/preview-fall2024-info/news/images/derekweitzel.jpg new file mode 100644 index 000000000..ad025794a Binary files /dev/null and b/preview-fall2024-info/news/images/derekweitzel.jpg differ diff --git a/preview-fall2024-info/news/images/dna.jpeg b/preview-fall2024-info/news/images/dna.jpeg new file mode 100644 index 000000000..e025637fa Binary files /dev/null and b/preview-fall2024-info/news/images/dna.jpeg differ diff --git a/preview-fall2024-info/news/images/dog.jpg b/preview-fall2024-info/news/images/dog.jpg new file mode 100644 index 000000000..7c8503930 Binary files /dev/null and b/preview-fall2024-info/news/images/dog.jpg differ diff --git 
a/preview-fall2024-info/news/images/doit-summary-article.jpeg b/preview-fall2024-info/news/images/doit-summary-article.jpeg new file mode 100644 index 000000000..713adc41c Binary files /dev/null and b/preview-fall2024-info/news/images/doit-summary-article.jpeg differ diff --git a/preview-fall2024-info/news/images/dr-wall.png b/preview-fall2024-info/news/images/dr-wall.png new file mode 100644 index 000000000..fe0cee2bb Binary files /dev/null and b/preview-fall2024-info/news/images/dr-wall.png differ diff --git a/preview-fall2024-info/news/images/early-late-show.png b/preview-fall2024-info/news/images/early-late-show.png new file mode 100644 index 000000000..55eaf3d78 Binary files /dev/null and b/preview-fall2024-info/news/images/early-late-show.png differ diff --git a/preview-fall2024-info/news/images/elevator-picture.png b/preview-fall2024-info/news/images/elevator-picture.png new file mode 100644 index 000000000..8cf911b2f Binary files /dev/null and b/preview-fall2024-info/news/images/elevator-picture.png differ diff --git a/preview-fall2024-info/news/images/eln.jpeg b/preview-fall2024-info/news/images/eln.jpeg new file mode 100644 index 000000000..a689cce8e Binary files /dev/null and b/preview-fall2024-info/news/images/eln.jpeg differ diff --git a/preview-fall2024-info/news/images/epic-eic-collab.jpg b/preview-fall2024-info/news/images/epic-eic-collab.jpg new file mode 100644 index 000000000..3122b5f8b Binary files /dev/null and b/preview-fall2024-info/news/images/epic-eic-collab.jpg differ diff --git a/preview-fall2024-info/news/images/ericjonasheadshot.png b/preview-fall2024-info/news/images/ericjonasheadshot.png new file mode 100644 index 000000000..5c15efb2d Binary files /dev/null and b/preview-fall2024-info/news/images/ericjonasheadshot.png differ diff --git a/preview-fall2024-info/news/images/european-htcondor-week-2023.png b/preview-fall2024-info/news/images/european-htcondor-week-2023.png new file mode 100644 index 000000000..6bf724e3a Binary files /dev/null and b/preview-fall2024-info/news/images/european-htcondor-week-2023.png differ diff --git a/preview-fall2024-info/news/images/fellow-speaking-1.png b/preview-fall2024-info/news/images/fellow-speaking-1.png new file mode 100644 index 000000000..2597dd8f5 Binary files /dev/null and b/preview-fall2024-info/news/images/fellow-speaking-1.png differ diff --git a/preview-fall2024-info/news/images/fellow-speaking-2.png b/preview-fall2024-info/news/images/fellow-speaking-2.png new file mode 100644 index 000000000..286895040 Binary files /dev/null and b/preview-fall2024-info/news/images/fellow-speaking-2.png differ diff --git a/preview-fall2024-info/news/images/fellows.jpeg b/preview-fall2024-info/news/images/fellows.jpeg new file mode 100644 index 000000000..cc1d1a9e9 Binary files /dev/null and b/preview-fall2024-info/news/images/fellows.jpeg differ diff --git a/preview-fall2024-info/news/images/finalosgschool.png b/preview-fall2024-info/news/images/finalosgschool.png new file mode 100644 index 000000000..f5e0531b9 Binary files /dev/null and b/preview-fall2024-info/news/images/finalosgschool.png differ diff --git a/preview-fall2024-info/news/images/firstmldemoimage-2022-12-19 at 12.34.31 PM.png b/preview-fall2024-info/news/images/firstmldemoimage-2022-12-19 at 12.34.31 PM.png new file mode 100644 index 000000000..92dd004f6 Binary files /dev/null and b/preview-fall2024-info/news/images/firstmldemoimage-2022-12-19 at 12.34.31 PM.png differ diff --git a/preview-fall2024-info/news/images/firstmldemoimage.png 
b/preview-fall2024-info/news/images/firstmldemoimage.png new file mode 100644 index 000000000..92dd004f6 Binary files /dev/null and b/preview-fall2024-info/news/images/firstmldemoimage.png differ diff --git a/preview-fall2024-info/news/images/firstmldemoimage2.png b/preview-fall2024-info/news/images/firstmldemoimage2.png new file mode 100644 index 000000000..424954ade Binary files /dev/null and b/preview-fall2024-info/news/images/firstmldemoimage2.png differ diff --git a/preview-fall2024-info/news/images/gillett-card.jpeg b/preview-fall2024-info/news/images/gillett-card.jpeg new file mode 100644 index 000000000..3d37411d3 Binary files /dev/null and b/preview-fall2024-info/news/images/gillett-card.jpeg differ diff --git a/preview-fall2024-info/news/images/gillett-headshot.png b/preview-fall2024-info/news/images/gillett-headshot.png new file mode 100644 index 000000000..cdb9937e2 Binary files /dev/null and b/preview-fall2024-info/news/images/gillett-headshot.png differ diff --git a/preview-fall2024-info/news/images/gitter.jpeg b/preview-fall2024-info/news/images/gitter.jpeg new file mode 100644 index 000000000..d02b7801d Binary files /dev/null and b/preview-fall2024-info/news/images/gitter.jpeg differ diff --git a/preview-fall2024-info/news/images/google-qvm.jpg b/preview-fall2024-info/news/images/google-qvm.jpg new file mode 100644 index 000000000..1ecef9c2c Binary files /dev/null and b/preview-fall2024-info/news/images/google-qvm.jpg differ diff --git a/preview-fall2024-info/news/images/gpargo.png b/preview-fall2024-info/news/images/gpargo.png new file mode 100644 index 000000000..3a4a122c8 Binary files /dev/null and b/preview-fall2024-info/news/images/gpargo.png differ diff --git a/preview-fall2024-info/news/images/gpargonodes.png b/preview-fall2024-info/news/images/gpargonodes.png new file mode 100644 index 000000000..8860fd951 Binary files /dev/null and b/preview-fall2024-info/news/images/gpargonodes.png differ diff --git a/preview-fall2024-info/news/images/gpupflops.png b/preview-fall2024-info/news/images/gpupflops.png new file mode 100644 index 000000000..9eceeb3f0 Binary files /dev/null and b/preview-fall2024-info/news/images/gpupflops.png differ diff --git a/preview-fall2024-info/news/images/greenbay.jpg b/preview-fall2024-info/news/images/greenbay.jpg new file mode 100644 index 000000000..379655784 Binary files /dev/null and b/preview-fall2024-info/news/images/greenbay.jpg differ diff --git a/preview-fall2024-info/news/images/groom-2.jpg b/preview-fall2024-info/news/images/groom-2.jpg new file mode 100644 index 000000000..adcfa1893 Binary files /dev/null and b/preview-fall2024-info/news/images/groom-2.jpg differ diff --git a/preview-fall2024-info/news/images/groom-title.jpg b/preview-fall2024-info/news/images/groom-title.jpg new file mode 100644 index 000000000..6d5f9879c Binary files /dev/null and b/preview-fall2024-info/news/images/groom-title.jpg differ diff --git a/preview-fall2024-info/news/images/groupphoto.png b/preview-fall2024-info/news/images/groupphoto.png new file mode 100644 index 000000000..918fd7a03 Binary files /dev/null and b/preview-fall2024-info/news/images/groupphoto.png differ diff --git a/preview-fall2024-info/news/images/hannahcard.png b/preview-fall2024-info/news/images/hannahcard.png new file mode 100644 index 000000000..88ea89407 Binary files /dev/null and b/preview-fall2024-info/news/images/hannahcard.png differ diff --git a/preview-fall2024-info/news/images/hannaheadshot.jpg b/preview-fall2024-info/news/images/hannaheadshot.jpg new file mode 100644 index 
000000000..9339d4fd2 Binary files /dev/null and b/preview-fall2024-info/news/images/hannaheadshot.jpg differ diff --git a/preview-fall2024-info/news/images/hannahsisters.jpg b/preview-fall2024-info/news/images/hannahsisters.jpg new file mode 100644 index 000000000..7f15c06ce Binary files /dev/null and b/preview-fall2024-info/news/images/hannahsisters.jpg differ diff --git a/preview-fall2024-info/news/images/hannalab.PNG b/preview-fall2024-info/news/images/hannalab.PNG new file mode 100644 index 000000000..153b28649 Binary files /dev/null and b/preview-fall2024-info/news/images/hannalab.PNG differ diff --git a/preview-fall2024-info/news/images/hannalab.jpg b/preview-fall2024-info/news/images/hannalab.jpg new file mode 100644 index 000000000..72a4708f1 Binary files /dev/null and b/preview-fall2024-info/news/images/hannalab.jpg differ diff --git a/preview-fall2024-info/news/images/header.jpeg b/preview-fall2024-info/news/images/header.jpeg new file mode 100644 index 000000000..d2ed9376f Binary files /dev/null and b/preview-fall2024-info/news/images/header.jpeg differ diff --git a/preview-fall2024-info/news/images/headshot-use-of-osdf.png b/preview-fall2024-info/news/images/headshot-use-of-osdf.png new file mode 100644 index 000000000..c64760721 Binary files /dev/null and b/preview-fall2024-info/news/images/headshot-use-of-osdf.png differ diff --git a/preview-fall2024-info/news/images/high-resolution/User_School_Collage.png b/preview-fall2024-info/news/images/high-resolution/User_School_Collage.png new file mode 100644 index 000000000..6a23fa618 Binary files /dev/null and b/preview-fall2024-info/news/images/high-resolution/User_School_Collage.png differ diff --git a/preview-fall2024-info/news/images/high-resolution/bat-carousel.png b/preview-fall2024-info/news/images/high-resolution/bat-carousel.png new file mode 100644 index 000000000..2cd406095 Binary files /dev/null and b/preview-fall2024-info/news/images/high-resolution/bat-carousel.png differ diff --git a/preview-fall2024-info/news/images/high-resolution/cow-ml-image-cropped.png b/preview-fall2024-info/news/images/high-resolution/cow-ml-image-cropped.png new file mode 100644 index 000000000..b04bf5820 Binary files /dev/null and b/preview-fall2024-info/news/images/high-resolution/cow-ml-image-cropped.png differ diff --git a/preview-fall2024-info/news/images/high-resolution/cow-ml-image.png b/preview-fall2024-info/news/images/high-resolution/cow-ml-image.png new file mode 100644 index 000000000..b04bf5820 Binary files /dev/null and b/preview-fall2024-info/news/images/high-resolution/cow-ml-image.png differ diff --git a/preview-fall2024-info/news/images/high-resolution/dr-wall.png b/preview-fall2024-info/news/images/high-resolution/dr-wall.png new file mode 100644 index 000000000..a7447269d Binary files /dev/null and b/preview-fall2024-info/news/images/high-resolution/dr-wall.png differ diff --git a/preview-fall2024-info/news/images/high-resolution/noaa-banner.png b/preview-fall2024-info/news/images/high-resolution/noaa-banner.png new file mode 100644 index 000000000..5fee62d8a Binary files /dev/null and b/preview-fall2024-info/news/images/high-resolution/noaa-banner.png differ diff --git a/preview-fall2024-info/news/images/horus-logo.png b/preview-fall2024-info/news/images/horus-logo.png new file mode 100644 index 000000000..bc7c41858 Binary files /dev/null and b/preview-fall2024-info/news/images/horus-logo.png differ diff --git a/preview-fall2024-info/news/images/htc24-collage.png b/preview-fall2024-info/news/images/htc24-collage.png new 
file mode 100644 index 000000000..6fea1f4d4 Binary files /dev/null and b/preview-fall2024-info/news/images/htc24-collage.png differ diff --git a/preview-fall2024-info/news/images/intro-collage.jpeg b/preview-fall2024-info/news/images/intro-collage.jpeg new file mode 100644 index 000000000..99033237a Binary files /dev/null and b/preview-fall2024-info/news/images/intro-collage.jpeg differ diff --git a/preview-fall2024-info/news/images/j_ayars_profile_pic.jpg b/preview-fall2024-info/news/images/j_ayars_profile_pic.jpg new file mode 100644 index 000000000..0c9843d32 Binary files /dev/null and b/preview-fall2024-info/news/images/j_ayars_profile_pic.jpg differ diff --git a/preview-fall2024-info/news/images/jobsubmission.png b/preview-fall2024-info/news/images/jobsubmission.png new file mode 100644 index 000000000..a3d6366ae Binary files /dev/null and b/preview-fall2024-info/news/images/jobsubmission.png differ diff --git a/preview-fall2024-info/news/images/jobsubmitfile.png b/preview-fall2024-info/news/images/jobsubmitfile.png new file mode 100644 index 000000000..5f9b298a9 Binary files /dev/null and b/preview-fall2024-info/news/images/jobsubmitfile.png differ diff --git a/preview-fall2024-info/news/images/joes-cat.jpg b/preview-fall2024-info/news/images/joes-cat.jpg new file mode 100644 index 000000000..216a2bd31 Binary files /dev/null and b/preview-fall2024-info/news/images/joes-cat.jpg differ diff --git a/preview-fall2024-info/news/images/jonBlank3[5] copy.jpg b/preview-fall2024-info/news/images/jonBlank3[5] copy.jpg new file mode 100644 index 000000000..4a2910321 Binary files /dev/null and b/preview-fall2024-info/news/images/jonBlank3[5] copy.jpg differ diff --git a/preview-fall2024-info/news/images/jonblank.jpg b/preview-fall2024-info/news/images/jonblank.jpg new file mode 100644 index 000000000..4a2910321 Binary files /dev/null and b/preview-fall2024-info/news/images/jonblank.jpg differ diff --git a/preview-fall2024-info/news/images/kayak-picture.png b/preview-fall2024-info/news/images/kayak-picture.png new file mode 100644 index 000000000..c18a298b6 Binary files /dev/null and b/preview-fall2024-info/news/images/kayak-picture.png differ diff --git a/preview-fall2024-info/news/images/kayaking.jpeg b/preview-fall2024-info/news/images/kayaking.jpeg new file mode 100644 index 000000000..c82361578 Binary files /dev/null and b/preview-fall2024-info/news/images/kayaking.jpeg differ diff --git a/preview-fall2024-info/news/images/kevin.jpeg b/preview-fall2024-info/news/images/kevin.jpeg new file mode 100644 index 000000000..af711297c Binary files /dev/null and b/preview-fall2024-info/news/images/kevin.jpeg differ diff --git a/preview-fall2024-info/news/images/keynote-speaking.png b/preview-fall2024-info/news/images/keynote-speaking.png new file mode 100644 index 000000000..8a52599e1 Binary files /dev/null and b/preview-fall2024-info/news/images/keynote-speaking.png differ diff --git a/preview-fall2024-info/news/images/lacysnapshot.jpg b/preview-fall2024-info/news/images/lacysnapshot.jpg new file mode 100644 index 000000000..07af2e024 Binary files /dev/null and b/preview-fall2024-info/news/images/lacysnapshot.jpg differ diff --git a/preview-fall2024-info/news/images/ligo20160211d-smaller-150x150.jpg b/preview-fall2024-info/news/images/ligo20160211d-smaller-150x150.jpg new file mode 100644 index 000000000..914b2e619 Binary files /dev/null and b/preview-fall2024-info/news/images/ligo20160211d-smaller-150x150.jpg differ diff --git a/preview-fall2024-info/news/images/lindleyimage.png 
b/preview-fall2024-info/news/images/lindleyimage.png new file mode 100644 index 000000000..0fbedafeb Binary files /dev/null and b/preview-fall2024-info/news/images/lindleyimage.png differ diff --git a/preview-fall2024-info/news/images/lombardihiking.png b/preview-fall2024-info/news/images/lombardihiking.png new file mode 100644 index 000000000..5a0e3c57b Binary files /dev/null and b/preview-fall2024-info/news/images/lombardihiking.png differ diff --git a/preview-fall2024-info/news/images/lombardiuganda.png b/preview-fall2024-info/news/images/lombardiuganda.png new file mode 100644 index 000000000..3eb9dac1b Binary files /dev/null and b/preview-fall2024-info/news/images/lombardiuganda.png differ diff --git a/preview-fall2024-info/news/images/maizespalding.jpg b/preview-fall2024-info/news/images/maizespalding.jpg new file mode 100644 index 000000000..85b0750fe Binary files /dev/null and b/preview-fall2024-info/news/images/maizespalding.jpg differ diff --git a/preview-fall2024-info/news/images/mars-image.jpg b/preview-fall2024-info/news/images/mars-image.jpg new file mode 100644 index 000000000..e7e8232f5 Binary files /dev/null and b/preview-fall2024-info/news/images/mars-image.jpg differ diff --git a/preview-fall2024-info/news/images/materials-science.jpg b/preview-fall2024-info/news/images/materials-science.jpg new file mode 100644 index 000000000..22501de3b Binary files /dev/null and b/preview-fall2024-info/news/images/materials-science.jpg differ diff --git a/preview-fall2024-info/news/images/mattchristie.png b/preview-fall2024-info/news/images/mattchristie.png new file mode 100644 index 000000000..5dda7d777 Binary files /dev/null and b/preview-fall2024-info/news/images/mattchristie.png differ diff --git a/preview-fall2024-info/news/images/mental_health.jpeg b/preview-fall2024-info/news/images/mental_health.jpeg new file mode 100644 index 000000000..0f05402a8 Binary files /dev/null and b/preview-fall2024-info/news/images/mental_health.jpeg differ diff --git a/preview-fall2024-info/news/images/mental_health_banner.jpeg b/preview-fall2024-info/news/images/mental_health_banner.jpeg new file mode 100644 index 000000000..610d7e910 Binary files /dev/null and b/preview-fall2024-info/news/images/mental_health_banner.jpeg differ diff --git a/preview-fall2024-info/news/images/miron-livny-2021.jpeg b/preview-fall2024-info/news/images/miron-livny-2021.jpeg new file mode 100644 index 000000000..2bc48838b Binary files /dev/null and b/preview-fall2024-info/news/images/miron-livny-2021.jpeg differ diff --git a/preview-fall2024-info/news/images/miron-speaking.png b/preview-fall2024-info/news/images/miron-speaking.png new file mode 100644 index 000000000..3f86ad8db Binary files /dev/null and b/preview-fall2024-info/news/images/miron-speaking.png differ diff --git a/preview-fall2024-info/news/images/mironlivny.jpg b/preview-fall2024-info/news/images/mironlivny.jpg new file mode 100644 index 000000000..bba9b716f Binary files /dev/null and b/preview-fall2024-info/news/images/mironlivny.jpg differ diff --git a/preview-fall2024-info/news/images/mldemowhatspossible at 1.05.07 PM.png b/preview-fall2024-info/news/images/mldemowhatspossible at 1.05.07 PM.png new file mode 100644 index 000000000..e6cb99a60 Binary files /dev/null and b/preview-fall2024-info/news/images/mldemowhatspossible at 1.05.07 PM.png differ diff --git a/preview-fall2024-info/news/images/mldemowhatspossible.png b/preview-fall2024-info/news/images/mldemowhatspossible.png new file mode 100644 index 000000000..e6cb99a60 Binary files /dev/null and 
b/preview-fall2024-info/news/images/mldemowhatspossible.png differ diff --git a/preview-fall2024-info/news/images/molcryst.png b/preview-fall2024-info/news/images/molcryst.png new file mode 100644 index 000000000..a8b2ebfd2 Binary files /dev/null and b/preview-fall2024-info/news/images/molcryst.png differ diff --git a/preview-fall2024-info/news/images/mrion-talking.jpeg b/preview-fall2024-info/news/images/mrion-talking.jpeg new file mode 100644 index 000000000..638f5e518 Binary files /dev/null and b/preview-fall2024-info/news/images/mrion-talking.jpeg differ diff --git a/preview-fall2024-info/news/images/neha.jpeg b/preview-fall2024-info/news/images/neha.jpeg new file mode 100644 index 000000000..3579f0938 Binary files /dev/null and b/preview-fall2024-info/news/images/neha.jpeg differ diff --git a/preview-fall2024-info/news/images/new-award.jpeg b/preview-fall2024-info/news/images/new-award.jpeg new file mode 100644 index 000000000..76acd234d Binary files /dev/null and b/preview-fall2024-info/news/images/new-award.jpeg differ diff --git a/preview-fall2024-info/news/images/noaa-banner.png b/preview-fall2024-info/news/images/noaa-banner.png new file mode 100644 index 000000000..5fee62d8a Binary files /dev/null and b/preview-fall2024-info/news/images/noaa-banner.png differ diff --git a/preview-fall2024-info/news/images/nrao-clean-image.png b/preview-fall2024-info/news/images/nrao-clean-image.png new file mode 100644 index 000000000..70e233135 Binary files /dev/null and b/preview-fall2024-info/news/images/nrao-clean-image.png differ diff --git a/preview-fall2024-info/news/images/nrao-dirty-image.png b/preview-fall2024-info/news/images/nrao-dirty-image.png new file mode 100644 index 000000000..1c5195445 Binary files /dev/null and b/preview-fall2024-info/news/images/nrao-dirty-image.png differ diff --git a/preview-fall2024-info/news/images/nrao-map-clear.jpeg b/preview-fall2024-info/news/images/nrao-map-clear.jpeg new file mode 100644 index 000000000..1c67a4fd3 Binary files /dev/null and b/preview-fall2024-info/news/images/nrao-map-clear.jpeg differ diff --git a/preview-fall2024-info/news/images/nrao-vla.png b/preview-fall2024-info/news/images/nrao-vla.png new file mode 100644 index 000000000..1236fa431 Binary files /dev/null and b/preview-fall2024-info/news/images/nrao-vla.png differ diff --git a/preview-fall2024-info/news/images/nrao_chtc_collab_map.jpeg b/preview-fall2024-info/news/images/nrao_chtc_collab_map.jpeg new file mode 100644 index 000000000..47ad49f92 Binary files /dev/null and b/preview-fall2024-info/news/images/nrao_chtc_collab_map.jpeg differ diff --git a/preview-fall2024-info/news/images/nsf-speaking.png b/preview-fall2024-info/news/images/nsf-speaking.png new file mode 100644 index 000000000..9b7cdd549 Binary files /dev/null and b/preview-fall2024-info/news/images/nsf-speaking.png differ diff --git a/preview-fall2024-info/news/images/nt.jpeg b/preview-fall2024-info/news/images/nt.jpeg new file mode 100644 index 000000000..2c76d1473 Binary files /dev/null and b/preview-fall2024-info/news/images/nt.jpeg differ diff --git a/preview-fall2024-info/news/images/numgpus.png b/preview-fall2024-info/news/images/numgpus.png new file mode 100644 index 000000000..4e4523762 Binary files /dev/null and b/preview-fall2024-info/news/images/numgpus.png differ diff --git a/preview-fall2024-info/news/images/osg-school-2023-01.jpg b/preview-fall2024-info/news/images/osg-school-2023-01.jpg new file mode 100644 index 000000000..66f806ba2 Binary files /dev/null and 
b/preview-fall2024-info/news/images/osg-school-2023-01.jpg differ diff --git a/preview-fall2024-info/news/images/osgmap.png b/preview-fall2024-info/news/images/osgmap.png new file mode 100644 index 000000000..10a205cb2 Binary files /dev/null and b/preview-fall2024-info/news/images/osgmap.png differ diff --git a/preview-fall2024-info/news/images/osgschool2023 b/preview-fall2024-info/news/images/osgschool2023 new file mode 100644 index 000000000..02db855a4 Binary files /dev/null and b/preview-fall2024-info/news/images/osgschool2023 differ diff --git a/preview-fall2024-info/news/images/osgschool2023.png b/preview-fall2024-info/news/images/osgschool2023.png new file mode 100644 index 000000000..cf5392979 Binary files /dev/null and b/preview-fall2024-info/news/images/osgschool2023.png differ diff --git a/preview-fall2024-info/news/images/ospool-comp.jpg b/preview-fall2024-info/news/images/ospool-comp.jpg new file mode 100644 index 000000000..842cb536e Binary files /dev/null and b/preview-fall2024-info/news/images/ospool-comp.jpg differ diff --git a/preview-fall2024-info/news/images/ospool-con-map.png b/preview-fall2024-info/news/images/ospool-con-map.png new file mode 100644 index 000000000..7ef0923fd Binary files /dev/null and b/preview-fall2024-info/news/images/ospool-con-map.png differ diff --git a/preview-fall2024-info/news/images/pekowsy-150x150.jpeg b/preview-fall2024-info/news/images/pekowsy-150x150.jpeg new file mode 100644 index 000000000..3208bc702 Binary files /dev/null and b/preview-fall2024-info/news/images/pekowsy-150x150.jpeg differ diff --git a/preview-fall2024-info/news/images/pnas.2312909120fig01.jpg b/preview-fall2024-info/news/images/pnas.2312909120fig01.jpg new file mode 100644 index 000000000..aca51b81a Binary files /dev/null and b/preview-fall2024-info/news/images/pnas.2312909120fig01.jpg differ diff --git a/preview-fall2024-info/news/images/pp.jpeg b/preview-fall2024-info/news/images/pp.jpeg new file mode 100644 index 000000000..61ea60870 Binary files /dev/null and b/preview-fall2024-info/news/images/pp.jpeg differ diff --git a/preview-fall2024-info/news/images/pratham.jpeg b/preview-fall2024-info/news/images/pratham.jpeg new file mode 100644 index 000000000..7d9bb561a Binary files /dev/null and b/preview-fall2024-info/news/images/pratham.jpeg differ diff --git a/preview-fall2024-info/news/images/presentations.jpeg b/preview-fall2024-info/news/images/presentations.jpeg new file mode 100644 index 000000000..698677d1a Binary files /dev/null and b/preview-fall2024-info/news/images/presentations.jpeg differ diff --git a/preview-fall2024-info/news/images/rachellombardi.jpg b/preview-fall2024-info/news/images/rachellombardi.jpg new file mode 100644 index 000000000..acac1723d Binary files /dev/null and b/preview-fall2024-info/news/images/rachellombardi.jpg differ diff --git a/preview-fall2024-info/news/images/recordcores.png b/preview-fall2024-info/news/images/recordcores.png new file mode 100644 index 000000000..2264cee59 Binary files /dev/null and b/preview-fall2024-info/news/images/recordcores.png differ diff --git a/preview-fall2024-info/news/images/research-computing-partnership.jpeg b/preview-fall2024-info/news/images/research-computing-partnership.jpeg new file mode 100644 index 000000000..a5fedcd96 Binary files /dev/null and b/preview-fall2024-info/news/images/research-computing-partnership.jpeg differ diff --git a/preview-fall2024-info/news/images/researchcomp-pelican.jpg b/preview-fall2024-info/news/images/researchcomp-pelican.jpg new file mode 100644 index 
000000000..8e9539fbf Binary files /dev/null and b/preview-fall2024-info/news/images/researchcomp-pelican.jpg differ diff --git a/preview-fall2024-info/news/images/resilience-hero-large.jpeg b/preview-fall2024-info/news/images/resilience-hero-large.jpeg new file mode 100644 index 000000000..394ebebf6 Binary files /dev/null and b/preview-fall2024-info/news/images/resilience-hero-large.jpeg differ diff --git a/preview-fall2024-info/news/images/robotsoccer.jpeg b/preview-fall2024-info/news/images/robotsoccer.jpeg new file mode 100644 index 000000000..874304c3b Binary files /dev/null and b/preview-fall2024-info/news/images/robotsoccer.jpeg differ diff --git a/preview-fall2024-info/news/images/school.png b/preview-fall2024-info/news/images/school.png new file mode 100644 index 000000000..02db855a4 Binary files /dev/null and b/preview-fall2024-info/news/images/school.png differ diff --git a/preview-fall2024-info/news/images/sidebyside.jpg b/preview-fall2024-info/news/images/sidebyside.jpg new file mode 100644 index 000000000..9fefe67c4 Binary files /dev/null and b/preview-fall2024-info/news/images/sidebyside.jpg differ diff --git a/preview-fall2024-info/news/images/sites-use-of-osdf.png b/preview-fall2024-info/news/images/sites-use-of-osdf.png new file mode 100644 index 000000000..77daa15d2 Binary files /dev/null and b/preview-fall2024-info/news/images/sites-use-of-osdf.png differ diff --git a/preview-fall2024-info/news/images/spaldinglab.jpg b/preview-fall2024-info/news/images/spaldinglab.jpg new file mode 100644 index 000000000..d1b2a3216 Binary files /dev/null and b/preview-fall2024-info/news/images/spaldinglab.jpg differ diff --git a/preview-fall2024-info/news/images/submittingjobs.png b/preview-fall2024-info/news/images/submittingjobs.png new file mode 100644 index 000000000..96f7feeb5 Binary files /dev/null and b/preview-fall2024-info/news/images/submittingjobs.png differ diff --git a/preview-fall2024-info/news/images/terawagner.jpg b/preview-fall2024-info/news/images/terawagner.jpg new file mode 100644 index 000000000..156482911 Binary files /dev/null and b/preview-fall2024-info/news/images/terawagner.jpg differ diff --git a/preview-fall2024-info/news/images/todd-t-article-0.jpg b/preview-fall2024-info/news/images/todd-t-article-0.jpg new file mode 100644 index 000000000..f828a121f Binary files /dev/null and b/preview-fall2024-info/news/images/todd-t-article-0.jpg differ diff --git a/preview-fall2024-info/news/images/todd-t-article-1.png b/preview-fall2024-info/news/images/todd-t-article-1.png new file mode 100644 index 000000000..bc7fd09d0 Binary files /dev/null and b/preview-fall2024-info/news/images/todd-t-article-1.png differ diff --git a/preview-fall2024-info/news/images/todd-t-article-2.jpg b/preview-fall2024-info/news/images/todd-t-article-2.jpg new file mode 100644 index 000000000..d41710a5d Binary files /dev/null and b/preview-fall2024-info/news/images/todd-t-article-2.jpg differ diff --git a/preview-fall2024-info/news/images/todd-t-article-6.jpg b/preview-fall2024-info/news/images/todd-t-article-6.jpg new file mode 100644 index 000000000..d6760216b Binary files /dev/null and b/preview-fall2024-info/news/images/todd-t-article-6.jpg differ diff --git a/preview-fall2024-info/news/images/tree.jpg b/preview-fall2024-info/news/images/tree.jpg new file mode 100644 index 000000000..2c0f8e391 Binary files /dev/null and b/preview-fall2024-info/news/images/tree.jpg differ diff --git a/preview-fall2024-info/news/images/ucsd-public-relations.png 
b/preview-fall2024-info/news/images/ucsd-public-relations.png new file mode 100644 index 000000000..e76d80ca2 Binary files /dev/null and b/preview-fall2024-info/news/images/ucsd-public-relations.png differ diff --git a/preview-fall2024-info/news/images/unnamed_01.png b/preview-fall2024-info/news/images/unnamed_01.png new file mode 100644 index 000000000..fa6d90b52 Binary files /dev/null and b/preview-fall2024-info/news/images/unnamed_01.png differ diff --git a/preview-fall2024-info/news/images/veritas_1.png b/preview-fall2024-info/news/images/veritas_1.png new file mode 100644 index 000000000..a9eda03a9 Binary files /dev/null and b/preview-fall2024-info/news/images/veritas_1.png differ diff --git a/preview-fall2024-info/news/images/veritas_2.png b/preview-fall2024-info/news/images/veritas_2.png new file mode 100644 index 000000000..857e73cc3 Binary files /dev/null and b/preview-fall2024-info/news/images/veritas_2.png differ diff --git a/preview-fall2024-info/news/images/vla-hubble-ultra-deep.png b/preview-fall2024-info/news/images/vla-hubble-ultra-deep.png new file mode 100644 index 000000000..1236fa431 Binary files /dev/null and b/preview-fall2024-info/news/images/vla-hubble-ultra-deep.png differ diff --git a/preview-fall2024-info/news/images/wilcram.jpeg b/preview-fall2024-info/news/images/wilcram.jpeg new file mode 100644 index 000000000..e9aed2abf Binary files /dev/null and b/preview-fall2024-info/news/images/wilcram.jpeg differ diff --git a/preview-fall2024-info/news/robotsoccer.jpeg b/preview-fall2024-info/news/robotsoccer.jpeg new file mode 100644 index 000000000..874304c3b Binary files /dev/null and b/preview-fall2024-info/news/robotsoccer.jpeg differ diff --git a/preview-fall2024-info/newsletter.html b/preview-fall2024-info/newsletter.html new file mode 100644 index 000000000..6a351693d --- /dev/null +++ b/preview-fall2024-info/newsletter.html @@ -0,0 +1,471 @@ + + + + + + +CHTC Newsletter + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ CHTC Newsletter +

+

+ The CHTC Newsletter is a quarterly email that includes information about upcoming events, + training opportunities, and other news from the CHTC. If you would like to receive + the CHTC Newsletter, please fill out the form below. +

+ +
+ + + + +
+ + +
+
+ + +
+
+ + +
+
+ +
+ +
+ + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/noaa-on-the-ospool.html b/preview-fall2024-info/noaa-on-the-ospool.html new file mode 100644 index 000000000..7795e73ea --- /dev/null +++ b/preview-fall2024-info/noaa-on-the-ospool.html @@ -0,0 +1,373 @@ + + + + + + +NOAA funded marine scientist uses OSPool access to high throughput computing to explode her boundaries of research + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ NOAA-funded marine scientist uses OSPool access to high throughput computing to expand the boundaries of her research

+

Dr. Carrie Wall, a research scientist at the University of Colorado Boulder, shares how access to OSPool resources has allowed her team to expand the scope of their research and to fail, unconstrained by the cost of computing in the cloud and the restraints that such costs place on research.

+ +
+Water column sonar data collected on the NOAA Okeanos Explorer in the North Atlantic Ocean: One of NOAA’s research cruises. +
Water column sonar data collected on the NOAA Okeanos Explorer in the North Atlantic Ocean: One of NOAA’s research cruises.
+
+ +

A marine scientist faced the daunting challenge of processing sonar data from 65 research cruises spanning 20 years, totaling over 100,000 files. The researcher, Dr. Carrie Wall, braced herself for a grueling 30-week endeavor of single-stream, desktop-based processing. However, good fortune intervened at a National Discovery Cloud for Climate (NDC-C) conference in January 2024, when she crossed paths with Brian Bockelman, the principal investigator (PI) of the Pelican Project and a Co-PI of the PATh Project.

+ +

Wall discussed with Bockelman the challenges of converting decades’ worth of sonar datasets into a format suitable for AI analysis, a crucial step for her NSF-funded project through the NDC-C. This initiative aimed to develop the cyberinfrastructure essential for implementing scalable self-supervised machine learning on the extensive water column sonar data accumulated over the years. “We all went around and did five-minute presentations explaining ‘here’s what I do, here’s what I work on,’ almost like speed dating,” recounted Bockelman. “Listening to her talk, it was like, ‘this is a great high throughput computing example.’” Recognizing the scale of Wall’s project, Bockelman introduced her to the OSPool, a shared computing resource freely available to researchers affiliated with US academic institutions. He observed that Wall’s computing style aligned seamlessly with the OSPool’s capabilities and would relieve her sonar-processing bottleneck.

+ +

With Bockelman’s encouragement, Wall and her team’s software developer, Rudy Klucik, easily created accounts and began adapting their workflow for high throughput computing. “The process was super easy and very accommodating. Rachel Lombardi, a Research Computing Facilitator for the Center for High Throughput Computing, walked me through all the details, answered my technical questions, and was very welcoming. It was a really nice onboarding,” enthused Klucik. What followed was nothing short of a paradigm shift.

+ +
+ CIRES research scientist Dr. Carrie Wall +
CIRES research scientist Dr. Carrie Wall
+
+ +

Within the walls of the University of Colorado Boulder lies CIRES: The Cooperative Institute for Research in Environmental Sciences, a partnership between the National Oceanic and Atmospheric Administration (NOAA) and the university itself. CIRES employs a workforce of over 800 scientists and staff, actively involved in various aspects of NOAA’s mission-critical endeavors. Among them are Wall and Klucik, both members of NOAA’s team. NOAA’s mission centers on supporting healthy oceans. Dr. Wall has dedicated the past 11 years to leading the development of national archives for water column sonar data, a task undertaken through the National Centers for Environmental Information (NCEI), NOAA’s archival arm.

+ +

Wall and her team have archived over 280 TB of water column sonar data at NCEI, which serves not only NOAA’s scientists but also other agencies and academic institutions. There was a significant issue, however: the data existed solely in its native, proprietary, and exceedingly complex industry format. Despite being hosted on Amazon Web Services (AWS) for accessibility, as Wall explained, “a lot of expert knowledge is needed to even open and read these files.”

+ +

“NOAA scientists, mostly from the National Marine Fisheries Service (NOAA Fisheries), have collected [data] in all U.S. waters, from the Arctic Ocean to the Caribbean and off the entire U.S. coastline. In just the Gulf of Maine alone, NOAA Fisheries scientists have collected over 20 years of data going back to 1998. All of these data have been archived, so not only do we have a very large volume of data, but also a very long time series covering critical habitats,” Wall explained. “The majority of these fascinating data have been used to support fishery stock assessments,” Wall emphasized. “There’s a lot of these data, and in collaboration with experts we want to find out more about them.”

+ +

With the help of the OSPool, Klucik successfully developed a workflow that he now executes smoothly. It involves reading files from an AWS bucket, processing and converting them into the cloud-native Zarr format, and then writing that data out to a publicly accessible bucket available under the NOAA Open Data Dissemination program. Wall added, “This will now serve as our input for the AI model.”
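In outline, each OSPool job can run one such conversion end to end. The sketch below is illustrative only: the bucket names are made up, and it pretends the raw file is directly readable by xarray, whereas the real pipeline first parses a proprietary sonar format.

```python
import sys

import boto3
import xarray as xr

RAW_BUCKET = "example-raw-sonar"     # assumption: not the project's real bucket
ZARR_BUCKET = "example-zarr-output"  # assumption: not the project's real bucket

def convert_one(key: str) -> None:
    """Download one raw file, convert it, and publish it as a Zarr store."""
    local_path = "/tmp/" + key.rsplit("/", 1)[-1]
    boto3.client("s3").download_file(RAW_BUCKET, key, local_path)
    # For illustration we open the file with xarray; the real pipeline uses a
    # domain-specific parser for the proprietary sonar format at this step.
    ds = xr.open_dataset(local_path)
    # Writing straight to s3:// requires the s3fs and zarr packages.
    ds.to_zarr(f"s3://{ZARR_BUCKET}/{key}.zarr", mode="w")

if __name__ == "__main__":
    convert_one(sys.argv[1])  # one raw file per HTC job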

+ +

Before discovering the OSPool, the original plan was to “fully utilize a cloud native processing pipeline, mainly composed of AWS Lambdas to do all our data conversion,” described Klucik. “One common misconception is that cloud computing is cheaper than traditional computing; if the technical elements are all aligned properly it can be cheap, but in a lot of situations it’s still extremely expensive. In the back of our minds we were afraid that it might even be cost prohibitive to process the archive as a whole.”

+ +

However, it is important to acknowledge that getting set up on the OSPool involved a bit of a learning curve. “I was completely new to high throughput computing infrastructure and didn’t understand how the processing worked,” recalled Klucik. “So, a lot of my initial time was spent running ‘hello world’ examples to better understand the functionality. I started with one job, then scaled up to 100 and eventually 1,000 to get the concurrency we were looking to achieve. It involved a lot of trial and error to get everything right. It took about a month before I finally managed to run the full catalog of data properly.” Klucik noted that he was aware of the available resources, saying, “The OSPool documentation served as an invaluable resource for getting me oriented in a new computing environment.”
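That one-to-100-to-1,000 scale-up pattern maps naturally onto HTCondor's one-job-per-file model. The following sketch uses the HTCondor Python bindings with hypothetical script and file names; it is not the team's actual submit description, just the general shape of such a submission.

```python
import htcondor

# One conversion job per raw sonar file; the script name, resource
# requests, and list file below are illustrative placeholders.
sub = htcondor.Submit({
    "executable": "convert_sonar.py",
    "arguments": "$(sonar_file)",
    "log": "logs/convert.log",
    "output": "logs/convert.$(Cluster).$(Process).out",
    "error": "logs/convert.$(Cluster).$(Process).err",
    "request_cpus": "1",
    "request_memory": "2GB",
    "request_disk": "4GB",
})

# Start with one line in the list file, then 100, then 1,000: the
# submission code is identical at every scale.
with open("sonar_files.txt") as f:
    itemdata = [{"sonar_file": line.strip()} for line in f if line.strip()]

schedd = htcondor.Schedd()
result = schedd.submit(sub, itemdata=iter(itemdata))
print(f"Submitted cluster {result.cluster()} with {len(itemdata)} jobs")
```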

+ +

Although initially tasked with processing 100,000 files, their workflow using the OSPool has since surged beyond 400,000 files—an accomplishment that would have been financially daunting in a traditional cloud environment. Wall emphasized that “what OSPool has allowed us to do is fail, which is really, really good. Before [using the] OSPool, we started by processing a couple of cruises with a very small number of files in the cloud to be cost-effective; we didn’t want to make costly mistakes. Being able to use OSPool to iterate and strengthen our process allowed us to then scale to the volume of data and number of files that we need to process. I don’t know where we would be without OSPool, but it would’ve cost us tens of thousands of dollars. We didn’t have to sacrifice for a lesser workflow, one that we didn’t improve upon because it would have cost us more money. I’m really excited about where OSPool has allowed us to go, and now we can take that next step to say ‘okay, we have our foundation, which is our data and a great format, and we can build our models and additional workflows.’”

+ +

Wall’s testimony underscores OSPool’s role not just as computing capacity but as a catalyst for innovation, enabling teams to push boundaries and realize their full potential in research and model development.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/nrao.html b/preview-fall2024-info/nrao.html new file mode 100644 index 000000000..059cfe3ca --- /dev/null +++ b/preview-fall2024-info/nrao.html @@ -0,0 +1,421 @@ + + + + + + +Through the use of high throughput computing, NRAO delivers one of the deepest radio images of space + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Through the use of high throughput computing, NRAO delivers one of the deepest radio images of space +

+

The National Radio Astronomy Observatory’s collaboration with the NSF-funded Partnership to Advance Throughput Computing (PATh; NSF grant #2030508) and the Pelican Project (NSF grant #2331480) has successfully imaged deep space and created a first-of-its-kind nationally distributed workflow model for data-intensive scientific investigations.

+ +

Ten years ago, the National Radio Astronomy Observatory (NRAO) pointed its Very Large Array (VLA) telescopes toward a well-studied portion of the sky, searching for the oldest view of the universe. Deeper views reveal older structures in space, since their light takes longer to travel through space and be picked up by telescopes. Radio astronomy can go even further, detecting structures beyond visible light. The VLA telescopes generated so much data that a single image of a portion of the sky comprised two terabytes of data. Without the computing capacity to image the complete data set, it sat largely unprocessed — until now.

+ +

Researchers at NRAO knew that attempting to process this entire data set in-house was impractical. A previous computing run in 2016 using only a subset of this data took nearly two weeks of active processing. The high sensitivity of radio images requires a vast amount of computing to reach a final product, noted Felipe Madsen, an +NRAO software engineer. The VLA telescopes are interferometers, meaning they point two antennas at the same portion of the +sky; the differences in what these antennas provide eventually result in an image, Madsen explains. NRAO models and re-models +the data to decrease the noise level until the noise is indistinguishable from structures in space. “This project is a lot +more data-intensive than most other projects,” Madsen said.

+ +

Curious about how high-throughput computing (HTC) could enhance its capacity to process data from the VLA, NRAO joined +forces with the Center for High Throughput Computing (CHTC) in 2018. After learning about +what HTC could accomplish, NRAO began executing trial runs in 2019, experimenting with HTC. “Four years ago, we were +beginning to use GPU software to process our data,” Madsen explained. “From the beginning, we understood that to be +compatible with HTC we needed to make changes to our systems.”

+ +

Each team learned from the other and made improvements based on those insights. Greg Thain, an HTCondor Core Developer for the CHTC, met with NRAO weekly to discuss HTC and changes both parties could make. These weekly meetings resulted in the HTCondor team making changes to the software, eventually improving the experience of other users, he said. OSG Software Area Coordinator of CHTC Brian Lin helped NRAO manage their distributed infrastructure of resources across the country and transition workflows from CPUs to GPUs to make them more compatible with HTC. Through distributed HTC, NRAO was able to run workflows across the country via the Open Science Pool (OSPool) and the PATh Facility.

+ +

At NRAO, Madsen developed the software to interface the scientific software in the LibRA package, developed by NRAO’s Algorithms Research & Development Group, with the CHTC infrastructure software. This separation of software allowed the two teams to solve problems in real time as the data began to transfer across sites nationwide.

+ +

By December 2023, both parties were ready to tackle the VLA telescope deep sky data using HTC. Transitioning workflows to nationwide resources led to data movement issues, with data struggling to move efficiently among distributed resources. The December 2023 image processing run relied upon resources from the Open Science Data Federation (OSDF) and the recently funded Pelican Project to speed up data transfers across sites. Brian Bockelman, PI of the Pelican Project, and his team helped NRAO improve data movement using the OSDF. “Both teams were working to solve problems as they were happening,” Madsen recounted. “That made for a very successful collaboration in this process.”

+ +
+ Image of
+    space +
The final product, looking into deep space.
+
+ +

Ultimately, the imaging process was 300 times faster than without HTC, NRAO reported in a press release describing the project. What had previously taken two weeks now took only two hours to create the final result. The final image distilled nine terabytes of raw data into a single one-gigabyte product. By the end, the collaboration had produced one of the deepest radio images of the Hubble Ultra Deep Field.

+ +

The collaboration that led to this imaging is even bigger than NRAO and CHTC. +The OSPool, which provided some of the computing capacity for the project, +is supported by campuses and institutions across the country that share their excess capacity with the pool +that NRAO utilized. For this project, 13 campuses contributed computing capacity, from small institutions +like Emporia State University to larger ones like San Diego State University.

+ +
+ Map of United States, line connecting 13 locations involved in data processing. +
A map of contributors across the OSPool and PATh Facility. Image courtesy of S. Dagnello, NRAO/AUI/NSF +
+
+


+ +

The December 2023 run and the working relationship between CHTC and NRAO revolutionized the information available to astronomers and proved that HTC is a viable option for the field. “It’s useful to do this run once. What’s exciting is doing it 30,000 times for the entire sky,” Bockelman said. Although previous radio astronomy imaging workflows utilized HTC, this run was the first to image data on a distributed workflow nationwide from start to finish. Moving forward, NRAO and CHTC will continue working toward covering the entire area of the sky seen by the VLA telescopes.

+ +

Madsen is enthusiastic about continuing this project and about how the use of HTC is revolutionizing astronomy: “I’ve always felt like, in this project, we are at the cutting edge of the current knowledge for making this kind of imaging. On the astronomy side, we can access a lot of new information with this image,” he said. “We have also imaged a data set that was previously impractical to image.”

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/nsdf/.well-known/issuer.jwks b/preview-fall2024-info/nsdf/.well-known/issuer.jwks new file mode 100644 index 000000000..ef85b011e --- /dev/null +++ b/preview-fall2024-info/nsdf/.well-known/issuer.jwks @@ -0,0 +1,12 @@ +{ + "keys": [ + { + "alg": "ES256", + "crv": "P-256", + "kid": "INxI6xRWvyqKUtgHIxx8ZZJZII_zh69o_LSue0e5tSc", + "kty": "EC", + "x": "q5Y-Da52P9b0TFwCf80ewZrn9-nkmWWeHW1orQ6ik_A", + "y": "FQgrlQun7AsfIHm8RpQFaSMn7TYMJTowRGl6yUsvziQ" + } + ] +} diff --git a/preview-fall2024-info/nsdf/.well-known/openid-configuration b/preview-fall2024-info/nsdf/.well-known/openid-configuration new file mode 100644 index 000000000..442a86a0e --- /dev/null +++ b/preview-fall2024-info/nsdf/.well-known/openid-configuration @@ -0,0 +1,4 @@ +{ + "issuer": "https://chtc.cs.wisc.edu/nsdf", + "jwks_uri": "https://chtc.cs.wisc.edu/nsdf/.well-known/issuer.jwks" +} diff --git a/preview-fall2024-info/osg-helps-ligo-scientists-confirm-einsteins-last-unproven-theory.html b/preview-fall2024-info/osg-helps-ligo-scientists-confirm-einsteins-last-unproven-theory.html new file mode 100644 index 000000000..2e111bc47 --- /dev/null +++ b/preview-fall2024-info/osg-helps-ligo-scientists-confirm-einsteins-last-unproven-theory.html @@ -0,0 +1,420 @@ + + + + + + +OSG helps LIGO scientists confirm Einstein's unproven theory + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ OSG helps LIGO scientists confirm Einstein's unproven theory +

+

Albert Einstein first posed the idea of gravitational waves in his general theory of relativity just over a century ago. But until now, they had never been observed directly. For the first time, scientists with the Laser Interferometer Gravitational-Wave Observatory (LIGO) Scientific Collaboration (LSC) have observed ripples in the fabric of spacetime called gravitational waves.

+ +
+
+
+ ligo20160211d-smaller +
+ Image Courtesy Caltech/MIT/LIGO Laboratory +
+
+ +

LIGO consists of two observatories within the United States—one in Hanford, Washington and the other in Livingston, Louisiana—separated by 1,865 miles. LIGO’s detectors search for gravitational waves from deep space. With two detectors, researchers can use differences in the wave’s arrival times to constrain the source location in the sky. LIGO’s first data run of its advanced gravitational wave detectors began in September 2015 and ran through January 12, 2016. The first gravitational waves were detected on September 14, 2015 by both detectors.

+ +

The LIGO project employs many concepts that the OSG promotes—resource sharing, aggregating opportunistic use across a variety of resources—and adds two twists: First, this experiment ran across LIGO Data Grid (LDG), OSPool and Extreme Science and Engineering Discovery Environment (XSEDE)-based resources, all managed from a single HTCondor-based system to take advantage of dedicated LDG, opportunistic OSG and NSF eXtreme Digital (XD) allocations. Second, workflows analyzing LIGO detector data proved more data-intensive than many opportunistic OSG workflows. Despite these challenges, LIGO scientists were able to manage workflows with the same tools they use to run on dedicated LDG systems—Pegasus and HTCondor.

+ +

Peter Couvares, data analysis computing manager for the Advanced LIGO project at Caltech, specializes in distributed computing problems. He and colleagues James Clark (Georgia Tech) and Larne Pekowsky (Syracuse University) explained LIGO’s computing needs and environment: The main focus is on optimization of data analysis codes, where optimization is broadly defined to encompass the overall performance and efficiency of their computing. While they use traditional optimization techniques to make things run faster, they also pursue more efficient resource management, and opportunistic resources—if there are computers available, they try to use them—thus the collaboration with OSG.

+ +
+
+
+ couvares_cropped +
+ Peter Couvares, courtesy photo +
+
+
+ clark_cropped +
+ James Clark, courtesy photo +
+
+
+ pekowsy +
+ Larne Pekowsky, courtesy photo +
+
+ +

“When a workflow might consist of 600,000 jobs, we don’t want to rerun them if we make a mistake. So we use DAGMan (Directed Acyclic Graph Manager, a meta-scheduler for HTCondor) and Pegasus workflow manager to optimize changes,” added Couvares. “The combination of Pegasus, Condor, and OSG work great together.” Keeping track of what has run and how the workflow progresses, Pegasus translates the abstract layer of what needs to be done into actual jobs for Condor, which then puts them out on OSG.
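To make the DAGMan idea concrete: a DAG input file simply declares each job and its dependencies, and DAGMan tracks which nodes have finished. The sketch below is illustrative only (LIGO's actual DAGs are generated by Pegasus and are vastly larger); the node count and submit-file names are assumptions.

```python
# Sketch: generate a tiny DAGMan input file with a fan-in dependency.
n_segments = 4  # illustrative; a real search DAG may have hundreds of thousands of nodes

with open("search.dag", "w") as dag:
    for i in range(n_segments):
        dag.write(f"JOB filter{i} matched_filter.sub\n")  # hypothetical submit file
        dag.write(f'VARS filter{i} segment="{i}"\n')
    dag.write("JOB combine combine.sub\n")                # hypothetical submit file
    parents = " ".join(f"filter{i}" for i in range(n_segments))
    dag.write(f"PARENT {parents} CHILD combine\n")

# Submitted with: condor_submit_dag search.dag
# If a node fails, DAGMan writes a rescue DAG so that resubmission
# skips every node that already completed successfully.
```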

+ +

The computing model

+ +

Since this work encompasses four types of computing – volunteer, dedicated, opportunistic (OSG), and allocated (XSEDE XD via OSG) – everything needs to be very efficient. Couvares helps with coordination, Pekowsky with optimization, and Clark with using OSG. In particular, OSG also enabled access to allocation-based resources from XSEDE. Allocations allow LIGO to get fixed amounts of time on the dedicated NSF-funded supercomputers Comet and Stampede. While Stampede looks and behaves very much like a traditional supercomputer resource (batch, login node, shared file system), Comet has a new virtualization-based interface that eliminates the need to submit to a batch system. OSG provides this through a virtual machine (VM) image; LIGO then simply uses the OSG environment.

+ +

LIGO consumed 3,956,910 hours on OSG, out of which 628,602 hours were on the Comet and 430,960 on the Stampede XD resources. OSG’s Brian Bockelman (University of Nebraska-Lincoln) and Edgar Fajardo (UC San Diego/San Diego Supercomputer Center) used HTCondor to help LIGO implement their Pegasus workflow transparently across 16 clusters at universities and national labs across the US, including on the NSF-funded Comet and Stampede supercomputers.

+ +

“Normally our computing is done on dedicated clusters on the LIGO Data Grid,” said Couvares, “but we are moving toward also using outside and more elastic resources like OSG. OSG allows more flexibility as we add in systems that aren’t part of our traditional dedicated systems. The combination of OSG for unusual or dynamic workloads, and the LIGO Data Grid for regular workloads keeping up with new observational data, is very powerful. In addition, the Berkeley Open Infrastructure for Network Computing (BOINC) allows us to use volunteers’ home computers when they are idle, running pulsar searches around the world in the Einstein@Home project (E@H). The aggregated cycles from E@H are quite large, but it is well-suited to only some kinds of searches where a computer must process a smaller amount of data for a longer amount of time. We must rely on traditional HTC resources for our data-intensive analysis codes.”

+ +

LIGO codes cannot all run as-is on OSG. The majority of codes are highly optimized for the LDG environment, so the team identified the most compute-intensive, highest-science-priority code to run on OSG. Of about 100 different data analysis codes, only a small handful are running on OSG so far. However, the research team started with the hardest code, their highest priority, which means they are now doing some of LIGO’s most important computing on OSG. Other low-latency codes must run on dedicated local resources because their results might be needed in seconds or minutes.

+ +

“It is important that LIGO has a broad set of resources and an increasingly diverse set of resources. OSG is almost like a universal adapter for us,” said Couvares. “It is very powerful, users don’t need to care where a job runs, and it is another step toward that old promise of grid computing.”

+ +

The importance of OSG and NSF support

+ +

Using data analysis run on the OSG, the LIGO team looked for a compact binary coalescence, that is, the merger of binary neutron stars or black holes. Couvares called it a modeled search: they have a signal that they believe is a strong indicator, they know what it’s going to look like, and they have optimal match filters to compare data with the signal they expect. But the search is computationally expensive because it’s not just one signal they are looking for: the parameters of the source may change, or the objects may spin differently. The degree of match requires a search on the order of 100,000 different models/waveforms. This makes the OSG very valuable, because it can split up many of the match filters.
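A back-of-the-envelope sketch of why this search parallelizes so well: the template bank splits into independent slices, one per job. The chunk size below is an arbitrary assumption, not a LIGO configuration.

```python
# Partition a bank of ~100,000 template waveforms into independent jobs.
N_TEMPLATES = 100_000    # order of magnitude from the text
TEMPLATES_PER_JOB = 500  # arbitrary illustrative chunk size

chunks = [
    (start, min(start + TEMPLATES_PER_JOB, N_TEMPLATES))
    for start in range(0, N_TEMPLATES, TEMPLATES_PER_JOB)
]
# Each (start, end) slice becomes one job that match-filters the detector
# data against its share of the template bank; no job depends on another,
# which is exactly the shape high throughput computing rewards.
print(len(chunks), "independent matched-filter jobs")  # -> 200
```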

+ +

“The parallel nature of the OSG is what’s valuable,” said Couvares. “It is well suited to a high throughput environment. We would like to use more OSG resources because we could expand the parameter space of our searches beyond what is possible with dedicated resources. We need two things, really. We obviously need resources, but we also need people who can be a bridge between the data analysts/scientists and the computing resources. Resources alone are not enough. LIGO will always need dedicated in-house computing for low latency searches that need to be done quickly, and for our steady-state offline computing, but now we have the potential elasticity of OSG.”

+ +

“The nature of our collaboration with OSG has been absolutely great for a number of reasons,” said Couvares. “The OSG people have been extremely helpful. They are really unselfish and technical. That’s not always there in the open-source world. The basic idea of OSG has been good for LIGO: their willingness to provide OSG services for LIGO, to reduce the barrier to entry, setting up computing elements, and so on. The barrier otherwise would have been too high. We couldn’t be happier with our partnership.”

+ +

Another big step has been the increase in network speed. The data was cached at the University of Nebraska and streamed to on-demand worker nodes that are able to read from a common location. This project benefited greatly from the NSF’s Campus Cyberinfrastructure – Network Infrastructure and Engineering (CC-NIE) program, which helped provide a hardware upgrade from 10Gbps to 100Gbps WAN connectivity. Receiving NSF support to upgrade to 100Gbps has enabled huge gains in workflow throughput.

+ +

Chart: WMS hours by facility, including Comet.

+ +

The LIGO analysis ran across 16 different OSG resources, for a total of 4M CPU hours:

+
    +
• 1M CPU hours (25%) XSEDE contribution
• 5 TB total input data, cached at the Holland Computing Center (HCC) at the University of Nebraska-Lincoln
• 1 PB total data volume distributed to jobs from Nebraska
• 10 Gbps sustained data rates from Nebraska storage to worker nodes
+ +

Couvares concluded, “What we are doing is pure science. We are trying to understand the universe, trying to do what people have wanted to do for 100 years. We are extending the reach of human understanding. It’s very exciting and the science is that much easier with the OSG.”

+ + + +

– Greg Moore

+ +

– Brian Bockelman (OSG, University of Nebraska at Lincoln) contributed to this story

+ + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/ospool-computation.html b/preview-fall2024-info/ospool-computation.html new file mode 100644 index 000000000..6402a66a2 --- /dev/null +++ b/preview-fall2024-info/ospool-computation.html @@ -0,0 +1,372 @@ + + + + + + +OSPool As a Tool for Advancing Research in Computational Chemistry + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ OSPool As a Tool for Advancing Research in Computational Chemistry +

+

Assistant Professor Eric Jonas uses OSG resources to understand the structure of molecules based on their measurements and derived properties.

+
+ Microscope beside computer by Tima Miroshnichenko from Pexels. +
Microscope beside computer by Tima Miroshnichenko from Pexels.
+
+ +
+ Eric Jonas, Assistant Professor at UChicago +
Eric Jonas, Assistant professor at UChicago
+
+ +

Picture this: You have just developed a model that predicts the properties of some molecules and plan to include this model in a section of a research paper. However, just a few days before the paper is to be published on your professional website, you discover an error in the data generation process, which requires you to redo your computations, and quickly! This was the case for Assistant Professor Eric Jonas, who works in the Department of Computer Science at the University of Chicago (UChicago). While this process is normally tedious, he noted how the OSPool helped streamline the steps needed to regenerate results: “The OSPool made it easy to go back and regenerate the data set with about 70 million new molecules in just a matter of days.”

+ +

Although this was a fairly recent incident for Jonas, he is not new to high throughput computing or the OSPool. With usage reaching as far back as his graduate school days, Jonas has utilized resources ranging from cloud computing infrastructures like Amazon Web Services to the National Supercomputing Center for his work with biological signal acquisition, molecular inverse problems, machine learning, and other ways of exploiting scalable computation.

+ +

He soon realized, though, that although these other resources could run large amounts of data in a relatively short time, they required a long, drawn-out sequence of actions to provide results – creating an application, waiting for it to be accepted, and then waiting in line for long periods for a job to run. Faced with this problem in 2021, Jonas found a solution with the OSG Consortium and its OSPool, OSG’s distributed pool of computing resources for running high-throughput jobs.

+ +

In April of 2021, he enlisted the help of HTCondor and the OSPool to run pre-existing computations that allow for the generation of training data and the development of new machine learning techniques to determine molecular structures in mixtures, chemical structures in new plant species, and other related queries.

+ +

Jonas’ decision to transition to the OSPool boiled down to three simple reasons: less red tape involved in getting started; better communication and assistance from staff; and greater flexibility with running other people’s software to generate data for his specific research, which would otherwise have been too computationally bulky to handle alone.

+ +

In terms of challenges with OSPool utilization, Jonas’ only point of concern is the amount of time it takes for code that has been uploaded to reach the OSPool. “It takes between 8 and 12 hours for that code to get to OSG. The time-consuming containerization process means that any bug in code that prevents it from running isn’t discovered and resolved as quickly, and takes quite a while, sometimes overnight.”

+ +

He and his research team have since continued to utilize OSPool to generate output and share data with other users. They have even become advocates for the resource: “After we build our models, as a next step, we’re like, let’s run our model on the OSPool to allow the community (which constitutes the entirety of OSPool users) also to generate their datasets. I guess my goal, in a way, is to help OSG grow any way I can, whether that involves sharing my output with others or encouraging people to look into it more.”

+ +

Jonas spoke about how he hopes more people would take advantage of OSPool: +“We’re already working on expanding our use of it at UChicago, but I want even more people to know that OSPool is out there and to know what kind of jobs it’s a good fit for because if it fits the kind of work you’re doing, it’s like having a superpower!”

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/past_team_pictures.html b/preview-fall2024-info/past_team_pictures.html new file mode 100644 index 000000000..3d0689e0b --- /dev/null +++ b/preview-fall2024-info/past_team_pictures.html @@ -0,0 +1,460 @@ + + + + + + +HTCondor Team Pictures + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+

+ HTCondor Team Pictures +

+ +

2024

+

The CHTC Team at Throughput Computing 2024

+

HTCondor Team May 2024

+ + +

2023

+

The CHTC Team at Throughput Computing 2023

+

HTCondor Team May 2023

+ +

2022

+

The CHTC Team at HTCondor Week 2022

+

HTCondor Team May 2022

+ +

2021

+

The HTCondor Team at HTCondor Week 2021, May 2021.

+

HTCondor Team May 2021

+ +

2020

+

The HTCondor Team at HTCondor Week 2020, May 2020.

+

HTCondor Team May 2020

+ +

2019

+

The HTCondor Team at HTCondor Week 2019, May 2019.

+

HTCondor Team May 2019

+ +

2018

+

The HTCondor Team at HTCondor Week 2018, May 2018.

+

HTCondor Team May 2018

+ +

2017

+

The HTCondor Team at HTCondor Week 2017, May 2017 (but photo is missing Miron Livny!).

+

HTCondor Team May 2017

+ +

2016

+

The HTCondor Team at HTCondor Week 2016, May 2016.

+

HTCondor Team May 2016

+ +

2015

+

The HTCondor Team at HTCondor Week 2015, May 2015.

+

HTCondor Team May 2015

+ +

2014

+

The HTCondor Team at HTCondor Week 2014, May 2014.

+

HTCondor Team May 2014

+ +

2013

+

The HTCondor Team at HTCondor Week 2013, May 2013.

+

HTCondor Team May 2013

+ +

2012

+

The HTCondor Team at Condor Week 2012, May 2012.

+

HTCondor Team May 2012

+ +

2011

+

The HTCondor Team at Condor Week 2011, May 2011.

+

HTCondor Team May 2011

+ +

2010

+

The HTCondor Team at Condor Week 2010, May 2010.

+

HTCondor Team May 2010

+ +

2009

+

The HTCondor Team at Condor Week 2009, May 2009.

+

HTCondor Team May 2009

+ +

2008

+

The HTCondor Team at Condor Week 2008, May 2008.

+

HTCondor Team May 2008

+ +

2007

+

The HTCondor Team at Condor Week 2007, May 2007.

+

HTCondor Team May 2007

+ +

2006

+

The HTCondor Team at Condor Week 2006, April 2006.

+

HTCondor Team April 2006

+ +

2003

+

HTCondor Team October 2003

+

HTCondor Team 2003

+ +

2002

+

Taken in October 2002.

+

HTCondor Team October 2002

+ + +

1999

+ +

+ + + + + + + + + + + + 1999 Team Picture +

+ +

1998

+ +

HTCondor Team 1998

+
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/path-facility.html b/preview-fall2024-info/path-facility.html new file mode 100644 index 000000000..fa7168248 --- /dev/null +++ b/preview-fall2024-info/path-facility.html @@ -0,0 +1,434 @@ + + + + + + +Advancing computational throughput of NSF funded projects with the PATh Facility + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Advancing computational throughput of NSF funded projects with the PATh Facility +

+

Since 2022, the Partnership to Advance Throughput Computing (PATh) Facility has provided dedicated high throughput +computing (HTC) +capacity to researchers nationwide. Following a year of expansion, here’s a look into the researchers’ work and how it has been enabled by +the PATh Facility.

+ +

Searching for more computing capacity, Dr. Himadri Chakraborty of Northwest Missouri State University first heard of the PATh Facility, a purpose-built, national-scale distributed high throughput computing (HTC) resource, from his NSF program director. After approaching PATh Research Facilitators to acquire an account and computing “credits,” Chakraborty’s team was able to advance their work in physics using computing resources from the PATh Facility. Christina Koch, Lead Research Facilitator at CHTC, guided Chakraborty’s team in transitioning workflows to run within HTCondor.

+ +

“As our ambition grew, we were looking out for a larger system, PATh came as a blessing,” Chakraborty reflected. “The ultimate satisfaction is to get +some new understanding and learning of the science we are working on. We hope that this will be one of our first major achievements using +the PATh Facility.”

+ +
+ Himadri Chakraborty +
Himadri Chakraborty.
+
+ +

The PATh Facility, created through the Partnership to Advance Throughput Computing (PATh; NSF grant #2030508), enables researchers of an NSF-funded project to use its dedicated distributed high-throughput computing (HTC) capacity. PATh is an ongoing collaboration between the Center for High Throughput Computing (CHTC) and the OSG Consortium to support HTC-enabled research by improving national cyberinfrastructure. The PATh Facility acquires capacity from six institutions — the University of Nebraska-Lincoln, Syracuse University, Florida International University, the San Diego Supercomputer Center, the University of Wisconsin-Madison and the Texas Advanced Computing Center — integrated into a single pool of capacity available to users. After becoming fully operational in mid-2022, it saw “the most growth in terms of new projects joining and getting started in 2023,” Koch said.

+ +

The PATh Facility guarantees users access through credits. Credits operate as a stand-in unit to ensure no +one user is monopolizing the Facility’s capacity. Users receive 1,000 start-up credits to test if the PATh Facility is a good fit for them, available as Central +Processing Unit (CPU) or Graphic Processing Unit (GPU) charges. After this initial testing period, they can apply for supplemental credits by contacting PATh +Facilitators and their NSF officer. If users run through all their credits, they are still able to keep running and facilitators will work with them to request +additional credits.

+ +

In comparison to the PATh Facility, the Open Science Pool (OSPool) — another distributed HTC resource created +by the OSG Consortium — acquires available capacity from idle resources across 70 institutions +nationwide. Projects may be better suited for the PATh Facility than the OSPool if they need additional cores, memory, data or dedicated time. “Since the PATh +Facility is hardware owned and operated by PATh, we can make more guarantees about how long individual computations can run, that people will be able to get certain +resources and run computations of a certain size,” Koch explained.

+ +

Following the PATh Facility’s growth, some OSPool users have begun to use the Facility’s dedicated capacity in tandem. One example is North Carolina State University +Ph.D. candidate Matthew Dorsey, who relied on capacity from the OSPool for two years before expanding his research to the newer PATh Facility. +In doing so, he was able to run larger jobs without worrying about running out of capacity. “The transition to the PATh Facility was extremely easy,” Dorsey said. +“I was pleased with how seamless it was.”

+ +

Dorsey became interested in the OSPool after attending OSG School in the summer of 2022. There, he learned the basics of +HTCondor and got to know Koch and other facilitators. Dorsey’s research specializes in statistical physics. He uses computational models +to study magnetic materials and how magnetic fields can be used to alter properties made from different kinds of magnetic nanoparticles. His work benefits from the +consistent access to computing for runs that accumulate over a long period of time.

+ +
+ Matthew Dorsey +
Matthew Dorsey.
+
+ +

Dorsey acknowledges that each system has its advantages and disadvantages. For Dorsey, the PATh Facility is better equipped for more complex jobs due to capacity and +allocated time, while the OSPool is better for testing out comparatively smaller runs. “It was really easy to translate what I was doing on the OSPool to the PATh +Facility and quickly scale it up,” Dorsey said.

+ +

A testament to the strength of the PATh Facility, the National Radio Astronomy Observatory (NRAO) used its capacity alongside the OSPool to develop one of the deepest radio images of a well-studied area in space. Working alongside PATh and CHTC staff, NRAO relied on capacity from the PATh Facility to plan when certain jobs would run, without the risk of reduced capacity from the OSPool.

+ +

The PATh Facility makes it possible to support projects with larger computational requirements. Dr. Vladan Stevanovic of the Colorado School of Mines studies computational materials science and relies heavily on the PATh Facility to plan and run data workflows. Stevanovic became familiar with the PATh Facility after receiving a Dear Colleague Letter from the NSF.

+ +

His work requires more cores than the OSPool alone could offer, and he was drawn to the PATh Facility due to its specialization in HTC and ability to guarantee availability. Stevanovic and his team hope to develop computational tools to reliably predict the metastable states of solid matter. He describes this work as very computational, and he has worked with HTC workflows for over 12 years. “PATh is amazing for my research because its primary purpose is HTC, which I rely on,” he said. “I’m grateful because my project critically depends on PATh.”

+ +

Stevanovic also appreciates how easy it was to start using the PATh Facility. Start-up credits are typically granted while or directly after meeting with the Facilitation +team, and Koch’s team continues to support researchers as they ask the NSF for more credits. “The onboarding process was great, and the support from Christina was amazing. +We were able to get running and get up to speed quickly.”

+ +

Chakraborty’s team faced some initial challenges in switching workflows from in-house to distributed, but coming to the PATh Facility nonetheless expanded the capacity +available to his team. He recounted that his previous in-house system provided about 28 CPUs per node, while the PATh Facility offers up to 150 CPUs. Overall, Chakraborty +is optimistic that the new capacity will improve his findings for the future. “We are happy we took that plunge because we went through some difficult times and got help +from the PATh folks,” he said. “We’ve made some pretty good progress in making our codes run on PATh. It was mostly to be able to use a larger pool of computer power.”

+ +

His work focuses on the simulation of electronic and phononic coupled ultrafast relaxation of photoexcited large molecules. The PATh Facility’s new capacity allowed his team to make new advances in the study “of a polymer functional system that has a lot of applications,” he said. “It’s not the end, it’s still preliminary and intermediate, and they are so exciting. We are looking forward to the final results and finding out new physics.”

+ +

Interested PIs can submit an interest form on the PATh website and then meet with a research computing facilitator for a consultation. If the researcher is a good fit, PATh Facilitators help the researcher log in and begin using their start-up credits. If they wish to continue, the researcher drafts a proposal letter to the NSF requesting credits. Koch notes that the credit proposal is simpler than a typical project proposal, and the Facilitation team provides a multitude of resources such as credit calculators and proposal templates. For users who encounter issues, the facilitation team is available through a support email address and weekly support hours, and maintains documentation on the website.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/people.html b/preview-fall2024-info/people.html new file mode 100644 index 000000000..883491a25 --- /dev/null +++ b/preview-fall2024-info/people.html @@ -0,0 +1,2070 @@ + + + + + + +Our Staff + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+ 2024 CHTC Team Photo +
2024 CHTC Team Picture. View old team photos⬏
+
+
+ + + + +
+
+

Leadership

+
+ + +
+ +
+ +
+ +
+ +
+ +
+ +
+

Staff

+
+ + +
+
+
+
+ + + Headshot for Aaron Moate + +
+
+
+

+ + Aaron Moate + +

+

Lead Systems Administrator

+
+
+ University of Wisconsin–Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Aaryan Patel + +
+
+
+

+ + Aaryan Patel + +

+

Research Computing Facilitation Assistant

+
+
+ Morgridge Institute for Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Abhinandan Saha + +
+
+
+

+ + Abhinandan Saha + +

+

Systems Administration Intern

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Amber Lim + +
+
+
+

+ + Amber Lim + +

+

Research Computing Facilitator

+
+
+ University of Wisconsin–Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Andrew Owen + +
+
+
+

+ + Andrew Owen + +

+

Research Computing Facilitator

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Brian Aydemir + +
+
+
+

+ + Brian Aydemir + +

+

Systems Integration Developer

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Brian Lin + +
+
+
+

+ + Brian Lin + +

+

OSG Software Area Coordinator

+
+
+ University of Wisconsin–Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Cannon Lock + +
+
+
+

+ + Cannon Lock + +

+

Web Developer

+
+
+ Morgridge Institute for Research +
+
+
+
+
+ +
+
+
+
+ + + + Headshot for Christina Koch + + +
+
+
+

+ + Christina Koch + +

+

Lead Research Computing Facilitator

+
+
+ University of Wisconsin - Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Cole Bollig + +
+
+
+

+ + Cole Bollig + +

+

HTCondor Core Developer

+
+
+ University of Wisconsin - Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for David Baik + +
+
+
+

+ + David Baik + +

+

System Administrator

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Emile Turatsinze + +
+
+
+

+ + Emile Turatsinze + +

+

Systems Administrator

+
+
+ Morgridge Institute for Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Emma Turetsky + +
+
+
+

+ + Emma Turetsky + +

+

Research Software Engineer

+
+
+ Morgridge Institute for Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Greg Thain + +
+
+
+

+ + Greg Thain + +

+

HTCondor Core Developer

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Ian Ross + +
+
+
+

+ + Ian Ross + +

+

Systems Integration Developer

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Irene Landrum + +
+
+
+

+ + Irene Landrum + +

+

Project Manager

+
+
+ Morgridge Institute for Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Jaime Frey + +
+
+
+

+ + Jaime Frey + +

+

HTCondor Core Developer

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Janet Stathas + +
+
+
+

+ + Janet Stathas + +

+

Project Manager

+
+
+ Morgridge Institute for Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Jason Patton + +
+
+
+

+ + Jason Patton + +

+

Software Integration Developer

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + + Headshot for Jeff Peterson + + +
+
+
+

+ + Jeff Peterson + +

+

System Administrator

+
+
+ Morgridge Institute +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Joe Bartkowiak + +
+
+
+

+ + Joe Bartkowiak + +

+

Systems Administrator

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for John TJ Knoeller + +
+
+
+

+ + John TJ Knoeller + +

+

HTCondor Core Developer

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Justin Hiemstra + +
+
+
+

+ + Justin Hiemstra + +

+

Research Software Engineer

+
+
+ Morgridge Institute For Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Kent Cramer III + +
+
+
+

+ + Kent Cramer III + +

+

Network Infrastructure Support Specialist

+
+
+ Morgridge Institute For Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Matt Westphall + +
+
+
+

+ + Matt Westphall + +

+

Research Cyberinfrastructure Specialist

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Mátyás Selmeci + +
+
+
+

+ + Mátyás Selmeci + +

+

Software Integration Developer

+
+
+ University of Wisconsin–Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Rachel Lombardi + +
+
+
+

+ + Rachel Lombardi + +

+

Research Computing Facilitator

+
+
+ University of Wisconsin–Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Tae Kidd + +
+
+
+

+ + Tae Kidd + +

+

Project Manager

+
+
+ Morgridge Institute For Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Theng Vang + +
+
+
+

+ + Theng Vang + +

+

System Administrator

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + + Headshot for Tim Cartwright + + +
+
+
+

+ + Tim Cartwright + +

+

OSG Deputy Director/XO

+
+
+ University of Wisconsin–Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Tim Theisen + +
+
+
+

+ + Tim Theisen + +

+

Release Manager

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Todd L Miller + +
+
+
+

+ + Todd L Miller + +

+

HTCondor Core Developer

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for William Swanson + +
+
+
+

+ + William Swanson + +

+

Research Cyberinfrastructure Specialist

+
+
+ University of Wisconsin–Madison +
+
+
+
+
+ +
+

Students

+
+ + +
+
+
+
+ + + Headshot for Ben Staehle + +
+
+
+

+ + Ben Staehle + +

+

Fellow

+
+
+ Morgridge Institute for Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Bocheng Zou + +
+
+
+

+ + Bocheng Zou + +

+

System Administrator Intern

+
+
+ University of Wisconsin–Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Cristina Encarnacion + +
+
+
+

+ + Cristina Encarnacion + +

+

Student Science Writer

+
+
+ Morgridge Institute for Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Frank Zhang + +
+
+
+

+ + Frank Zhang + +

+

System Administrator Intern

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Jordan Sklar + +
+
+
+

+ + Jordan Sklar + +

+

Student Science Writer

+
+
+ Morgridge Institute for Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Kristina Zhao + +
+
+
+

+ + Kristina Zhao + +

+

Fellow

+
+
+ Morgridge Institute for Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Marissa (Yujia) Zhang + +
+
+
+

+ + Marissa (Yujia) Zhang + +

+

System Administrator Intern

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Neha Talluri + +
+
+
+

+ + Neha Talluri + +

+

Fellow

+
+
+ Morgridge Institute for Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Patrick Brophy + +
+
+
+

+ + Patrick Brophy + +

+

Fellow

+
+
+ Morgridge Institute for Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Pratham Patel + +
+
+
+

+ + Pratham Patel + +

+

Fellow

+
+
+ Morgridge Institute for Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Ryan Boone + +
+
+
+

+ + Ryan Boone + +

+

Fellow

+
+
+ Morgridge Institute for Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Thinh Nguyen + +
+
+
+

+ + Thinh Nguyen + +

+

Fellow

+
+
+ Morgridge Institute for Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Wil Cram + +
+
+
+

+ + Wil Cram + +

+

Fellow

+
+
+ Morgridge Institute for Research +
+
+
+
+
+ +
+

Previous Staff

+
+ + +
+
+
+
+ + + Headshot for Alperen Bakirci + +
+
+
+

+ + Alperen Bakirci + +

+

Student Web Developer

+
+
+ Morgridge Institute For Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Bryna Goeking + +
+
+
+

+ + Bryna Goeking + +

+

Student Writer

+
+
+ Morgridge Institute for Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Cameron Abplanalp + +
+
+
+

+ + Cameron Abplanalp + +

+

Research Computing Facilitation Assistant

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Emily Yao + +
+
+
+

+ + Emily Yao + +

+

System Administrator Intern

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Haoming Meng + +
+
+
+

+ + Haoming Meng + +

+

Research Software Engineer

+
+
+ Morgridge Institute For Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Joe Reuss + +
+
+
+

+ + Joe Reuss + +

+

Software Engineer

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for John Parsons + +
+
+
+

+ + John Parsons + +

+

System Administrator Intern

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Lili Bicoy + +
+
+
+

+ + Lili Bicoy + +

+

Student Science Writer

+
+
+ Morgridge Institute For Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Max Hartke + +
+
+
+

+ + Max Hartke + +

+

Student Programming Intern

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Michael Collins + +
+
+
+

+ + Michael Collins + +

+

Systems Administrator

+
+
+ Morgridge Institute for Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Mihir Manna + +
+
+
+

+ + Mihir Manna + +

+

System Administrator Intern

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Molly McCarthy + +
+
+
+

+ + Molly McCarthy + +

+

Student Web Developer

+
+
+ Morgridge Institute for Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Rishideep Rallabandi + +
+
+
+

+ + Rishideep Rallabandi + +

+

Student Programming Intern

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Ryan Jacob + +
+
+
+

+ + Ryan Jacob + +

+

System Administrator Intern

+
+
+ University of Wisconsin-Madison +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Shirley Obih + +
+
+
+

+ + Shirley Obih + +

+

Communications Specialist

+
+
+ Morgridge Institute For Research +
+
+
+
+
+ +
+
+
+
+ + + Headshot for Yuxiao Qu + +
+
+
+

+ + Yuxiao Qu + +

+

Research Software Engineer

+
+
+ Morgridge Institute For Research +
+
+
+
+
+ +
+
+
+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/precision-mental-health.html b/preview-fall2024-info/precision-mental-health.html new file mode 100644 index 000000000..95887a543 --- /dev/null +++ b/preview-fall2024-info/precision-mental-health.html @@ -0,0 +1,387 @@ + + + + + + +Harnessing HTC-enabled precision mental health to capture the complexity of smoking cessation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Harnessing HTC-enabled precision mental health to capture the complexity of smoking cessation +

+

Collaborating with CHTC research computing facilitation staff, UW-Madison researcher Gaylen Fronk is using HTC to improve cigarette cessation treatments by accounting for the complex differences among patients.

+ +
+ Gaylen Fronk +
Gaylen Fronk. Image credit: UW ARC.
+
+ +

Working at the crossroads of mental health and computing is Gaylen Fronk, a graduate student at the University of Wisconsin-Madison’s Addiction Research Center. By examining treatments for substance use disorders with machine learning models that are enabled by High Throughput Computing (HTC), Fronk captures the array of differences among individuals while still ensuring that her models are applicable to new patients. Her work is embedded within the larger context of precision mental health, an emerging field that relies on computational tools to evaluate complex, individual-level data in determining the fastest and most effective treatment plan for a given patient.

+ +

Fronk’s pursuit of precision mental health has recently led her to the world of computing that involves high-throughput workloads. Currently, she’s using HTC to predict treatment responses for people who are quitting cigarette smoking.

+ +

“I feel like [HTC] has been critical for my entire project,” Fronk reasons. “It removes so many constraints from how I have to think about my research. It keeps so many possibilities open because, within reason, I just don’t have to worry about computational time –– it allows me to explore new questions and test out ideas. It allows me to think bigger and add complexity rather than only having to constrain.”

+ +

Embarking on this project in August of 2019, Fronk began by reaching out to the research computing facilitators at UW-Madison’s Center for High Throughput Computing (CHTC). Dedicated to bringing the power of HTC to all fields of research, CHTC staff provided Fronk with the advice and resources she needed to get up and running. Soon, she was able to access hundreds of concurrent cores on CHTC’s HTC system through the HTCondor Software Suite (HTCSS), which was developed at UW-Madison and is used internationally for automating and managing batch HTC workloads. This computing capacity has been undeniably impactful on Fronk’s research, yet when reflecting on the beginnings of her project today, Fronk considers the collaborative relationships she’s developed along the way to be particularly powerful.

+ +

“I am never going to be a computer scientist,” explains Fronk. “I’m doing my best and I’m learning, but that’s not what my focus is and that’s never going to be my area of expertise. I think it’s really wonderful to be able to lean on people for whom that is their area of expertise, and have those collaborative relationships.” This type of collaboration among computing experts and researchers will be vital as computational advances continue to spread throughout the social sciences. Computing staff like CHTC’s research computing facilitators help researchers to transform, expand, and accelerate their work; and specialized researchers like Fronk provide their domain expertise to ensure these computational methods are incorporated in ways that preserve the conceptual and theoretical basis of their discipline.

+ +
+ Christina Koch +
Christina Koch.
+
+ +

CHTC research computing facilitator Christina Koch has worked closely with Fronk since the beginning of her project, and elaborates on the benefits arising from this synergistic relationship: “Instead of every research group on campus needing to have their own in-house large-scale computing expert, they can meet with our facilitation team and we provide them with the information they need to expand their research computing vision and apply it to their work. But we also learn a lot ourselves from the wide variety of researchers we consult with. Since our experience isn’t siloed to a particular research domain, we take lessons learned from one group and share them with another group, where normally those groups would never have thought to connect with each other.”

+ +

For fellow social scientists who are considering reaching out to people like Christina and incorporating HTC into their work, Fronk urges them to do just that: “There’s a lot you can teach yourself, but you also don’t have to be on your own. Reach out to the people who know more than you. For me, people like Christina and others on the CHTC team have been invaluable.”

+ +

Fronk’s collaborations with Christina all have revolved around the ongoing project that she first began in August of 2019 –– predicting which cigarette cessation treatments will be most effective for a given individual. Data from a Center for Tobacco Research and Intervention (CTRI) 6-month clinical trial serve as a rich and comprehensive foundation for building machine learning models. With the CTRI data in hand, Fronk not only has access to the treatment type and whether it was successful at the end of the trial, but also to approximately 400 characteristics that capture the fine-tuned individual differences among patients. These include demographic information, physical and mental health records, smoking histories, and social pressures, such as the smoking habits of a patient’s friends, roommates, or spouse.

+ +

All these individual-level differences paint valuable complexity onto the picture, and Fronk is able to embrace and dive into that complexity with the help of HTC. Each job she sends over to CHTC’s cores contains a unique model configuration run against a single cross-validation iteration, meaning that part of the CTRI data is used for model fitting while the unused, ‘new’ data is used for model evaluation. For instance, Fronk might start with as many as 200 unique configurations for a given model. If each of these model configurations is fit and evaluated using a cross-validation technique that has 100 unique splits of data, Fronk would then submit the resulting 20,000 jobs to CHTC.
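The arithmetic of that job decomposition is simple enough to sketch; the counts below mirror the illustrative figures above (200 configurations, 100 splits) rather than any particular analysis.

```python
from itertools import product

n_configs = 200  # unique model configurations (illustrative count)
n_splits = 100   # cross-validation iterations, i.e. train/test splits

# One HTC job per (configuration, split) pair.
jobs = list(product(range(n_configs), range(n_splits)))
print(len(jobs))  # 20000

# Each job fits one configuration on its training portion, evaluates it on
# the held-out data, and writes one small output file; performance per
# configuration is then averaged across all splits after the jobs return.
```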

+ +

Before submitting, Fronk alters her code so that each job runs a single configuration against a single cross-validation iteration, effectively breaking the comprehensive CTRI data down into small, manageable pieces. Ultimately, when delegated to hundreds of CHTC cores in concurrent use, Fronk’s largest submissions finish in mere hours, as opposed to days on a local computer.

+ +

Thousands of small jobs are handled easily by HTCSS and CHTC’s distributed resources, after which Fronk can aggregate this multitude of output files on her own computer to average the performance of the model configuration across the array of cross-validation iterations. This aggregated output represents how accurately the model predicts whether a certain cigarette cessation treatment will work for a specific individual. After receiving the output, Fronk evaluates it, learns from it, and repeats –– but this time with new insight.

+ +

After her experience with HTC, Fronk now sees computing as an integral part of her work. In fact, the ideal of precision mental health as a compelling alternative to traditional treatment methods has actually been around for a while –– though scalable computing methods that enable it are just beginning to enter the toolboxes of mental health researchers everywhere. “I feel like high-throughput computing really fills a lot of the holes that are needed to move precision mental health forward,” Fronk expresses. “It makes me really excited to be working at that intersection.”

+ +

And at that intersection, Fronk isn’t alone. As computational resources are becoming more accessible, increasingly more researchers are investigating the frontiers of precision mental health and its potential to improve treatment success. But before this approach moves from the research space and into a clinical setting, careful thought is needed to assess how these experimental models will fare in the real world.

+ +

Approaches that require intensive and expensive data, like neuroimaging or genetic analysis for instance, may not be feasible –– especially for clinics located in low-income communities. Elaborating on this idea, Fronk explains, “It’s really exciting to think that neuroimaging or genetic data might hold a lot of predictive potential –– yet if a person can’t get genotyped or imaged, then they’re not going to be able to be split into treatments. And those problems get compounded in lower income areas, or for people who have been historically excluded and underrepresented both in terms of existing research and access to healthcare.”

+ +

It will take time, research, and ethical forethought before precision mental health approaches can reach local clinics, but when that time comes –– the impact will ripple through the lives of people seeking treatment everywhere. “I think precision mental health can really help people on a much shorter timeline than traditional treatment approaches, and that feels very meaningful to me,” says Fronk. In terms of her focus on cigarette smoking cessation, timing is everything. Cigarette smoking –– like other substance use disorders –– has extremely high costs of failed treatment at both the personal and societal level. If someone is given the right treatment from the start when they’re most motivated to quit, it mitigates not only their own health and financial risks, but also those of society.

+ +

Ultimately, these impacts stem from the collaborative relationships seen today between researchers like Fronk and computing facilitators like Christina at CHTC. There’s still much to be done before precision mental health approaches can be brought to bear in the clinical world, but high-throughput computing is powering the research that moves in that direction in a way that was never possible before. Complexity –– which used to limit Fronk’s research –– now seems absolutely central to it.

+ +

+ +

A research article about Fronk’s project is forthcoming. In the meantime, watch her presentation from HTCondor Week 2021 or check out the UW-Madison Addiction Research Center to learn more.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/projects.html b/preview-fall2024-info/projects.html new file mode 100644 index 000000000..793832f11 --- /dev/null +++ b/preview-fall2024-info/projects.html @@ -0,0 +1,547 @@ + + + + + + +CHTC Partners Using CHTC Technologies and Services + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+

+ CHTC Partners Using CHTC Technologies and Services +

+ +
+
+
+ + + + + + +
+
+

IceCube and Francis Halzen

+

+ Francis Halzen, principal investigator of IceCube and the Hilldale and Gregory Breit Distinguished Professor of Physics.

IceCube has transformed a cubic kilometer of natural Antarctic ice into a neutrino detector. We have discovered a flux of high-energy neutrinos of cosmic origin, with an energy flux that is comparable to that of high-energy photons. We have also identified its first source: on September 22, 2017, following an alert initiated by a 290-TeV neutrino, observations by other astronomical telescopes pinpointed a flaring active galaxy, powered by a supermassive black hole. We study the neutrinos themselves, some with energies exceeding those produced by accelerators by a factor of one million. The IceCube Neutrino Observatory is managed and operated by the Wisconsin IceCube Particle Astrophysics Center (WIPAC) in the Office of the Vice Chancellor for Research and Graduate Education and funded by a cooperative agreement with the National Science Foundation. We have used CHTC and the Open Science Pool for over a decade to perform all large-scale data analysis tasks and generate Monte Carlo simulations of the instrument's performance. Without CHTC and OSPool resources we would simply be unable to make any of IceCube's groundbreaking discoveries. Francis Halzen is the Principal Investigator of IceCube. See the IceCube web site for project details. + +

+
+
+

 

+
+ +
+
+
+ +
+
+

David O’Connor

+

+ David H. O’Connor, Ph.D., UW Medical Foundation (UWMF) Professor, Department of Pathology and Laboratory Medicine at the AIDS Vaccine Research Laboratory.

Computational workflows that analyze and process genomics sequencing data have become the standard in virology and genomics research. The resources provided by CHTC allow us to scale up the amount of sequence analysis performed while decreasing sequence processing time. An example of how we use CHTC is generating consensus sequences for COVID-19 samples. Part of this is a step that separates and sorts a multisample sequencing run into individual samples, maps the reads of these individual samples to a reference sequence, and then forms a consensus sequence for each sample. Simultaneously, different metadata and other accessory files are generated on a per-sample basis, and data is copied to and from local machines. This workflow is cumbersome when there are, for example, 4 sequencing runs with each run containing 96 samples. CHTC allows us to cut the processing time from 16 hours to 40 minutes by distributing jobs across different CPUs. Overall, being able to use CHTC resources gives us a major advantage in producing results faster for large-scale and/or time-sensitive projects. + +
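A per-sample sketch of the kind of pipeline described above, assuming a bwa/samtools/ivar toolchain; the reference path and file names are hypothetical:

```sh
#!/bin/sh
set -e
sample=sample01   # hypothetical sample name

# Map this sample's reads to the reference (assumes `bwa index` was run
# on the reference beforehand) and sort the resulting alignment.
bwa mem ref/sars-cov-2.fasta "${sample}_R1.fastq.gz" "${sample}_R2.fastq.gz" \
  | samtools sort -o "${sample}.bam" -
samtools index "${sample}.bam"

# Pile up the aligned reads and call a per-sample consensus sequence.
samtools mpileup -aa -A -d 0 -Q 0 "${sample}.bam" \
  | ivar consensus -p "${sample}_consensus"
```

When each sample runs as its own HTCondor job, all 384 samples of a four-run batch proceed in parallel rather than back to back.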

+
+
+

 

+
+ +
+
+
+ +
+
+

Small Molecule Screening Facility and Spencer Ericksen

+

+ Spencer Ericksen, Scientist II at the Small Molecule Screening Facility, part of the Drug Development Core – a shared resource in the UW Carbone Cancer Center.

I have been working on computational methods for predicting biomolecular recognition processes. The motivation is to develop reliable models for predicting binding interactions between drug-like small molecules and therapeutic target proteins. Our team at SMSF works with campus investigators on early-stage academic drug discovery projects. Computational models for virtual screening could prioritize candidate molecules for faster, cheaper focused screens on just tens of compounds. To perform a virtual screen, the models evaluate millions to billions of molecules, a computationally daunting task. But CHTC facilitators have been with us through every obstacle, helping us to effectively scale through parallelization over HTC nodes, matching appropriate resources to specific modeling tasks, compiling software, and using Docker containers. Moreover, CHTC provides access to vast and diverse compute resources. + +
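As an illustration of that pattern, here is a hedged sketch of a Docker-universe HTCondor submission that fans a scoring container out over chunks of a ligand library; the container image, wrapper script, and file names are all hypothetical:

```sh
# Split a large ligand library into 10,000-molecule chunks, then score
# each chunk in its own Docker-universe HTCondor job.
split -d -l 10000 ligands.smi chunk_
mkdir -p logs

cat > screen.sub <<'EOF'
universe     = docker
docker_image = example/docking-tools:latest
executable   = score_chunk.sh
arguments    = $(chunk)
transfer_input_files = $(chunk), target.pdbqt
output       = logs/$(chunk).out
error        = logs/$(chunk).err
log          = screen.log
request_cpus = 1
queue chunk matching files chunk_*
EOF

condor_submit screen.sub
```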

+
+
+

 

+
+ +
+
+
+ +
+
+

Natalia de Leon

+

+ Natalia de Leon, Professor of Agronomy, Department of Agronomy.

The goal of her research is to identify efficient mechanisms to better understand the genetic constitution of economically relevant traits and to improve plant breeding efficiency. Her research integrates genomic, phenomic, and environmental information to accelerate translational research for enhanced sustainable crop productivity. + +

+
+
+

 

+
+ +
+
+
+ +
+
+

xDD project and Shanan Peters

+

+ Shanan Peters, project lead for xDD, Dean L. Morgridge Professor of Geology, Department of Geoscience.

Shanan’s primary research thrust involves quantifying the spatial and temporal distribution of rocks in the Earth’s crust in order to constrain the long-term evolution of life and Earth’s surface environment. Compiling data from scientific publications is a key component of this work, and Peters and his collaborators are developing machine reading systems deployed over the xDD digital library and cyberinfrastructure hosted in the CHTC for this purpose. + +

+
+
+

 

+
+ +
+
+
+ +
+
+

Susan Hagness

+

In our research we're working on a novel computational tool for THz-frequency characterization of materials with high carrier densities, such as highly-doped semiconductors and metals. The numerical technique tracks carrier-field dynamics by combining the ensemble Monte Carlo simulator of carrier dynamics with the finite-difference time-domain technique for Maxwell's equations and the molecular dynamics technique for close-range Coulomb interactions. This technique is computationally intensive, and each run takes long enough (12-20 hours) that our group's cluster can't keep up. This is why we think CHTC can help: it would let us run many more jobs than we're able to run now. + +

+
+
+

 

+
+ +
+
+
+ +
+
+

Joao Dorea

+

+ Joao Dorea, Assistant Professor in the Department of Animal and Dairy Sciences/Department of Biological Systems Engineering.

The Digital Livestock Lab conducts research focused on high-throughput phenotyping strategies to optimize farm management decisions. Our research group is interested in the large-scale development and implementation of computer vision systems, wearable sensors, and infrared spectroscopy (NIR and MIR) to monitor animals in livestock systems. We have a large computer vision system implemented in two UW research farms that generates large datasets. With the help of CHTC, we can train deep learning algorithms with millions of parameters using large image datasets and evaluate their performance in farm settings in a timely manner. We use these algorithms to monitor animal behavior, growth development, and social interaction, and to build predictive models for early detection of health issues and productive performance. Without access to the GPU cluster and the facilitation provided by CHTC, we would not be able to quickly implement AI technologies in livestock systems. + +
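For training jobs like these, the main difference from a CPU submission is the GPU request; a minimal, hypothetical HTCondor fragment (train_model.sh is an assumed wrapper around the training script):

```sh
cat > train.sub <<'EOF'
executable     = train_model.sh
# Ask HTCondor to match a slot with an available GPU.
request_gpus   = 1
request_cpus   = 4
request_memory = 16GB
output = train.out
error  = train.err
log    = train.log
queue
EOF

condor_submit train.sub
```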

+
+
+

 

+
+ +
+
+
+ +
+
+

Paul Wilson

+

+ Paul Wilson, head of The Computational Nuclear Engineering Research Group (CNERG), the Grainger Professor for Nuclear Engineering, and the current chair of the Department of Engineering Physics.

CNERG’s mission is to foster the development of new generations of nuclear engineers and scientists through the development and deployment of open and reliable software tools for the analysis of complex nuclear energy systems. Our inspiration and motivation come from performing those analyses on large, complex systems. Such simulations require ever-increasing computational resources, and CHTC has been our primary home for both HPC and HTC computation for nearly a decade. In addition to producing our results faster and without the burden of administering our own computer hardware, we rely on CHTC resources to demonstrate the performance improvements that arise from our methods development. The role-defining team of research computing facilitators has ensured a smooth onboarding of each new group member and helped them find the resources they need to be most effective. + +

+
+
+

 

+
+ +
+
+
+ +
+
+

Barry Van Veen

+

The bio-signal processing laboratory develops statistical signal processing methods for biomedical problems. We use CHTC for causal network modeling of brain electrical activity. We develop methods for identifying network models from noninvasive measures of electric/magnetic fields at the scalp, or invasive measures of the electric fields at or in the cortex, such as electrocorticography. Model identification involves high throughput computing applied to large datasets consisting of hundreds of spatial channels, each containing thousands of time samples. +

+
+
+

 

+
+ +
+
+
+ +
+
+

Biological Magnetic Resonance Data Bank

+

The Biological Magnetic Resonance Data Bank (BMRB) is headquartered within UW-Madison's National Magnetic Resonance Facility at Madison (NMRFAM) and uses the CHTC for research in connection with the data bank. + +

+
+
+

 

+
+ +
+
+
+ +
+
+

CMS LHC Compact Muon Solenoid

+

+ The UW team participating in the Compact Muon Solenoid (CMS) experiment analyzes petabytes of data from proton-proton collisions in the Large Hadron Collider (LHC). We use the unprecedented energies of the LHC to study Higgs Boson signatures, Electroweak Physics, and the possibility of exotic particles beyond the Standard Model of Particle Physics. Important calculations are also performed to better tune the experiment's trigger system, which is responsible for making nanosecond-scale decisions about which collisions in the LHC should be recorded for further analysis. +

+
+
+

 

+
+ +
+
+
+ +
+
+

Atlas Experiment

+

+ Atlas Experiment +

+
+
+

 

+
+ +
+
+
+ +
+
+

Phil Townsend

+

Professor Phil Townsend of Forestry and Wildlife Ecology says, "Our research (NASA & USDA Forest Service funded) strives to understand the outbreak dynamics of major forest insect pests in North America through simulation modeling. As part of this effort, we map forest species and their abundance using multi-temporal Landsat satellite data. My colleagues have written an automatic variable selection routine in MATLAB to preselect the most important image variables to model and map forest species abundance. However, depending on the number of records and the initial variables, this process can take weeks to run. Hence, we seek resources to speed up this process." +

+
+
+

 

+
+ +
+
+
+ +
+
+ + +
+ + + + + + + + + diff --git a/preview-fall2024-info/redirects.json b/preview-fall2024-info/redirects.json new file mode 100644 index 000000000..9e26dfeeb --- /dev/null +++ b/preview-fall2024-info/redirects.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/preview-fall2024-info/research-facilitation.html b/preview-fall2024-info/research-facilitation.html new file mode 100644 index 000000000..93fcd75ad --- /dev/null +++ b/preview-fall2024-info/research-facilitation.html @@ -0,0 +1,410 @@ + + + + + + +Research Facilitation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+

+ Research Facilitation +

+ +

RC facilitators serve as proactive and personalized guides, helping researchers +identify and implement computational approaches that result in the greatest impact on their projects. Rather +than possessing a significant depth of expertise in computational technologies themselves, RC facilitators build and +leverage a team of expert technical staff, translating the details of computational options for individual +researchers. Through this two-way relationship-building approach, dedicated RC facilitators have enabled +previously unimagined scholarship outcomes of significant scale and scope across a variety of research +domains, especially within the space of campus-supported research computing centers.

+ +

Impact

+ +

Since the hiring of the first Research Computing Facilitator in 2013, usage of computing services by previously underserved researchers has +increased significantly (see figure below). Importantly, more than 95% of usage from the life sciences and +social sciences has been on an HTC-optimized compute configuration rather than a traditional HPC +cluster, emphasizing the applicability of multiple compute configurations to meet needs across domains.

+ +

Figure 1

+ +

Goals

+ +

The following outlines the primary goals (the needs) of successful RC facilitation and identifies the related +major activities for achieving those goals.

+ +
    +
  • Proactive Engagement
  • +
  • Personalized Guidance
  • +
  • Teaching Researchers to Fish
  • +
  • Building Relationships
  • +
  • Advocating for Research Needs
  • +
  • Developing Connections among Staff
  • +
+ +

Skills and Backgrounds

+ +

Three key areas of experience and interest are relevant for successful RC facilitators: individual interests +and motivation, communication and interpersonal skills, and technical knowledge.

+ +

Interests and Motivation

+ +
    +
  • A desire to enable and support the scholarly work of others
  • +
  • Interest in a wide set of research domains beyond their own area of expertise
  • +
  • The ability and the desire to work in a team environment
  • +
  • A desire to further develop the skills and interests relevant to effective facilitation
  • +
+ +

Communication and Interpersonal Skills

+ +
    +
  • Excellent written and verbal communication, including active and empathetic listening skills and an +ability to translate complex and domain-specific information for nonspecialists
  • +
  • Demonstrated effectiveness and comfort in teaching and public speaking
  • +
  • Success and demonstrated interest in interpersonal networking and liaising
  • +
  • The desire to work in a team environment, where staff frequently depend on one another
  • +
  • Leadership skills that inspire action and coordinate the activities of shared contributions
  • +
+ +

Technical Abilities

+ +
    +
  • Prior experience conducting research projects or other significant scholarly work with some +integration of relevant computational systems and tools
  • +
  • A demonstrated ability to understand multiple aspects of a problem and identify appropriate solutions
  • +
  • The ability to provide solution-agnostic support by focusing on research requirements and desired +outcomes
  • +
  • A desire for continuous learning of relevant technology topics
  • +
+ +

More information

+ +

More information on Research Computing Facilitators can be found in the paper +“Research Computing Facilitators: The Missing Human Link in Needs-Based Research Cyberinfrastructure”, +co-written by CHTC’s own Lauren Michael.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/research.html b/preview-fall2024-info/research.html new file mode 100644 index 000000000..61e7f6fe2 --- /dev/null +++ b/preview-fall2024-info/research.html @@ -0,0 +1,1525 @@ + + + + + + +Research Publications and Technical Information + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ +
+
+
+ +

Overview

+
    +
  • + Douglas Thain, Todd Tannenbaum, and Miron Livny, + "Distributed Computing in Practice: The Condor Experience" + Concurrency and Computation: Practice and Experience, + Vol. 17, No. 2-4, pages 323-356, February-April, 2005. + [PDF] + [BibTeX Source for Citation] +
  • +
  • + Douglas Thain and Miron Livny, + "Building Reliable Clients and Servers", + in Ian Foster and Carl Kesselman, editors, + The Grid: Blueprint for a New Computing Infrastructure, + Morgan Kaufmann, 2003, 2nd edition. ISBN: 1-55860-933-4. + [PDF] + [BibTeX Source for Citation] +
  • +
  • + Douglas Thain, Todd Tannenbaum, and Miron Livny, + "Condor and the Grid", + in Fran Berman, Anthony J.G. Hey, Geoffrey Fox, editors, + Grid Computing: Making The Global Infrastructure a Reality, + John Wiley, 2003. + ISBN: 0-470-85319-0 + [PDF] + + [BibTeX Source for Citation] +
  • +
• + Todd Tannenbaum, Derek Wright, Karen Miller, and Miron Livny, + "Condor - A Distributed Job Scheduler", + in Thomas Sterling, editor, + Beowulf Cluster Computing with Linux, + The MIT Press, 2002. + ISBN: 0-262-69274-0 + [Postscript] + [PDF] + [BibTeX Source for Citation] + [MIT Press' Web Page] + + The MIT Press is pleased to present material from a preliminary draft of + Beowulf Cluster Computing with Linux. + This material is Copyright 2002 Massachusetts Institute of Technology, and + may not be used or distributed for any commercial purpose without the + express written consent of The MIT Press. Because + this material was a draft chapter, neither + The MIT Press nor the authors can be held liable for changes or + alterations in the final edition.
  • +
  • + Jim Basney and Miron Livny, + "Deploying a High Throughput Computing Cluster", + High Performance Cluster Computing, Rajkumar Buyya, Editor, + Vol. 1, Chapter 5, Prentice Hall PTR, May 1999. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • +
  • + Miron Livny, Jim Basney, Rajesh Raman, and Todd Tannenbaum, + "Mechanisms for High Throughput Computing", + SPEEDUP Journal, Vol. 11, No. 1, June 1997. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • +
  • + Jim Basney, Miron Livny, and Todd Tannenbaum, + "High Throughput Computing with Condor", + HPCU news, Volume 1(2), June 1997. + +
  • +
  • + D. H. J Epema, Miron Livny, R. van Dantzig, X. Evers, and Jim Pruyne, + "A Worldwide Flock of Condors : Load Sharing among Workstation Clusters" + Journal on Future Generations of Computer Systems, Volume 12, 1996 + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • +
  • + Scott Fields, + "Hunting for Wasted Computing Power", + 1993 Research Sampler, University of Wisconsin-Madison. + [HTML] +
  • +
  • + Michael Litzkow, Miron Livny, and Matt Mutka, + "Condor - A Hunter of Idle Workstations", + Proceedings of the 8th International Conference of Distributed Computing Systems, + pages 104-111, June, 1988. + [PDF] + [BibTeX Source for Citation] +
  • +
  • + Michael Litzkow, + "Remote Unix - Turning Idle Workstations into Cycle Servers", + Proceedings of Usenix Summer Conference, pages 381-384, 1987. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • +
+ + +

Matchmaking and ClassAds

+
    +
  • + Nicholas Coleman, "Distributed Policy Specification and Interpretation with + Classified Advertisements", Practical Aspects of Declarative Languages, Lecture + Notes in Computer Science Volume 7149, 2012, pp 198-211, January 2012. + [PDF] +
  • + +
  • + Rajesh Raman, Miron Livny, and Marvin Solomon, + "Policy Driven Heterogeneous Resource Co-Allocation with Gangmatching", + Proceedings of the Twelfth IEEE International Symposium on + High-Performance Distributed Computing, June, 2003, Seattle, WA + [Postscript] + [PDF] +
  • +
  • + Nicholas Coleman, Rajesh Raman, Miron Livny and Marvin Solomon, + "Distributed Policy Management and Comprehension with Classified + Advertisements", + University of Wisconsin-Madison Computer Sciences Technical Report #1481, + April 2003. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • +
• + Nicholas Coleman, "An Implementation of Matchmaking Analysis in Condor", + Masters' Project report, University of Wisconsin, Madison, May 2001. + [Postscript] + [PDF] + [BibTeX Source for Citation]
  • +
  • + Rajesh Raman, Miron Livny, and Marvin Solomon, + "Resource Management through Multilateral Matchmaking", + Proceedings of the Ninth IEEE Symposium on High Performance Distributed Computing (HPDC9), + Pittsburgh, Pennsylvania, August 2000, pp 290-291. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • +
  • + Rajesh Raman, Miron Livny, and Marvin Solomon, + "Matchmaking: Distributed Resource Management for High Throughput Computing", + Proceedings of the Seventh IEEE International Symposium on High Performance Distributed Computing, July 28-31, 1998, Chicago, IL. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • +
+ + +

Workflow and DAGMan

+
    +
  • Peter Couvares, Tevik Kosar, Alain Roy, Jeff Weber and Kent + Wenger, "Workflow in Condor", in In Workflows for e-Science, Editors: I.Taylor, E.Deelman, D.Gannon, + M.Shields, Springer Press, January 2007 (ISBN: 1-84628-519-4) + [PDF] +
  • +
+ + +

Resource Management

+
    +
  • + Zhe Zhang, Brian Bockelman, Dale Carder, and Todd Tannenbaum, + "Lark: Bringing Network Awareness to High Throughput Computing", + Proceedings of the 15th IEEE/ACM International Symposium on Cluster, Cloud and Grid Computing (CCGrid 2015), Shenzhen, Guangdong, China, May 2015. + [PDF] +
  • +
  • + Jim Basney and Miron Livny, + "Managing Network Resources in Condor", + Proceedings of the Ninth IEEE Symposium on High Performance Distributed Computing (HPDC9), + Pittsburgh, Pennsylvania, August 2000, pp 298-299. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • +
  • + Jim Basney and Miron Livny, + "Improving Goodput by Co-scheduling CPU and Network Capacity", + International Journal of High Performance Computing Applications, + Volume 13(3), Fall 1999. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • +
• + Miron Livny and Rajesh Raman, + "High Throughput Resource Management", + chapter 13 in The Grid: Blueprint for a New Computing Infrastructure, + Morgan Kaufmann, San Francisco, California, 1999. + [Postscript] + [PDF] + [BibTeX Source for Citation] + + Morgan Kaufmann is pleased to present material from a preliminary draft of + High Performance Distributed Computing: Building a Computational Grid; + the material is Copyright 1997 Morgan Kaufmann Publishers. This + material may not be used or distributed for any commercial purpose without the + express written consent of Morgan Kaufmann Publishers. Please note that + this material is a draft of a forthcoming publication, and as such neither + Morgan Kaufmann nor the author can be held liable for changes or + alterations in the final edition.
  • +
  • + Matt Mutka and Miron Livny, + "The Available Capacity of a Privately Owned Workstation Environment", + Performance Evaluation, vol. 12, no. 4 pp. 269-284, July, 1991. + [BibTeX Source for Citation] +
  • +
  • + Matt Mutka and Miron Livny, + "Profiling Workstations' Available Capacity for Remote Execution", + Performance '87,12th IFIP WG 7.3, pp. 529-544, December 1987. + [PDF] + [BibTeX Source for Citation] +
  • +
+ + +

Checkpointing

+
    +
  • + Joe Meehean and Miron Livny, + "A Service Migration Case Study: Migrating the Condor Schedd", + Midwest Instruction and Computing Symposium, April 2005. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • +
  • + Jim Basney, Miron Livny, and Paolo Mazzanti, + "Utilizing Widely Distributed Computational Resources Efficiently with Execution Domains", + Computer Physics Communications, 2001. + (This is an extended version of the CHEP 2000 paper below.) + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • +
  • + Jim Basney, Miron Livny, and Paolo Mazzanti, + "Harnessing the Capacity of Computational Grids for High Energy Physics", + Proceedings of the International Conference on Computing in High Energy and + Nuclear Physics (CHEP 2000), + February 2000, Padova, Italy. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • +
  • + Michael Litzkow, Todd Tannenbaum, Jim Basney, and Miron Livny, + "Checkpoint and Migration of UNIX Processes in the Condor Distributed Processing System", + University of Wisconsin-Madison Computer Sciences Technical Report #1346, + April 1997. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • +
  • + Jim Pruyne and Miron Livny, + "Managing Checkpoints for Parallel Programs", + Workshop on Job Scheduling Strategies for Parallel Processing IPPS '96. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • +
  • + Todd Tannenbaum and Michael Litzkow, + "Checkpointing and Migration of UNIX Processes in the Condor Distributed Processing System", + Dr Dobbs Journal, Feb 1995. + [HTML] + [Postscript] + [BibTeX Source for Citation] +
  • +
  • + Michael Litzkow and Marvin Solomon, + "Supporting Checkpointing and Process Migration Outside the UNIX Kernel", + Usenix Conference Proceedings, + San Francisco, CA, January 1992, pages 283-290. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • +
+ + +

Data Intensive Computing

+
    + +
  • + Parag Mhashilkar, Zachary Miller, Rajkumar Kettimuthu, Gabriele Garzoglio, Burt + Holzman, Cathrin Weiss, Xi Duan, and Lukasz Lacinski, "End-To-End Solution for + Integrated Workload and Data Management using GlideinWMS and Globus Online", + Journal of Physics: Conference Series, Volume 396, Issue 3, Year 2012 + [PDF] +
  • + +
  • + Ian T. Foster, Josh Boverhof, Ann Chervenak, Lisa Childers, Annette DeSchoen, + Gabriele Garzoglio, Dan Gunter, Burt Holzman, Gopi Kandaswamy, Raj Kettimuthu, + Jack Kordas, Miron Livny, Stuart Martin, Parag Mhashilkar, Zachary Miller, + Taghrid Samak, Mei-Hui Su, Steven Tuecke, Vanamala Venkataswamy, Craig Ward, + Cathrin Weiss, + "Reliable high-performance data transfer via Globus Online", + in Proc. SciDAC 2011, Denver, CO, July 10-14. + [PDF] +
  • + +
  • + Ann Chervenak, Ewa Deelman, Miron Livny, Mei-Hui Su, Rob Schuler, Shishir Bharathi, Gaurang Mehta, Karan Vahi, + "Data Placement for Scientific Applications in Distributed Environments", + In Proceedings of the 8th IEEE/ACM International Conference on Grid + Computing (Grid 2007), Austin, TX, September 2007. + [PDF] + [BibTeX Source for Citation] +
  • + + +
  • + George Kola, Tevfik Kosar, Jaime Frey, Miron Livny, Robert J. Brunner and Michael Remijan, + "DISC: A System for Distributed Data Intensive Scientific Computing", + In Proceedings of the First Workshop on Real, Large Distributed Systems (WORLDS'04), San Francisco, CA, December 2004, in conjunction with OSDI'04 + [PostScript] + [PDF] +
  • + + +
  • + George Kola, Tevfik Kosar and Miron Livny, + "Profiling Grid Data Transfer Protocols and Servers", + In Euro-Par 2004, Pisa, Italy, September 2004. + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + George Kola, Tevfik Kosar and Miron Livny, + "A Fully Automated Fault-tolerant System for Distributed Video Processing and Off-site Replication", + In + The 14th ACM International Workshop on Network and Operating Systems Support for Digital Audio and Video (NOSSDAV 2004), Kinsale, Ireland, June 2004. + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + George Kola, Tevfik Kosar and Miron Livny, + "Run-time Adaptation of Grid Data-placement Jobs", + In Parallel and Distributed Computing Practices, 2004. + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Tevfik Kosar and Miron Livny, "Stork: Making Data Placement a First Class Citizen in the Grid", + In Proceedings of 24th IEEE Int. Conference on Distributed Computing Systems (ICDCS2004), Tokyo, Japan, March 2004. + [PDF] +
  • + +
  • + Tevfik Kosar, George Kola and Miron Livny, "A Framework for Self-optimising, Fault-tolerant, High Performance Bulk Data Transfers in a Heterogeneous Grid Environment", + Proceedings of 2nd Int. Symposium on Parallel and Distributed Computing (ISPDC2003), Ljubljana, Slovenia, October 2003. + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + George Kola, Tevfik Kosar and Miron Livny, + "Run-time Adaptation of Grid Data-placement Jobs", + Proceedings of Int. Workshop on Adaptive Grid Middleware (AGridM2003), New Orleans, LA, September 2003. + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Tevfik Kosar, George Kola and Miron Livny, + "Building Data Pipelines for High Performance Bulk Data Transfers in a Heterogeneous Grid Environment", + Technical Report CS-TR-2003-1487, University of Wisconsin-Madison Computer Sciences, August 2003. + [PDF] +
  • +
+ +

Grid Computing

+
    +
  • + C. Acosta-Silva, A. Delgado Peris, J. Flix, J. Frey, J.M. Hernández, A. Pérez-Calero Yzquierdo, and T. Tannenbaum + "Exploitation of network-segregated CPU resources in CMS", + Proceedings of the 25th International Conference on Computing in High Energy and Nuclear Physics (CHEP 2021), May 2021. + [PDF] +
  • +
  • + Brian Bockelman, Miron Livny, Brian Lin, Francesco Prelz + "Principles, technologies, and time: The translational journey of the HTCondor-CE", + Journal of Computational Science, 2020 + [PDF] + [BibTeX Source for Citation] +
  • + +
• + B Bockelman, T Cartwright, J Frey, E M Fajardo, B Lin, M Selmeci, T Tannenbaum and M Zvada + "Commissioning the HTCondor-CE for the Open Science Grid", + Journal of Physics: Conference Series, Vol. 664, 2015 + [PDF] + [BibTeX Source for Citation]
  • + +
  • + I Sfiligoi, D C Bradley, Z Miller, B Holzman, F Würthwein, J M Dost, K Bloom, + and C Grandi, "glideinWMS experience with glexec", + Journal of Physics: Conference Series, Volume 396, Issue 3, Year 2012 + [PDF] +
  • + +
  • + W Andrews, B Bockelman, D Bradley, J Dost, D Evans, I Fisk, J Frey, B Holzman, M Livny, T Martin, A McCrea, A Melo, S Metson, H Pi, I Sfiligoi, P Sheldon, T Tannenbaum, A Tiradani, F Würthwein and D Weitzel, + "Early experience on using glideinWMS in the cloud", + Journal of Physics: Conference Series, Vol. 331, No. 6, 2011 + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Igor Sfiligoi, Greg Quinn, Chris Green, Greg Thain, + "Pilot Job Accounting and Auditing in Open Science Grid", + The 9th IEEE/ACM International Conference on Grid Computing, + Tsukuba, Japan, 2008 + [PDF] +
  • + +
  • + Alexandru Iosup, Dick H.J. Epema, Todd Tannenbaum, Matthew Farrellee, Miron Livny, + "Inter-Operating Grids through Delegated MatchMaking", + in proceedings of the International Conference for High Performance + Computing, Networking, Storage and Analysis (SC07), + Reno, Nevada, November 2007. + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Sander Klous, Jamie Frey, Se-Chang Son, Douglas Thain, Alain Roy, + Miron Livny, and Jo van den Brand, "Transparent Access to Grid + Resources for User Software", in Concurrency and Computation: + Practice and Experience, Volume 18, Issue 7, pages 787-801, 2006. +
  • + +
  • + Sechang Son, Matthew Farrellee, and Miron Livny, + "A Generic Proxy Mechanism for Secure Middlebox Traversal", + CLUSTER 2005, + Boston, MA, September 26-30, 2005. + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Bruce Beckles, Sechang Son, and John Kewley, + "Current methods for negotiating firewalls for the Condor system", + Proceedings of the 4th UK e-Science All Hands Meeting 2005, + Nottingham, UK, September 19-22, 2005. + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Sechang Son, Bill Allcock and Miron Livny, + "CODO: Firewall Traversal by Cooperative On-Demand Opening", + Proceedings of the 14th IEEE Symposium on High Performance Distributed Computing (HPDC14), + Research Triangle Park, NC, July 24-27, 2005. + [PDF] + [MS Word] + [BibTeX Source for Citation] +
  • + +
  • + Clovis Chapman, Paul Wilson, Todd Tannenbaum, Matthew Farrellee, Miron Livny, John Brodholt, and Wolfgang Emmerich, + "Condor services for the global grid: Interoperability between Condor and OGSA", + Proceedings of the 2004 UK e-Science All Hands Meeting, ISBN 1-904425-21-6, pages 870-877, Nottingham, UK, August 2004. + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Clovis Chapman, Charaka Goonatilake, Wolfgang Emmerich, Matthew Farrellee, Todd Tannenbaum, Miron Livny, Mark Calleja, and Martin Dove, + "Condor BirdBath: Web Service interfaces to Condor", + Proceedings of the 2005 UK e-Science All Hands Meeting, ISBN 1-904425-53-4, pages 737-744, Nottingham, UK, September 2005. + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Sriya Santhanam, Pradheep Elango, Andrea Arpaci-Dusseau, and Miron Livny, + "Deploying Virtual Machines as Sandboxes for the Grid", + WORLDS 2005, San Francisco, CA, December 2004 + [PDF] + [BibTeX Source for Citation] +
  • + + +
  • + George Kola, Tevfik Kosar and Miron Livny, + "Phoenix: Making Data-intensive Grid Applications Fault-tolerant", + In Grid 2004, Pittsburgh, PA, November 2004 + [PostScript] + [PDF] +
  • + + +
  • + Andrew Baranovski, Gabriele Garzoglio, Igor Terekhov, Alain Roy and Todd Tannenbaum, + "Management of Grid Jobs and Data within SAMGrid", + Proceedings of the 2004 IEEE International Conference on Cluster Computing, + pages 353-360, + San Diego, CA, September 2004. + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + George Kola, Tevfik Kosar and Miron Livny, + "A Client-centric Grid Knowledgebase", + Proceedings of the 2004 IEEE International Conference on Cluster Computing, + pages 431-438, + San Diego, CA, September 2004. + [PostScript] + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + George Kola, Tevfik Kosar and Miron Livny, + "Profiling Grid Data Transfer Protocols and Servers", + In Euro-Par 2004, Pisa, Italy, September 2004. + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + John Bent, Douglas Thain, Andrea Arpaci-Dusseau, Remzi Arpaci-Dusseau, and Miron Livny, + "Explicit Control in a Batch Aware Distributed File System", + Proceedings of the First USENIX/ACM Conference on Networked Systems Design and Implementation, + San Francisco, CA, March 2004. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Sechang Son and Miron Livny, "Recovering Internet Symmetry in Distributed Computing", + Proceedings of the 3rd International Symposium on Cluster Computing and the Grid, Tokyo, Japan, May 2003. + [PDF] + [MS Word] + [BibTeX Source for Citation] +
  • + +
  • + Douglas Thain, John Bent, Andrea Arpaci-Dusseau, Remzi Arpaci-Dusseau and Miron Livny, + "Pipeline and Batch Sharing in Grid Workloads", + in Proceedings of the Twelfth IEEE Symposium on High Performance Distributed Computing, + Seattle, WA, 2003. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Douglas Thain and Miron Livny, + "The Ethernet Approach to Grid Computing", + in Proceedings of the Twelfth IEEE Symposium on High Performance Distributed Computing, + Seattle, WA, 2003. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + John Bent, Venkateshwaran Venkataramani, Nick LeRoy, + Alain Roy, Joseph Stanley, Andrea Arpaci-Dusseau, + Remzi Arpaci-Dusseau, and Miron Livny,   + "NeST - A Grid Enabled Storage Appliance", + in Jan Weglarz and Jarek Nabrzyski and Jennifer Schopf and + Macief Stroinkski, editors, + Grid Resource Management, + Kluwer Academic Publishers, 2003. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Douglas Thain, Todd Tannenbaum, and Miron Livny, + "Condor and the Grid", + in Fran Berman, Anthony J.G. Hey, Geoffrey Fox, editors, + Grid Computing: Making The Global Infrastructure a Reality, + John Wiley, 2003. + ISBN: 0-470-85319-0 + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Francesco Giacomini, + Francesco Prelz, + Massimo Sgaravatto, + Igor Terekhov, + Gabriele Garzoglio, + and Todd Tannenbaum, + "Planning on the Grid: A Status Report [DRAFT]", + Technical Report PPDG-20, + Particle Physics Data Grid collaboration (http://www.ppdg.net), + October 2002. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + John Bent, Venkateshwaran Venkataramani, + Nick LeRoy, + Alain Roy, + Joseph Stanley, + Andrea Arpaci-Dusseau, + Remzi H. Arpaci-Dusseau, + and Miron Livny,   + "Flexibility, Manageability, and Performance in a Grid Storage Appliance", + Proceedings of the Eleventh IEEE Symposium on High Performance Distributed Computing, + Edinburgh, Scotland, July 2002. + [Abstract]   + [Postscript]   + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Douglas Thain and Miron Livny, + "Error Scope on a Computational Grid: Theory and Practice", + Proceedings of the Eleventh IEEE Symposium on High Performance Distributed Computing (HPDC11), + Edinburgh, Scotland, July 2002. + [Postscript] + [PDF] + [BibTeX Source for Citation] + (This paper also describes aspects of Condor's Java Universe) +
  • + +
  • + Douglas Thain, John Bent, Andrea Arpaci-Dusseau, Remzi Arpaci-Dusseau, and Miron Livny, + "Gathering at the Well: Creating Communities for Grid I/O", + in Proceedings of Supercomputing 2001, + Denver, Colorado, November 2001. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + James Frey, Todd Tannenbaum, Ian Foster, Miron Livny, and Steven Tuecke, + "Condor-G: A Computation Management Agent for Multi-Institutional Grids", + Journal of Cluster Computing + volume 5, pages 237-246, 2002. + [BibTeX Source for Citation] +
  • + +
  • + James Frey, Todd Tannenbaum, Ian Foster, Miron Livny, and Steven Tuecke, + "Condor-G: A Computation Management Agent for Multi-Institutional Grids", + Proceedings of the Tenth IEEE Symposium on High Performance Distributed Computing (HPDC10) + San Francisco, California, August 7-9, 2001. + [Postscript] + [PDF] + [MS Word] + [BibTeX Source for Citation] +
  • + +
  • + Douglas Thain, Jim Basney, Se-Chang Son, and Miron Livny, + "The Kangaroo Approach to Data Movement on the Grid", + in Proceedings of the Tenth IEEE Symposium on High Performance Distributed Computing (HPDC10), + San Francisco, California, August 7-9, 2001. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + John Bent, "Building Storage Appliances for the Grid and Beyond", + Masters' Project report, University of Wisconsin, Madison, May 2001. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • +
+ + +

Master-Worker Computing (MW, PVM, MPI, CARMI)

+
    + +
  • Elisa Heymann, Miquel A. Senar, Emilio Luque, and Miron Livny, + "Adaptive Scheduling for Master-Worker Applications on the Computational Grid". + in Proceedings of the First IEEE/ACM International Workshop on Grid Computing (GRID 2000), Bangalore, India, December 17, 2000. + [Postscript] + [PDF] + [MS Word] + [BibTeX Source for Citation] +
  • + +
  • Elisa Heymann, Miquel A. Senar, Emilio Luque, and Miron Livny, + "Evaluation of an Adaptive Scheduling Strategy for Master-Worker Applications on Clusters of Workstations". + in Proceedings of the 7th International Conference on High Performance Computing (HiPC 2000), Bangalore, India, December 17, 2000. + [Postscript] + [PDF] + [MS Word] + [BibTeX Source for Citation] +
  • + +
  • Jeff Linderoth, Sanjeev Kulkarni, Jean-Pierre Goux, and Michael Yoder, + "An Enabling Framework for Master-Worker Applications on the Computational Grid", + Proceedings of the Ninth IEEE Symposium on High Performance Distributed Computing (HPDC9), + Pittsburgh, Pennsylvania, August 2000, pp 43-50. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Jeff Linderoth, Jean-Pierre Goux, and Michael Yoder, + "Metacomputing and the Master-Worker Paradigm", + Preprint ANL/MCS-P792-0200, + Mathematics and Computer Science Division, Argonne National Laboratory, February 2000. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Jim Pruyne and Miron Livny, + "Providing Resource Management Services to Parallel Applications", + Proceedings of the Second Workshop on Environments and Tools for Parallel Scientific Computing, May, 1994. + [Postscript] + [BibTeX Source for Citation] +
  • +
+ + +

Java

+
    +
  • + Douglas Thain and Miron Livny, + "Error Scope on a Computational Grid: Theory and Practice", + Proceedings of the Eleventh IEEE Symposium on High Performance Distributed Computing (HPDC11), + Edinburgh, Scotland, July 2002. + [Postscript] + [PDF] + [BibTeX Source for Citation] + (This paper describes aspects of error handling in Condor's Java Universe) +
  • + +
  • + Al Globus, Eric Langhirt, Miron Livny, Ravishankar Ramamurthy, Marvin Solomon, and Steve Traugott, + "JavaGenes and Condor: cycle-scavenging genetic algorithms", + Proceedings of the ACM Conference on JavaGrande, + San Francisco, California, 2000. + [PDF] + [BibTeX Source for Citation] + (This paper describes checkpointing Java applications for opportunistic computing.) +
  • +
+ + +

Remote Execution and Interposition Agents

+
    + +
  • Douglas Thain and Miron Livny, + "Parrot: Transparent User-Level Middleware for Data-Intensive + Computing", + Scalable Computing: Practice and Experience, + Volume 6, Number 3, Pages 9-18, 2005. + [PDF] +
  • + + + +
  • + Douglas Thain and Miron Livny, + "Parrot: Transparent User-Level Middleware for Data-Intensive Computing", + Workshop on Adaptive Grid Middleware, + New Orleans, Louisiana, + September 2003. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Douglas Thain and Miron Livny, + "Error Management in the Pluggable File System", + Technical Report 1448, + Computer Sciences Department, University of Wisconsin, October 2002. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Douglas Thain and Miron Livny, + "Multiple Bypass: Interposition Agents for Distributed Computing", + The Journal of Cluster Computing, + Volume 4, 2001, pp 39-47. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Douglas Thain and Miron Livny, + "Bypass: A Tool for Building Split Execution Systems", + Proceedings of the Ninth IEEE Symposium on High Performance Distributed Computing (HPDC9), + Pittsburgh, Pennsylvania, August 2000, pp 79-86. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Victor C. Zandy, Barton P. Miller, and Miron Livny, + "Process Hijacking", + The Eighth IEEE International Symposium on High Performance Distributed Computing (HPDC8), + Redondo Beach, California, August 1999, pp. 177-184. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Miron Livny and Michael Litzkow, + "Making Workstations a Friendly Environment for Batch Jobs", + Third IEEE Workshop on Workstation Operating Systems, + April 1992, Key Biscayne, Florida. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • +
+ + +

Security

+
    + +
  • + Zach Miller, Dan Bradley, Todd Tannenbaum, Igor Sfiligoi, + "Flexible Session Management in a Distributed Environment", + Journal of Physics: Conference Series Volume 219, Issue 4, Year 2010., + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Gabriele Garzoglio, Ian Alderman, Mine Altunay, Rachana Ananthakrishnan, Joe + Bester, Keith Chadwick, Vincenzo Ciaschini, Yuri Demchenko, Andrea Ferraro, + Alberto Forti, David L. Groep, Ted Hesselroth, John Hover, Oscar Koeroo, Chad + La Joie, Tanya Levshina, Zach Miller, Jay Packard, Håkon Sagehaug, Valery + Sergeev, Igor Sfiligoi, Neha Sharma, Frank Siebenlist, Valerio Venturi, John + Weigand, + "Definition and Implementation of a SAML-XACML Profile for Authorization + Interoperability Across Grid Middleware in OSG and EGEE", + Journal of Grid Computing, Volume 7, Issue 3, Year 2009. + [PDF] +
  • + +
  • + Hao Wang, Somesh Jha, Miron Livny, and Patrick D. McDaniel, + "Security Policy Reconciliation in Distributed Computing Environments", + IEEE Fifth International Workshop on Policies for Distributed + Systems and Networks (POLICY 2004), + June 2004, Yorktown Heights, New York. + [PDF] + [BibTeX Source for Citation] +
  • +
+ + +

Scalability and Performance

+
    + +
  • + E M Fajardo, J M Dost, B Holzman, T Tannenbaum, J Letts, A Tiradani, B Bockelman, J Frey and D Mason, + "How much higher can HTCondor fly?", + Journal of Physics: Conference Series, Vol. 664, 2015 + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Dan Bradley, Timothy St Clair, Matthew Farrellee, Ziliang Guo, Miron Livny, Igor Sfiligoi, + and Todd Tannenbaum, + "An update on the scalability limits of the Condor batch system", + Journal of Physics: Conference Series, Vol. 331, No. 6, 2011. + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + D Bradley, I Sfiligoi, S Padhi, J Frey and T Tannenbaum, + "Scalability and interoperability within glideinWMS", + Journal of Physics: Conference Series, Vol. 219, No. 6, 2010 + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + D Bradley, S Dasu, M Livny, A Mohapatra, T Tannenbaum and G Thain, + "Condor enhancements for a rapid-response adaptive computing environment for LHC", + Journal of Physics: Conference Series Vol. 219, No. 6, 2010. + [PDF] + [BibTeX Source for Citation] +
  • +
+ + +

Experience

+
    +
  • + Michael Litzkow and Miron Livny, + "Experience With The Condor Distributed Batch System", + IEEE Workshop on Experimental Distributed Systems, Oct 1990, Huntsville, Al. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • +
+ + +

Scientific Applications

+
    + +
  • + Douglas Thain, John Bent, Andrea Arpaci-Dusseau, Remzi Arpaci-Dusseau and Miron Livny, + "Pipeline and Batch Sharing in Grid Workloads", + in Proceedings of the Twelfth IEEE Symposium on High Performance Distributed Computing, + Seattle, WA, 2003. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • +
  • + Jim Basney, Rajesh Raman, and Miron Livny, + "High Throughput Monte Carlo", + Proceedings of the Ninth SIAM Conference on Parallel Processing for Scientific Computing, + March 22-24, 1999, San Antonio, Texas. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • +
  • + Chungmin Chen, Kenneth Salem, and Miron Livny, + "The DBC: Processing Scientific Data Over the Internet", + 16th International Conference on Distributed Computing Systems, + May 1996. + [Postscript] + [PDF] + [BibTeX Source for Citation] +
  • +
+ + +

Scheduling

+
    + + +
• + Mark Silberstein, Dan Geiger, Assaf Schuster, and Miron Livny, + "Scheduling Mixed Workloads in Multi-grids: The Grid Execution Hierarchy", + Proceedings of the 15th IEEE Symposium on High Performance Distributed Computing (HPDC), + 2006. + [PDF] + [BibTeX Source for Citation]
  • +
  • Derek Wright, "Cheap cycles from the desktop to the dedicated + cluster: combining opportunistic and dedicated scheduling with + Condor", Conference on Linux Clusters: The HPC Revolution, + June, 2001, Champaign - Urbana, IL. + [Postscript] + [PDF] + [BibTeX Source for Citation] + [Power Point slides of talk presentation] +
  • + +
  • + P. E. Krueger and Miron Livny, + "A Comparison of Preemptive and Non-Preemptive Load Distributing", + Proc. of the 8th International Conference on Distributed Computing Systems, + pp. 123-130, June 1988. + [PDF] + [BibTeX Source for Citation] +
  • + +
  • + Matt Mutka and Miron Livny, + "Scheduling Remote Processing Capacity In A Workstation-Processing Bank Computing System", + Proceedings of the 7th International Conference of Distributed Computing Systems, + pp. 2-9, September, 1987. + [PDF] + [BibTeX Source for Citation] +
  • + +
  • Alain Roy and Miron Livny, "Condor and Preemptive Resume + Scheduling", Published in Grid Resource Management: State of the Art and + Future Trends, Fall 2003, pages 135-144, Fall 2003, Edited by Jarek + Nabrzyski, Jennifer M. Schopf and Jan Weglarz, published by Kluwer + Academic Publishers. + [PDF] +
  • + +
+ + +

NMI Build & Test Laboratory

+
    + +
  • Andrew Pavlo, Peter Couvares, Rebekah Gietzel, Anatoly Karp, Ian D. Alderman, Miron Livny, and Charles Bacon, + "The NMI Build & Test Laboratory: Continuous Integration Framework for Distributed Computing Software", + Proceedings of LISA '06: Twentieth Systems Administration Conference, + Washington, DC, December 2006, pp. 263 - 273. + [Postscript] + [PDF] + [BibTeX Source for Citation] + [PDF of presentation slides] +
  • +
  • A. Iosup, D.H.J. Epema, P. Couvares, A. Karp, and M. Livny, + "Build-and-Test Workloads for Grid Middleware: Problem, Analysis, and Applications", + Seventh IEEE International Symposium on Cluster Computing and the Grid (CCGRID), + IEEE Computer Society, Pages 205-213, May 2007. + [PDF] + [BibTeX Source for Citation] + [PDF of presentation slides] +
  • +
+ + +

Background Work

+
    + +
  • + Miron Livny and Myron Melman, + "Load Balancing in Homogeneous Broadcast Distributed Systems", + Proceedings of Computer Network Performance Symposium, + April 13-14, 1982, + College Park, Maryland. + [PDF] + [BibTeX Source for Citation] +
  • +
+ + +

Miscellaneous

+
    +
  • Douglas Thain, Todd Tannenbaum, and Miron Livny, "How to Measure a + Large Open Source Distributed System", in Concurrency and + Computation: Practice and Experience, to appear in 2006. +
  • +
  • Zach Miller, Todd Tannenbaum, and Ben Liblit, "Enforcing Murphy's + Law for Advance Identification of Run-time Failures", Proceedings of + USENIX 2012. + [PDF] +
  • +
+ + +

PhD Dissertations from HTCondor team members at UW-Madison

+ +
+
+
+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/script/cibuild b/preview-fall2024-info/script/cibuild new file mode 100755 index 000000000..4543aff21 --- /dev/null +++ b/preview-fall2024-info/script/cibuild @@ -0,0 +1,11 @@ +#!/bin/sh +set -e + +if [ "$1" = "production" ]; then + echo "Building Site For Production..."; + JEKYLL_ENV=production bundle exec jekyll build +else + echo "Building Site For Development..."; + bundle exec jekyll build +fi; + diff --git a/preview-fall2024-info/script/cideploy b/preview-fall2024-info/script/cideploy new file mode 100755 index 000000000..7761c16df --- /dev/null +++ b/preview-fall2024-info/script/cideploy @@ -0,0 +1,46 @@ +#!/bin/bash +set -x +set -e + +: "${BRANCH:=master}" +: "${TARGET_REPO=CHTC/chtc.github.io.git}" +JEKYLL_OUTPUT_FOLDER=_site + +echo "$GITHUB_EVENT_NAME" + +commitmessage=$(mktemp) +trap 'rm -f "$commitmessage"' EXIT + +git log -1 --pretty=format:"GHA $GITHUB_RUN_NUMBER: \"%s\" by %an [%h] pushed to GitHub Pages + +%b" > "$commitmessage" + +echo -e "Starting to deploy to Github Pages\n" + +if [[ $GITHUB_ACTIONS == true ]]; then + git config --global user.email "actions@github.com" + git config --global user.name "GitHub Actions" +fi + +#using token clone target repository +git clone --quiet --branch=$BRANCH git@github.com:$TARGET_REPO built_website > /dev/null +#go into directory and copy data we're interested in to that directory +mkdir -p built_website +cd built_website +rsync -av --delete --include-from=../script/rsync_include_list --exclude-from=../script/rsync_exclude_list "../$JEKYLL_OUTPUT_FOLDER/" . +# We need this single well-known directory to support OIDC metadata discovery for SciTokens. +rsync -av --delete ../$JEKYLL_OUTPUT_FOLDER/.well-known . +for issuer in icecube nsdf; do + mkdir -p ./$issuer + rsync -av --delete ../$JEKYLL_OUTPUT_FOLDER/$issuer/.well-known ./$issuer +done +# Prevent GitHub from trying to render files as MarkDown - we're just static HTML! +touch .nojekyll +set -x +#add, commit and push files +git add -f -A +git commit -F "$commitmessage" +if [[ -z $NO_PUSH ]]; then + git push -fq origin $BRANCH > /dev/null +fi +echo -e "Deploy completed\n" diff --git a/preview-fall2024-info/script/rsync_exclude_list b/preview-fall2024-info/script/rsync_exclude_list new file mode 100644 index 000000000..1b8e43913 --- /dev/null +++ b/preview-fall2024-info/script/rsync_exclude_list @@ -0,0 +1,7 @@ +.* +Vagrantfile +*.log +*.enc +script/ +rsync_exclude_list +rsync_include_list diff --git a/preview-fall2024-info/script/rsync_include_list b/preview-fall2024-info/script/rsync_include_list new file mode 100644 index 000000000..c4c43085c --- /dev/null +++ b/preview-fall2024-info/script/rsync_include_list @@ -0,0 +1,2 @@ +.htaccess +.gitignore diff --git a/preview-fall2024-info/services.html b/preview-fall2024-info/services.html new file mode 100644 index 000000000..6e768e27c --- /dev/null +++ b/preview-fall2024-info/services.html @@ -0,0 +1,357 @@ + + + + + + +Services + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+

+ Services +

+ +
+
+
+ +
+
+
+ + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/spalding-lab.html b/preview-fall2024-info/spalding-lab.html new file mode 100644 index 000000000..fc9d31743 --- /dev/null +++ b/preview-fall2024-info/spalding-lab.html @@ -0,0 +1,366 @@ + + + + + + +Plant physiologists used high throughput computing to remedy research “bottleneck” + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Plant physiologists used high throughput computing to remedy research “bottleneck” +

+

HTC resources increased the efficiency of the Spalding group’s data analyses, enabling the group to expand the scope of its research.

+ +

Adopting high throughput computing in 2006 was a pivotal moment for University of Wisconsin–Madison molecular plant physiologist Edgar Spalding and his research group. Over the past five years, the group has used more than 200,000 computing hours, in part to facilitate “the development of the measurement algorithm and the automatic processing of tens-of-thousands of images” of maize seedling root growth, Spalding says.

+ +
+ A graph of the average gravitropic response of each of the maize types +
The graph shows the average gravitropic response of each of the maize types. Statistical genetics techniques mapped variation in these curves (the phenotype) to genes that control the process. HTCondor scheduling software and computing resources at CHTC were used to develop the measurement algorithm and to process the tens of thousands of images automatically.
+
+ +

Spalding’s research group was studying Arabidopsis plant populations with genetically diverse members and tracking their response to light or gravity due to a mutation — one seedling at a time. Since Arabidopsis seedlings are only a few millimeters tall, Spalding says his research group found that obtaining high-resolution digital images was the best approach to measure the direction of their growth. A computer collected images every few minutes as the seedlings grew. “If we could characterize this whole genetically diverse population, we could use the powerful techniques of statistical genetics to track down the genes affecting the process. That meant we now had thousands and thousands of images to measure,” Spalding explains.

+ +

The thousands of digital images to measure created a bottleneck in Spalding’s research. That was before he led an effort with the Center for High Throughput Computing (CHTC) Director Miron Livny, other plant biologists, and computer scientists to develop a proposal for a competitive National Science Foundation (NSF) grant that would produce cyberinfrastructure to support plant biology research. Though the application wasn’t successful, the connections Spalding made from that meeting were meaningful nonetheless.

+ +

Speaking with Livny at the meeting — from whom he learned about the capabilities of the HTC approach that was pioneered on the UW–Madison campus — helped Spalding realize how inefficiently his group had been analyzing thousands of seedlings. “[O]ur research up until that point had been focused on one seedling at a time. Faced with large numbers of seedlings to do a broader scale of investigation meant that we had to find computing methodologies that matched our new data type, which was tens of thousands of images instead of a couple of dozen. That drove our need for a different way of computing,” Spalding describes.

+ +

When asked about which accomplishment using HTC was most impactful, Spalding said “The way we measure yield-related features from maize ears and several thousand kernels has had a large impact.” Others from around the world began asking for their help with making similar measurements. “In many cases, we can use our workflow [algorithms] running on CHTC to process their images of maize ears and kernels and return data that helps them answer their scientific or crop breeding questions,” Spalding says.

+ +

Since the goals of the experiments determine the type of data the researchers collect, they did not need to adjust the type of data they collected. Rather, adopting the HTC approach changed the way they created tools to analyze the data. Today, Spalding says his research group continues to use HTC in three ways: “from tool development to extracting the features from the images with the tool that you developed to applying it in the challenge of statistically matching it to elements of the results to elements of the genome.” As his team became more experienced in writing new algorithms to make measurements, they realized that HTC was useful in developing new methodologies; it was more than just more automation and increased computing capacity.
+ +
In other words, HTC is useful as both a development resource and a production resource. Making measurements on seedlings and then matching processes to the genome elements that control them demanded an ever-growing amount of computing capacity. “We realized that statistical modeling of the measurements from the biology to the genetic information in the population also benefited from high throughput computing.” HTC in all these cases, Spalding elaborates, “was beneficial and changed the way we work. It changed the nature of the questions we asked.” Beyond these uses of HTC, machine learning (ML) continues to become a bigger part of the group’s tool development, driving the methods used to train models to recognize features in seedlings.
+ +
Spalding has also shared his HTC experience with attendees of the annual OSG School. He emphasizes that students “should not hold back on doing something because they think computing will be a bottleneck. There are ways to bring the computing they need to their problem and they should not shy away from a question just because they think it might be difficult to compute. There are people like the CHTC staff that can remove that bottleneck if the person’s willing to learn about it.”
+ +
“Engaged and motivated collaborators like Spalding and his group is what guides CHTC in advancing the state of the art of HTC and drives our commitment to bring these advances to researchers on the UW-Madison campus and around the world,” says Livny.
+ + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/staff-list/README.md b/preview-fall2024-info/staff-list/README.md new file mode 100644 index 000000000..5fb553667 --- /dev/null +++ b/preview-fall2024-info/staff-list/README.md @@ -0,0 +1,93 @@ + +# Staff List Submodule + +Welcome to the `staff-list` submodule. This submodule is designed to manage and display information about the staff members in a structured and consistent manner. It includes details such as names, roles, images, and affiliations. To ensure uniformity and ease of management, please adhere to the guidelines provided below. + +## File Naming Conventions + +### YML Files + +Each staff member should have a corresponding `.yml` file named according to the following convention: + +``` +firstName_lastName.yml +``` + +This file contains structured data about the staff member, such as their name, image path, title, and more. + +### Image Files + +Staff member images should be stored in the `images/` directory and named following this convention: + +``` +images/firstName_lastName.jpg +``` + +or + +``` +images/firstName_lastName.png +``` + +Please ensure that the image file extension matches the one referenced in the staff member's `.yml` file. + +## YML File Format + +Each `.yml` file should adhere to the following structure: + +```yaml +name: "John Doe" +image: "images/john_doe.jpg" +title: "Lead Software Engineer" +website: "https://johndoe.com" +institution: "Morgridge Institute for Research" +promoted: true +weight: 3 +description: "John Doe is a brilliant software engineer." +status: Staff +organizations: + - path + - chtc + - osg + - pelican +``` + +### Fields Explanation + +- `name`: Full name of the staff member. +- `image`: Relative path to the staff member's image within the submodule. +- `title`: The staff member's role or title within the organization. +- `website`: (Optional) A URL to the staff member's professional or personal webpage. +- `institution`: The name of the institution to which the staff member belongs. +- `promoted`: (Optional) A boolean value indicating if the staff member is part of the executive team. Only use if true. +- `weight`: (Optional) Used to order executive staff members if `promoted` is set to `true`. +- `description`: (Optional) A brief description or bio of the staff member. +- `status`: Indicates the current status of the staff member within the organization (e.g., Leadership, Staff, Student, Past). +- `organizations`: Lists the organizations the staff member is associated with. If the correct values are not provided, the staff member will not be displayed on the respective organization's website. + +## Additional Organization-Specific Information + +For staff members associated with specific organizations (e.g., `osg`, `chtc`, `pelican`), additional information can be provided under `osg/chtc/pelican/path` with an alternative title for that organization. +See below for the example: + +```yaml +name: "John Doe" +image: "images/john_doe.jpg" +title: "Lead Software Engineer" +osg: + title: "Software Engineer" +status: Staff +organizations: + - path + - chtc + - osg +``` + +## Contribution Guidelines + +- Ensure all information is accurate and up-to-date. +- Images should be clear and professional, preferably in a uniform size or aspect ratio. +- Follow the file naming conventions strictly to avoid any inconsistencies. +- For any updates or changes, please submit a pull request for review. 
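To make these conventions easy to check before opening a pull request, a small validation script can catch most mistakes automatically. The sketch below is a minimal, hypothetical helper and not part of this repository: it assumes PyYAML is available, and the allowed organization values (`path`, `chtc`, `osg`, `pelican`) are inferred from the examples above.

```python
#!/usr/bin/env python3
# validate_staff.py -- hypothetical helper, not part of this repository.
# Checks staff YAML files against the conventions described in this README.
import re
import sys
from pathlib import Path

import yaml  # assumes PyYAML is installed

REQUIRED = {"name", "image", "title", "institution", "status", "organizations"}
ALLOWED_ORGS = {"path", "chtc", "osg", "pelican"}  # inferred from the examples above
NAME_RE = re.compile(r"^[a-z]+(_[a-z0-9]+)+\.ya?ml$")  # firstName_lastName.yml

def validate(path: Path) -> list[str]:
    errors = []
    if not NAME_RE.match(path.name):
        errors.append(f"{path.name}: does not follow firstName_lastName.yml")
    data = yaml.safe_load(path.read_text()) or {}
    missing = sorted(REQUIRED - data.keys())
    if missing:
        errors.append(f"{path.name}: missing required fields {missing}")
    image = data.get("image")
    if image and not (path.parent / image).is_file():
        errors.append(f"{path.name}: image file {image!r} not found")
    for org in data.get("organizations") or []:
        if org not in ALLOWED_ORGS:
            errors.append(f"{path.name}: unknown organization {org!r}")
    return errors

if __name__ == "__main__":
    staff_dir = Path(sys.argv[1]) if len(sys.argv) > 1 else Path(".")
    problems = [e for f in sorted(staff_dir.glob("*.y*ml")) for e in validate(f)]
    print("\n".join(problems) if problems else "all staff files pass")
    sys.exit(1 if problems else 0)
```

Run from the `staff-list` directory (`python validate_staff.py .`); it prints one line per violation and exits non-zero, which also makes it usable as a CI check.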
+ +Thank you for contributing to the `staff-list` submodule and helping maintain a consistent and professional presentation of our staff members. \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/aaron_moate.yml b/preview-fall2024-info/staff-list/aaron_moate.yml new file mode 100644 index 000000000..b770a002d --- /dev/null +++ b/preview-fall2024-info/staff-list/aaron_moate.yml @@ -0,0 +1,14 @@ +name: Aaron Moate +date: 2020-09-28T19:31:00-05:00 +draft: false +image: "images/aaron_moate.png" +title: "Systems Administrator" +status: "Staff" +institution: "University of Wisconsin–Madison" +weight: 5 +chtc: + title: Lead Systems Administrator +organizations: + - path + - chtc + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/aaryan_patel.yml b/preview-fall2024-info/staff-list/aaryan_patel.yml new file mode 100644 index 000000000..acf944243 --- /dev/null +++ b/preview-fall2024-info/staff-list/aaryan_patel.yml @@ -0,0 +1,7 @@ +name: Aaryan Patel +title: Research Computing Facilitation Assistant +institution: Morgridge Institute for Research +status: Staff +organizations: + - chtc +image: images/aaryan_patel.jpeg diff --git a/preview-fall2024-info/staff-list/abhinandan_saha.yml b/preview-fall2024-info/staff-list/abhinandan_saha.yml new file mode 100644 index 000000000..e629bd170 --- /dev/null +++ b/preview-fall2024-info/staff-list/abhinandan_saha.yml @@ -0,0 +1,7 @@ +image: images/abhinandan_saha.jpg +institution: University of Wisconsin-Madison +title: Systems Administration Intern +name: Abhinandan Saha +status: Staff +organizations: + - chtc \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/adrian_crenshaw.yml b/preview-fall2024-info/staff-list/adrian_crenshaw.yml new file mode 100644 index 000000000..4a0ab40df --- /dev/null +++ b/preview-fall2024-info/staff-list/adrian_crenshaw.yml @@ -0,0 +1,8 @@ +name: "Adrian Crenshaw" +image: "images/adrian_crenshaw.jpeg" +title: "Security Analyst" +institution: "Indiana University" +website: https://cacr.iu.edu/about/people/Adrian-Crenshaw.html +organizations: + - path + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/alja_tadel.yml b/preview-fall2024-info/staff-list/alja_tadel.yml new file mode 100644 index 000000000..83ee25fd6 --- /dev/null +++ b/preview-fall2024-info/staff-list/alja_tadel.yml @@ -0,0 +1,10 @@ +image: images/alja_tadel.jpg +institution: University of California San Diego +title: Analytic Programmer +name: Alja Mrak Tadel +status: Staff +website: null +pelican: + weight: 9 +organizations: + - pelican \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/alperen_bakirci.yml b/preview-fall2024-info/staff-list/alperen_bakirci.yml new file mode 100644 index 000000000..6b03715e4 --- /dev/null +++ b/preview-fall2024-info/staff-list/alperen_bakirci.yml @@ -0,0 +1,13 @@ +image: images/alperen_bakirci.jpg +institution: Morgridge Institute For Research +title: Student Web Developer +name: Alperen Bakirci +status: Past +website: null +pelican: + weight: 18 +organizations: + - path + - chtc + - osg + - pelican \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/amber_lim.yml b/preview-fall2024-info/staff-list/amber_lim.yml new file mode 100644 index 000000000..281834f9d --- /dev/null +++ b/preview-fall2024-info/staff-list/amber_lim.yml @@ -0,0 +1,9 @@ +name: Amber Lim +title: Research Computing Facilitator +institution: "University of Wisconsin–Madison" +status: Staff +organizations: + - chtc + - 
osg + - path +image: images/amber_lim.jpg diff --git a/preview-fall2024-info/staff-list/andrew_owen.yml b/preview-fall2024-info/staff-list/andrew_owen.yml new file mode 100644 index 000000000..96622aeb7 --- /dev/null +++ b/preview-fall2024-info/staff-list/andrew_owen.yml @@ -0,0 +1,12 @@ +image: images/andrew_owen.jpg +institution: University of Wisconsin-Madison +title: Research Computing Facilitator +is_facilitator: 1 +name: Andrew Owen +status: Staff +website: null +organizations: + - path + - chtc + - osg + - pelican diff --git a/preview-fall2024-info/staff-list/ashton_graves.yml b/preview-fall2024-info/staff-list/ashton_graves.yml new file mode 100644 index 000000000..030d34286 --- /dev/null +++ b/preview-fall2024-info/staff-list/ashton_graves.yml @@ -0,0 +1,9 @@ +image: images/ashton_graves.jpeg +institution: University of Nebraska-Lincoln +title: DevOps Engineer +name: Ashton Graves +status: Staff +website: null +organizations: + - path + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/ben_staehle.yml b/preview-fall2024-info/staff-list/ben_staehle.yml new file mode 100644 index 000000000..693c764ca --- /dev/null +++ b/preview-fall2024-info/staff-list/ben_staehle.yml @@ -0,0 +1,20 @@ +name: Ben Staehle +title: Fellow +institution: Morgridge Institute for Research +status: Student +organizations: + - chtc +image: images/ben_staehle.jpg + +fellowship: + name: Tracking server inventory and elevation + description: | + The CHTC maintains over 1,000 servers on the UW–Madison campus and + across the country. Keeping track of server elevation (datacenter + and rack location), serial numbers, and asset tags is a challenge that + is always in need of improvement. This project will focus on taking + existing data from the CHTC hardware monitoring system and automatically + exporting it to other systems such as Google spreadsheets or ITAdvisor. + After a successful summer, the student fellow will gain skills in + Python, monitoring, and Google Docs APIs. + mentor: Joe Bartkowiak diff --git a/preview-fall2024-info/staff-list/bocheng_zou.yaml b/preview-fall2024-info/staff-list/bocheng_zou.yaml new file mode 100644 index 000000000..130df536e --- /dev/null +++ b/preview-fall2024-info/staff-list/bocheng_zou.yaml @@ -0,0 +1,8 @@ +name: Bocheng Zou +image: "images/bocheng_zou.png" +title: "System Administrator Intern" +status: "Student" +institution: "University of Wisconsin–Madison" +weight: 5 +organizations: + - chtc diff --git a/preview-fall2024-info/staff-list/brian_aydemir.yml b/preview-fall2024-info/staff-list/brian_aydemir.yml new file mode 100644 index 000000000..76f1b3c1e --- /dev/null +++ b/preview-fall2024-info/staff-list/brian_aydemir.yml @@ -0,0 +1,8 @@ +image: images/brian_aydemir.jpeg +institution: University of Wisconsin-Madison +title: Systems Integration Developer +name: Brian Aydemir +status: Staff +website: null +organizations: + - chtc diff --git a/preview-fall2024-info/staff-list/brian_bockelman.yml b/preview-fall2024-info/staff-list/brian_bockelman.yml new file mode 100644 index 000000000..3d3388a6d --- /dev/null +++ b/preview-fall2024-info/staff-list/brian_bockelman.yml @@ -0,0 +1,23 @@ +name: "Brian Bockelman" +date: 2018-11-19T10:47:58+10:00 +draft: false +image: "images/brian_bockelman.jpg" +title: "FoCaS co-lead" +institution: "Morgridge Institute for Research" +promoted: true +weight: 4 +description: Bockelman is an Investigator at the Morgridge Institute for Research and co-lead of the FoCaS area. 
+status: Leadership +osg: + title: OSG Technology Lead + website: "https://opensciencegrid.org" + promoted: true + weight: 3 +pelican: + title: Principal Investigator + weight: 1 +organizations: + - path + - chtc + - osg + - pelican diff --git a/preview-fall2024-info/staff-list/brian_lin.yml b/preview-fall2024-info/staff-list/brian_lin.yml new file mode 100644 index 000000000..7035f18d8 --- /dev/null +++ b/preview-fall2024-info/staff-list/brian_lin.yml @@ -0,0 +1,22 @@ +name: "Brian Lin" +date: 2018-11-19T10:47:58+10:00 +draft: false +image: "images/brian_lin.jpg" +title: "Infrastructure Services Lead" +institution: "University of Wisconsin–Madison" +#website: "" +linkedinurl: "" +weight: 5 +status: Staff +chtc: + title: OSG Software Area Coordinator +osg: + title: Software Area Coordinator +pelican: + title: OSG Software Area Coordinator + weight: 13 +organizations: + - path + - chtc + - osg + - pelican \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/bryna_goeking.yml b/preview-fall2024-info/staff-list/bryna_goeking.yml new file mode 100644 index 000000000..8509a8ca1 --- /dev/null +++ b/preview-fall2024-info/staff-list/bryna_goeking.yml @@ -0,0 +1,10 @@ +name: "Bryna Goeking" +image: "images/bryna_goeking.jpg" +title: "Student Writer" +institution: "Morgridge Institute for Research" +weight: 5 +status: Past +organizations: + - path + - chtc + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/cameron_abplanalp.yml b/preview-fall2024-info/staff-list/cameron_abplanalp.yml new file mode 100644 index 000000000..28f2d2d87 --- /dev/null +++ b/preview-fall2024-info/staff-list/cameron_abplanalp.yml @@ -0,0 +1,8 @@ +image: images/cameron_abplanalp.png +institution: University of Wisconsin-Madison +title: Research Computing Facilitation Assistant +name: Cameron Abplanalp +status: Past +website: null +organizations: + - chtc diff --git a/preview-fall2024-info/staff-list/cannon_lock.yml b/preview-fall2024-info/staff-list/cannon_lock.yml new file mode 100644 index 000000000..33692fe6b --- /dev/null +++ b/preview-fall2024-info/staff-list/cannon_lock.yml @@ -0,0 +1,16 @@ +name: "Cannon Lock" +draft: false +image: "images/cannon_lock.jpg" +title: "Web Developer" +institution: "Morgridge Institute for Research" +status: Staff +linkedinurl: "" +weight: 5 +pelican: + title: "Web Developer" + weight: 6 +organizations: + - path + - chtc + - osg + - pelican \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/chris_lauderbaugh.yml b/preview-fall2024-info/staff-list/chris_lauderbaugh.yml new file mode 100644 index 000000000..d007b42ea --- /dev/null +++ b/preview-fall2024-info/staff-list/chris_lauderbaugh.yml @@ -0,0 +1,9 @@ +image: images/chris_lauderbaugh.jpg +institution: Indiana University +title: Security Analyst +name: Chris Lauderbaugh +status: Staff +website: null +organizations: + - path + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/christina_koch.yml b/preview-fall2024-info/staff-list/christina_koch.yml new file mode 100644 index 000000000..080136484 --- /dev/null +++ b/preview-fall2024-info/staff-list/christina_koch.yml @@ -0,0 +1,25 @@ +name: "Christina Koch" +date: 2018-11-19T10:47:58+10:00 +draft: false +image: "images/christina_koch.jpg" +title: "Research Facilitator Manager" +institution: "University of Wisconsin - Madison" +website: https://wid.wisc.edu/people/christina-koch/ +is_facilitator: 1 +status: Staff +linkedinurl: "" +weight: 5 +chtc: + title: Lead Research Computing 
Facilitator +pelican: + title: Lead Research Computing Facilitator + weight: 14 +osg: + title: OSG Research Facilitation Lead + promoted: true + weight: 7 +organizations: + - path + - chtc + - osg + - pelican \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/colby_walsworth.yml b/preview-fall2024-info/staff-list/colby_walsworth.yml new file mode 100644 index 000000000..8b803727c --- /dev/null +++ b/preview-fall2024-info/staff-list/colby_walsworth.yml @@ -0,0 +1,9 @@ +name: "Colby Walsworth" +image: "images/colby_walsworth.jpg" +title: "Software Integration Developer" +status: Staff +institution: "University of California - San Diego" +weight: 5 +organizations: + - path + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/cole_bollig.yml b/preview-fall2024-info/staff-list/cole_bollig.yml new file mode 100644 index 000000000..fabeb4f59 --- /dev/null +++ b/preview-fall2024-info/staff-list/cole_bollig.yml @@ -0,0 +1,10 @@ +name: "Cole Bollig" +status: Staff +image: "images/cole_bollig.jpg" +title: "Systems Software Developer" +institution: "University of Wisconsin - Madison" +chtc: + title: HTCondor Core Developer +organizations: + - path + - chtc \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/cristina_encarnacion.yml b/preview-fall2024-info/staff-list/cristina_encarnacion.yml new file mode 100644 index 000000000..09d88acdf --- /dev/null +++ b/preview-fall2024-info/staff-list/cristina_encarnacion.yml @@ -0,0 +1,12 @@ +name: "Cristina Encarnacion" +image: "images/cristina_encarnacion.jpeg" +title: "Student Science Writer" +institution: "Morgridge Institute for Research" +website: null +weight: 3 +status: Student +organizations: + - path + - chtc + - osg + - pelican \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/david_baik.yaml b/preview-fall2024-info/staff-list/david_baik.yaml new file mode 100644 index 000000000..2a67fb236 --- /dev/null +++ b/preview-fall2024-info/staff-list/david_baik.yaml @@ -0,0 +1,8 @@ +image: images/default.jpg +institution: University of Wisconsin-Madison +title: System Administrator +name: David Baik +status: Staff +website: null +organizations: + - chtc diff --git a/preview-fall2024-info/staff-list/david_jordan.yml b/preview-fall2024-info/staff-list/david_jordan.yml new file mode 100644 index 000000000..48ea8a8aa --- /dev/null +++ b/preview-fall2024-info/staff-list/david_jordan.yml @@ -0,0 +1,7 @@ +name: David Jordan +image: "images/david_jordan.jpg" +title: "Systems Administrator" +status: Staff +institution: "University of Chicago" +organizations: + - path \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/derek_weitzel.yml b/preview-fall2024-info/staff-list/derek_weitzel.yml new file mode 100644 index 000000000..80a3a329f --- /dev/null +++ b/preview-fall2024-info/staff-list/derek_weitzel.yml @@ -0,0 +1,15 @@ +name: "Derek Weitzel" +date: 2018-11-19T10:47:58+10:00 +draft: false +image: "images/derek_weitzel.png" +title: "Institutional PI" +institution: "University of Nebraska-Lincoln" +status: Staff +website: "https://derekweitzel.com" +description: Derek Weitzel is an Assistant Research Professor at the University of Nebraska-Lincoln's Computer Science and Engineering Department. 
+osg: + title: Software Integration Developer + website: https://github.com/djw8605 +organizations: + - path + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/emile_turatsinze.yml b/preview-fall2024-info/staff-list/emile_turatsinze.yml new file mode 100644 index 000000000..ae8c8fe1d --- /dev/null +++ b/preview-fall2024-info/staff-list/emile_turatsinze.yml @@ -0,0 +1,7 @@ +image: images/emile_turatsinze.jpg +institution: Morgridge Institute for Research +title: Systems Administrator +name: Emile Turatsinze +status: Staff +organizations: + - chtc \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/emily_yao.yml b/preview-fall2024-info/staff-list/emily_yao.yml new file mode 100644 index 000000000..44d2441d3 --- /dev/null +++ b/preview-fall2024-info/staff-list/emily_yao.yml @@ -0,0 +1,7 @@ +image: images/emily_yao.jpg +institution: University of Wisconsin-Madison +title: System Administrator Intern +name: Emily Yao +status: Past +organizations: + - chtc \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/emma_turetsky.yml b/preview-fall2024-info/staff-list/emma_turetsky.yml new file mode 100644 index 000000000..a46affc3e --- /dev/null +++ b/preview-fall2024-info/staff-list/emma_turetsky.yml @@ -0,0 +1,10 @@ +image: images/emma_turetsky.jpg +institution: Morgridge Institute for Research +title: Research Software Engineer +name: Emma Turetsky +status: Staff +pelican: + weight: 7 +organizations: + - chtc + - pelican diff --git a/preview-fall2024-info/staff-list/ewa_deelman.yml b/preview-fall2024-info/staff-list/ewa_deelman.yml new file mode 100644 index 000000000..e1619983f --- /dev/null +++ b/preview-fall2024-info/staff-list/ewa_deelman.yml @@ -0,0 +1,6 @@ +name: "Ewa Deelman" +image: "images/ewa_deelman.jpeg" +title: "Institutional PI" +institution: "University of Southern California" +organizations: + - path \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/fabio_andrijauska.yml b/preview-fall2024-info/staff-list/fabio_andrijauska.yml new file mode 100644 index 000000000..9239314f9 --- /dev/null +++ b/preview-fall2024-info/staff-list/fabio_andrijauska.yml @@ -0,0 +1,6 @@ +name: Fabio Andrijauskas +image: "images/fabio_andrijauskas.jpeg" +title: "Senior Software Developer" +institution: "University of California San Diego" +organizations: + - path \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/farnaz_golnaraghi.yml b/preview-fall2024-info/staff-list/farnaz_golnaraghi.yml new file mode 100644 index 000000000..1811e84c2 --- /dev/null +++ b/preview-fall2024-info/staff-list/farnaz_golnaraghi.yml @@ -0,0 +1,6 @@ +name: "Farnaz Golnaraghi" +image: "images/farnaz_golnaraghi.jpeg" +title: "Systems Administrator" +institution: "University of Chicago" +organizations: + - path \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/frank_wuerthwein.yml b/preview-fall2024-info/staff-list/frank_wuerthwein.yml new file mode 100644 index 000000000..e55dc7a69 --- /dev/null +++ b/preview-fall2024-info/staff-list/frank_wuerthwein.yml @@ -0,0 +1,21 @@ +name: "Frank Wuerthwein" +date: 2018-11-19T10:47:58+10:00 +draft: false +image: "images/frank_wuerthwein.jpg" +title: "OSG Executive Director" +website: +institution: "University of California San Diego" +promoted: true +weight: 2 +description: Wuerthwein is a Professor of Physics at UCSD and the Executive Director of the OSG. 
+pelican: + title: Co-Principal Investigator + weight: 3 +osg: + title: OSG Executive Director + promoted: true + weight: 2 +organizations: + - path + - osg + - pelican \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/frank_zhang.yaml b/preview-fall2024-info/staff-list/frank_zhang.yaml new file mode 100644 index 000000000..26443fda9 --- /dev/null +++ b/preview-fall2024-info/staff-list/frank_zhang.yaml @@ -0,0 +1,8 @@ +image: images/default.jpg +institution: University of Wisconsin-Madison +title: System Administrator Intern +name: Frank Zhang +status: Student +website: null +organizations: + - chtc diff --git a/preview-fall2024-info/staff-list/greg_thain.yml b/preview-fall2024-info/staff-list/greg_thain.yml new file mode 100644 index 000000000..a7635ff30 --- /dev/null +++ b/preview-fall2024-info/staff-list/greg_thain.yml @@ -0,0 +1,14 @@ +name: "Greg Thain" +date: 2018-11-19T10:47:58+10:00 +draft: false +image: "images/greg_thain.jpg" +title: "Senior Systems Software Developer" +#website: "" +institution: "University of Wisconsin-Madison" +status: Staff +weight: 5 +chtc: + title: HTCondor Core Developer +organizations: + - path + - chtc \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/hannah_cheren.yml b/preview-fall2024-info/staff-list/hannah_cheren.yml new file mode 100644 index 000000000..a94b6206b --- /dev/null +++ b/preview-fall2024-info/staff-list/hannah_cheren.yml @@ -0,0 +1,12 @@ +name: "Hannah Cheren" +date: 2021-11-17T09:00:00+10:00 +draft: false +image: "images/hannah_cheren.jpg" +title: "Communications Specialist" +institution: "University of Wisconsin–Madison" +#website: "" +linkedinurl: "" +weight: 5 +organizations: + - path +status: Past \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/haoming_meng.yml b/preview-fall2024-info/staff-list/haoming_meng.yml new file mode 100644 index 000000000..4803f26f3 --- /dev/null +++ b/preview-fall2024-info/staff-list/haoming_meng.yml @@ -0,0 +1,11 @@ +image: images/haoming_meng.jpg +institution: Morgridge Institute For Research +title: Research Software Engineer +name: Haoming Meng +status: Past +website: null +pelican: + weight: 12 +organizations: + - chtc + - pelican diff --git a/preview-fall2024-info/staff-list/ian_ross.yml b/preview-fall2024-info/staff-list/ian_ross.yml new file mode 100644 index 000000000..a0c5ef4df --- /dev/null +++ b/preview-fall2024-info/staff-list/ian_ross.yml @@ -0,0 +1,7 @@ +image: images/ian_ross.jpg +institution: University of Wisconsin-Madison +title: Systems Integration Developer +name: Ian Ross +status: Staff +organizations: + - chtc diff --git a/preview-fall2024-info/staff-list/igor_sfiligoi.yml b/preview-fall2024-info/staff-list/igor_sfiligoi.yml new file mode 100644 index 000000000..2562325cc --- /dev/null +++ b/preview-fall2024-info/staff-list/igor_sfiligoi.yml @@ -0,0 +1,11 @@ +name: "Igor Sfiligoi" +date: 2020-09-28T05:00:00-05:00 +draft: false +image: "images/igor_sfiligoi.jpg" +title: "Lead Scientific Software Developer and Researcher" +institution: "University of California San Diego" +#website: "" +linkedinurl: "https://www.linkedin.com/in/igor-sfiligoi-73982a78/" +weight: 5 +organizations: + - path \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/images/aaron_moate.png b/preview-fall2024-info/staff-list/images/aaron_moate.png new file mode 100644 index 000000000..036d9098c Binary files /dev/null and b/preview-fall2024-info/staff-list/images/aaron_moate.png differ diff --git 
a/preview-fall2024-info/staff-list/images/aaryan_patel.jpeg b/preview-fall2024-info/staff-list/images/aaryan_patel.jpeg new file mode 100644 index 000000000..223d00609 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/aaryan_patel.jpeg differ diff --git a/preview-fall2024-info/staff-list/images/abhinandan_saha.jpg b/preview-fall2024-info/staff-list/images/abhinandan_saha.jpg new file mode 100644 index 000000000..9a9adc10f Binary files /dev/null and b/preview-fall2024-info/staff-list/images/abhinandan_saha.jpg differ diff --git a/preview-fall2024-info/staff-list/images/adrian_crenshaw.jpeg b/preview-fall2024-info/staff-list/images/adrian_crenshaw.jpeg new file mode 100644 index 000000000..a69b9cb4b Binary files /dev/null and b/preview-fall2024-info/staff-list/images/adrian_crenshaw.jpeg differ diff --git a/preview-fall2024-info/staff-list/images/alja_tadel.jpg b/preview-fall2024-info/staff-list/images/alja_tadel.jpg new file mode 100644 index 000000000..2b1db9b86 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/alja_tadel.jpg differ diff --git a/preview-fall2024-info/staff-list/images/alperen_bakirci.jpg b/preview-fall2024-info/staff-list/images/alperen_bakirci.jpg new file mode 100644 index 000000000..a9c1186e3 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/alperen_bakirci.jpg differ diff --git a/preview-fall2024-info/staff-list/images/amber_lim.jpg b/preview-fall2024-info/staff-list/images/amber_lim.jpg new file mode 100644 index 000000000..4e5f2162d Binary files /dev/null and b/preview-fall2024-info/staff-list/images/amber_lim.jpg differ diff --git a/preview-fall2024-info/staff-list/images/andrew_owen.jpg b/preview-fall2024-info/staff-list/images/andrew_owen.jpg new file mode 100644 index 000000000..4617647df Binary files /dev/null and b/preview-fall2024-info/staff-list/images/andrew_owen.jpg differ diff --git a/preview-fall2024-info/staff-list/images/ashton_graves.jpeg b/preview-fall2024-info/staff-list/images/ashton_graves.jpeg new file mode 100644 index 000000000..06e38c7d9 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/ashton_graves.jpeg differ diff --git a/preview-fall2024-info/staff-list/images/ben_staehle.jpg b/preview-fall2024-info/staff-list/images/ben_staehle.jpg new file mode 100644 index 000000000..c3e7c0d85 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/ben_staehle.jpg differ diff --git a/preview-fall2024-info/staff-list/images/bocheng_zou.png b/preview-fall2024-info/staff-list/images/bocheng_zou.png new file mode 100644 index 000000000..9d49e5f85 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/bocheng_zou.png differ diff --git a/preview-fall2024-info/staff-list/images/brian_aydemir.jpeg b/preview-fall2024-info/staff-list/images/brian_aydemir.jpeg new file mode 100644 index 000000000..7cd690dcb Binary files /dev/null and b/preview-fall2024-info/staff-list/images/brian_aydemir.jpeg differ diff --git a/preview-fall2024-info/staff-list/images/brian_bockelman.jpg b/preview-fall2024-info/staff-list/images/brian_bockelman.jpg new file mode 100644 index 000000000..0ebb6eb0d Binary files /dev/null and b/preview-fall2024-info/staff-list/images/brian_bockelman.jpg differ diff --git a/preview-fall2024-info/staff-list/images/brian_lin.jpg b/preview-fall2024-info/staff-list/images/brian_lin.jpg new file mode 100644 index 000000000..8fa6934ec Binary files /dev/null and b/preview-fall2024-info/staff-list/images/brian_lin.jpg differ diff --git 
a/preview-fall2024-info/staff-list/images/bryna_goeking.jpg b/preview-fall2024-info/staff-list/images/bryna_goeking.jpg new file mode 100644 index 000000000..69f1d7fbb Binary files /dev/null and b/preview-fall2024-info/staff-list/images/bryna_goeking.jpg differ diff --git a/preview-fall2024-info/staff-list/images/cameron_abplanalp.png b/preview-fall2024-info/staff-list/images/cameron_abplanalp.png new file mode 100644 index 000000000..982b1977e Binary files /dev/null and b/preview-fall2024-info/staff-list/images/cameron_abplanalp.png differ diff --git a/preview-fall2024-info/staff-list/images/cannon_lock.jpg b/preview-fall2024-info/staff-list/images/cannon_lock.jpg new file mode 100644 index 000000000..31afa7caf Binary files /dev/null and b/preview-fall2024-info/staff-list/images/cannon_lock.jpg differ diff --git a/preview-fall2024-info/staff-list/images/chris_lauderbaugh.jpg b/preview-fall2024-info/staff-list/images/chris_lauderbaugh.jpg new file mode 100644 index 000000000..74a235369 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/chris_lauderbaugh.jpg differ diff --git a/preview-fall2024-info/staff-list/images/christina_koch.jpg b/preview-fall2024-info/staff-list/images/christina_koch.jpg new file mode 100644 index 000000000..455bad094 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/christina_koch.jpg differ diff --git a/preview-fall2024-info/staff-list/images/colby_walsworth.jpg b/preview-fall2024-info/staff-list/images/colby_walsworth.jpg new file mode 100644 index 000000000..c39ac9e3c Binary files /dev/null and b/preview-fall2024-info/staff-list/images/colby_walsworth.jpg differ diff --git a/preview-fall2024-info/staff-list/images/cole_bollig.jpg b/preview-fall2024-info/staff-list/images/cole_bollig.jpg new file mode 100644 index 000000000..f6c1052ca Binary files /dev/null and b/preview-fall2024-info/staff-list/images/cole_bollig.jpg differ diff --git a/preview-fall2024-info/staff-list/images/cristina_encarnacion.jpeg b/preview-fall2024-info/staff-list/images/cristina_encarnacion.jpeg new file mode 100644 index 000000000..63c6af413 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/cristina_encarnacion.jpeg differ diff --git a/preview-fall2024-info/staff-list/images/david_jordan.jpg b/preview-fall2024-info/staff-list/images/david_jordan.jpg new file mode 100644 index 000000000..07dd7969c Binary files /dev/null and b/preview-fall2024-info/staff-list/images/david_jordan.jpg differ diff --git a/preview-fall2024-info/staff-list/images/default.jpg b/preview-fall2024-info/staff-list/images/default.jpg new file mode 100644 index 000000000..e42186f37 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/default.jpg differ diff --git a/preview-fall2024-info/staff-list/images/derek_weitzel.png b/preview-fall2024-info/staff-list/images/derek_weitzel.png new file mode 100644 index 000000000..e46c6b25f Binary files /dev/null and b/preview-fall2024-info/staff-list/images/derek_weitzel.png differ diff --git a/preview-fall2024-info/staff-list/images/emile_turatsinze.jpg b/preview-fall2024-info/staff-list/images/emile_turatsinze.jpg new file mode 100644 index 000000000..207ba1cce Binary files /dev/null and b/preview-fall2024-info/staff-list/images/emile_turatsinze.jpg differ diff --git a/preview-fall2024-info/staff-list/images/emily_yao.jpg b/preview-fall2024-info/staff-list/images/emily_yao.jpg new file mode 100644 index 000000000..da2a80f13 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/emily_yao.jpg 
differ diff --git a/preview-fall2024-info/staff-list/images/emma_turetsky.jpg b/preview-fall2024-info/staff-list/images/emma_turetsky.jpg new file mode 100644 index 000000000..631ecfce4 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/emma_turetsky.jpg differ diff --git a/preview-fall2024-info/staff-list/images/ewa_deelman.jpeg b/preview-fall2024-info/staff-list/images/ewa_deelman.jpeg new file mode 100644 index 000000000..774d860a4 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/ewa_deelman.jpeg differ diff --git a/preview-fall2024-info/staff-list/images/fabio_andrijauskas.jpeg b/preview-fall2024-info/staff-list/images/fabio_andrijauskas.jpeg new file mode 100644 index 000000000..c3fb45426 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/fabio_andrijauskas.jpeg differ diff --git a/preview-fall2024-info/staff-list/images/farnaz_golnaraghi.jpeg b/preview-fall2024-info/staff-list/images/farnaz_golnaraghi.jpeg new file mode 100644 index 000000000..feb787e63 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/farnaz_golnaraghi.jpeg differ diff --git a/preview-fall2024-info/staff-list/images/frank_wuerthwein.jpg b/preview-fall2024-info/staff-list/images/frank_wuerthwein.jpg new file mode 100644 index 000000000..bc5cb071a Binary files /dev/null and b/preview-fall2024-info/staff-list/images/frank_wuerthwein.jpg differ diff --git a/preview-fall2024-info/staff-list/images/greg_thain.jpg b/preview-fall2024-info/staff-list/images/greg_thain.jpg new file mode 100644 index 000000000..10fc4785d Binary files /dev/null and b/preview-fall2024-info/staff-list/images/greg_thain.jpg differ diff --git a/preview-fall2024-info/staff-list/images/hannah_cheren.jpg b/preview-fall2024-info/staff-list/images/hannah_cheren.jpg new file mode 100644 index 000000000..5dd58aed6 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/hannah_cheren.jpg differ diff --git a/preview-fall2024-info/staff-list/images/haoming_meng.jpg b/preview-fall2024-info/staff-list/images/haoming_meng.jpg new file mode 100644 index 000000000..487f5322b Binary files /dev/null and b/preview-fall2024-info/staff-list/images/haoming_meng.jpg differ diff --git a/preview-fall2024-info/staff-list/images/ian_ross.jpg b/preview-fall2024-info/staff-list/images/ian_ross.jpg new file mode 100644 index 000000000..f5467bb6f Binary files /dev/null and b/preview-fall2024-info/staff-list/images/ian_ross.jpg differ diff --git a/preview-fall2024-info/staff-list/images/igor_sfiligoi.jpg b/preview-fall2024-info/staff-list/images/igor_sfiligoi.jpg new file mode 100644 index 000000000..6c901b2ff Binary files /dev/null and b/preview-fall2024-info/staff-list/images/igor_sfiligoi.jpg differ diff --git a/preview-fall2024-info/staff-list/images/irene_landrum.png b/preview-fall2024-info/staff-list/images/irene_landrum.png new file mode 100644 index 000000000..6bce28d19 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/irene_landrum.png differ diff --git a/preview-fall2024-info/staff-list/images/jaime_frey.jpg b/preview-fall2024-info/staff-list/images/jaime_frey.jpg new file mode 100644 index 000000000..0c96f694a Binary files /dev/null and b/preview-fall2024-info/staff-list/images/jaime_frey.jpg differ diff --git a/preview-fall2024-info/staff-list/images/janet_stathas.jpg b/preview-fall2024-info/staff-list/images/janet_stathas.jpg new file mode 100644 index 000000000..88b938689 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/janet_stathas.jpg 
differ diff --git a/preview-fall2024-info/staff-list/images/jason_patton.png b/preview-fall2024-info/staff-list/images/jason_patton.png new file mode 100644 index 000000000..63b5e2471 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/jason_patton.png differ diff --git a/preview-fall2024-info/staff-list/images/jeff_dost.jpg b/preview-fall2024-info/staff-list/images/jeff_dost.jpg new file mode 100644 index 000000000..e5adc8ac1 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/jeff_dost.jpg differ diff --git a/preview-fall2024-info/staff-list/images/jeff_peterson.jpg b/preview-fall2024-info/staff-list/images/jeff_peterson.jpg new file mode 100644 index 000000000..03f212b95 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/jeff_peterson.jpg differ diff --git a/preview-fall2024-info/staff-list/images/jeronimo_bezerra.jpeg b/preview-fall2024-info/staff-list/images/jeronimo_bezerra.jpeg new file mode 100644 index 000000000..83f194517 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/jeronimo_bezerra.jpeg differ diff --git a/preview-fall2024-info/staff-list/images/joe_bartkowiak.jpg b/preview-fall2024-info/staff-list/images/joe_bartkowiak.jpg new file mode 100644 index 000000000..391d81da6 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/joe_bartkowiak.jpg differ diff --git a/preview-fall2024-info/staff-list/images/joe_reuss.jpeg b/preview-fall2024-info/staff-list/images/joe_reuss.jpeg new file mode 100644 index 000000000..40e2f80aa Binary files /dev/null and b/preview-fall2024-info/staff-list/images/joe_reuss.jpeg differ diff --git a/preview-fall2024-info/staff-list/images/john_knoeller.jpg b/preview-fall2024-info/staff-list/images/john_knoeller.jpg new file mode 100644 index 000000000..eda38ee91 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/john_knoeller.jpg differ diff --git a/preview-fall2024-info/staff-list/images/john_parsons.jpeg b/preview-fall2024-info/staff-list/images/john_parsons.jpeg new file mode 100644 index 000000000..27790e88f Binary files /dev/null and b/preview-fall2024-info/staff-list/images/john_parsons.jpeg differ diff --git a/preview-fall2024-info/staff-list/images/john_thiltges.jpg b/preview-fall2024-info/staff-list/images/john_thiltges.jpg new file mode 100644 index 000000000..cae48ccaa Binary files /dev/null and b/preview-fall2024-info/staff-list/images/john_thiltges.jpg differ diff --git a/preview-fall2024-info/staff-list/images/jordan_sklar.jpg b/preview-fall2024-info/staff-list/images/jordan_sklar.jpg new file mode 100644 index 000000000..5f233cea0 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/jordan_sklar.jpg differ diff --git a/preview-fall2024-info/staff-list/images/josh_drake.jpg b/preview-fall2024-info/staff-list/images/josh_drake.jpg new file mode 100644 index 000000000..b1962b9b9 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/josh_drake.jpg differ diff --git a/preview-fall2024-info/staff-list/images/josh_edwards.jpeg b/preview-fall2024-info/staff-list/images/josh_edwards.jpeg new file mode 100644 index 000000000..a7a0db417 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/josh_edwards.jpeg differ diff --git a/preview-fall2024-info/staff-list/images/judith_stephen.jpeg b/preview-fall2024-info/staff-list/images/judith_stephen.jpeg new file mode 100644 index 000000000..e0f658d59 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/judith_stephen.jpeg differ diff --git 
a/preview-fall2024-info/staff-list/images/julio_ibarra.jpg b/preview-fall2024-info/staff-list/images/julio_ibarra.jpg new file mode 100644 index 000000000..786c26352 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/julio_ibarra.jpg differ diff --git a/preview-fall2024-info/staff-list/images/justin_hiemstra.jpg b/preview-fall2024-info/staff-list/images/justin_hiemstra.jpg new file mode 100644 index 000000000..7a819cdc2 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/justin_hiemstra.jpg differ diff --git a/preview-fall2024-info/staff-list/images/kent_cramer.jpeg b/preview-fall2024-info/staff-list/images/kent_cramer.jpeg new file mode 100644 index 000000000..ac3fd4e9e Binary files /dev/null and b/preview-fall2024-info/staff-list/images/kent_cramer.jpeg differ diff --git a/preview-fall2024-info/staff-list/images/kristina_zhao.jpg b/preview-fall2024-info/staff-list/images/kristina_zhao.jpg new file mode 100644 index 000000000..1cb6e5ac0 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/kristina_zhao.jpg differ diff --git a/preview-fall2024-info/staff-list/images/lili_bicoy.jpg b/preview-fall2024-info/staff-list/images/lili_bicoy.jpg new file mode 100644 index 000000000..bc46a64fb Binary files /dev/null and b/preview-fall2024-info/staff-list/images/lili_bicoy.jpg differ diff --git a/preview-fall2024-info/staff-list/images/matevz_tadel.jpg b/preview-fall2024-info/staff-list/images/matevz_tadel.jpg new file mode 100644 index 000000000..5b89c609f Binary files /dev/null and b/preview-fall2024-info/staff-list/images/matevz_tadel.jpg differ diff --git a/preview-fall2024-info/staff-list/images/mats_rynge.jpg b/preview-fall2024-info/staff-list/images/mats_rynge.jpg new file mode 100644 index 000000000..c2f526df7 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/mats_rynge.jpg differ diff --git a/preview-fall2024-info/staff-list/images/matt_westphall.jpeg b/preview-fall2024-info/staff-list/images/matt_westphall.jpeg new file mode 100644 index 000000000..3857eca52 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/matt_westphall.jpeg differ diff --git a/preview-fall2024-info/staff-list/images/matyas_selmeci.jpg b/preview-fall2024-info/staff-list/images/matyas_selmeci.jpg new file mode 100644 index 000000000..7fff6f84f Binary files /dev/null and b/preview-fall2024-info/staff-list/images/matyas_selmeci.jpg differ diff --git a/preview-fall2024-info/staff-list/images/max_hartke.jpg b/preview-fall2024-info/staff-list/images/max_hartke.jpg new file mode 100644 index 000000000..d984af3da Binary files /dev/null and b/preview-fall2024-info/staff-list/images/max_hartke.jpg differ diff --git a/preview-fall2024-info/staff-list/images/michael_collins.png b/preview-fall2024-info/staff-list/images/michael_collins.png new file mode 100644 index 000000000..192afd5cf Binary files /dev/null and b/preview-fall2024-info/staff-list/images/michael_collins.png differ diff --git a/preview-fall2024-info/staff-list/images/mihir_manna.jpeg b/preview-fall2024-info/staff-list/images/mihir_manna.jpeg new file mode 100644 index 000000000..5542ed825 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/mihir_manna.jpeg differ diff --git a/preview-fall2024-info/staff-list/images/miron_livny.png b/preview-fall2024-info/staff-list/images/miron_livny.png new file mode 100644 index 000000000..a762f7e22 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/miron_livny.png differ diff --git 
a/preview-fall2024-info/staff-list/images/molly_mccarthy.jpg b/preview-fall2024-info/staff-list/images/molly_mccarthy.jpg new file mode 100644 index 000000000..7653df08a Binary files /dev/null and b/preview-fall2024-info/staff-list/images/molly_mccarthy.jpg differ diff --git a/preview-fall2024-info/staff-list/images/neha_talluri.jpg b/preview-fall2024-info/staff-list/images/neha_talluri.jpg new file mode 100644 index 000000000..464b953d4 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/neha_talluri.jpg differ diff --git a/preview-fall2024-info/staff-list/images/pascal_paschos.png b/preview-fall2024-info/staff-list/images/pascal_paschos.png new file mode 100644 index 000000000..845c783d1 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/pascal_paschos.png differ diff --git a/preview-fall2024-info/staff-list/images/patrick_brophy.jpg b/preview-fall2024-info/staff-list/images/patrick_brophy.jpg new file mode 100644 index 000000000..5cda328f1 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/patrick_brophy.jpg differ diff --git a/preview-fall2024-info/staff-list/images/pratham_patel.jpg b/preview-fall2024-info/staff-list/images/pratham_patel.jpg new file mode 100644 index 000000000..5d215fcf4 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/pratham_patel.jpg differ diff --git a/preview-fall2024-info/staff-list/images/rachel_lombardi.jpg b/preview-fall2024-info/staff-list/images/rachel_lombardi.jpg new file mode 100644 index 000000000..acac1723d Binary files /dev/null and b/preview-fall2024-info/staff-list/images/rachel_lombardi.jpg differ diff --git a/preview-fall2024-info/staff-list/images/rich_wellner.jpg b/preview-fall2024-info/staff-list/images/rich_wellner.jpg new file mode 100644 index 000000000..015215129 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/rich_wellner.jpg differ diff --git a/preview-fall2024-info/staff-list/images/rishideep_rallabandi.jpg b/preview-fall2024-info/staff-list/images/rishideep_rallabandi.jpg new file mode 100644 index 000000000..ba7c415c6 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/rishideep_rallabandi.jpg differ diff --git a/preview-fall2024-info/staff-list/images/rob_gardner.jpg b/preview-fall2024-info/staff-list/images/rob_gardner.jpg new file mode 100644 index 000000000..70efb9ed0 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/rob_gardner.jpg differ diff --git a/preview-fall2024-info/staff-list/images/ryan_boone.jpg b/preview-fall2024-info/staff-list/images/ryan_boone.jpg new file mode 100644 index 000000000..10de5fcb5 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/ryan_boone.jpg differ diff --git a/preview-fall2024-info/staff-list/images/ryan_jacob.jpg b/preview-fall2024-info/staff-list/images/ryan_jacob.jpg new file mode 100644 index 000000000..2545d5821 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/ryan_jacob.jpg differ diff --git a/preview-fall2024-info/staff-list/images/shawn_mckee.jpg b/preview-fall2024-info/staff-list/images/shawn_mckee.jpg new file mode 100644 index 000000000..6c388489e Binary files /dev/null and b/preview-fall2024-info/staff-list/images/shawn_mckee.jpg differ diff --git a/preview-fall2024-info/staff-list/images/shirley_obih.jpg b/preview-fall2024-info/staff-list/images/shirley_obih.jpg new file mode 100644 index 000000000..1d629fb62 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/shirley_obih.jpg differ diff --git 
a/preview-fall2024-info/staff-list/images/showmic_islam.jpg b/preview-fall2024-info/staff-list/images/showmic_islam.jpg new file mode 100644 index 000000000..cf2aa79ea Binary files /dev/null and b/preview-fall2024-info/staff-list/images/showmic_islam.jpg differ diff --git a/preview-fall2024-info/staff-list/images/susan_sons.jpg b/preview-fall2024-info/staff-list/images/susan_sons.jpg new file mode 100644 index 000000000..e0a19a647 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/susan_sons.jpg differ diff --git a/preview-fall2024-info/staff-list/images/tae_kidd.jpg b/preview-fall2024-info/staff-list/images/tae_kidd.jpg new file mode 100644 index 000000000..21c6f31b9 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/tae_kidd.jpg differ diff --git a/preview-fall2024-info/staff-list/images/theng_vang.jpg b/preview-fall2024-info/staff-list/images/theng_vang.jpg new file mode 100644 index 000000000..3f272bb37 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/theng_vang.jpg differ diff --git a/preview-fall2024-info/staff-list/images/thinh_nguyen.jpg b/preview-fall2024-info/staff-list/images/thinh_nguyen.jpg new file mode 100644 index 000000000..3a2a04971 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/thinh_nguyen.jpg differ diff --git a/preview-fall2024-info/staff-list/images/tim_cartwright.jpg b/preview-fall2024-info/staff-list/images/tim_cartwright.jpg new file mode 100644 index 000000000..5a6ac61b8 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/tim_cartwright.jpg differ diff --git a/preview-fall2024-info/staff-list/images/tim_theisen.png b/preview-fall2024-info/staff-list/images/tim_theisen.png new file mode 100644 index 000000000..854b4d2da Binary files /dev/null and b/preview-fall2024-info/staff-list/images/tim_theisen.png differ diff --git a/preview-fall2024-info/staff-list/images/todd_miller.png b/preview-fall2024-info/staff-list/images/todd_miller.png new file mode 100644 index 000000000..3f41dcc17 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/todd_miller.png differ diff --git a/preview-fall2024-info/staff-list/images/todd_tannenbaum.jpg b/preview-fall2024-info/staff-list/images/todd_tannenbaum.jpg new file mode 100644 index 000000000..5b3eefd2b Binary files /dev/null and b/preview-fall2024-info/staff-list/images/todd_tannenbaum.jpg differ diff --git a/preview-fall2024-info/staff-list/images/wil_cram.jpg b/preview-fall2024-info/staff-list/images/wil_cram.jpg new file mode 100644 index 000000000..658c07e8a Binary files /dev/null and b/preview-fall2024-info/staff-list/images/wil_cram.jpg differ diff --git a/preview-fall2024-info/staff-list/images/william_swanson.jpg b/preview-fall2024-info/staff-list/images/william_swanson.jpg new file mode 100644 index 000000000..dc92bbf33 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/william_swanson.jpg differ diff --git a/preview-fall2024-info/staff-list/images/yuxiao.jpg b/preview-fall2024-info/staff-list/images/yuxiao.jpg new file mode 100644 index 000000000..344e0bf88 Binary files /dev/null and b/preview-fall2024-info/staff-list/images/yuxiao.jpg differ diff --git a/preview-fall2024-info/staff-list/irene_landrum.yml b/preview-fall2024-info/staff-list/irene_landrum.yml new file mode 100644 index 000000000..9dbf367f9 --- /dev/null +++ b/preview-fall2024-info/staff-list/irene_landrum.yml @@ -0,0 +1,13 @@ +name: "Irene Landrum" +date: 2020-09-25T10:47:58+10:00 +draft: false +image: "images/irene_landrum.png" 
+title: "Project Manager" +#website: "" +institution: "Morgridge Institute for Research" +weight: 5 +status: Staff +organizations: + - path + - chtc + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/jaime_frey.yml b/preview-fall2024-info/staff-list/jaime_frey.yml new file mode 100644 index 000000000..8d04d334d --- /dev/null +++ b/preview-fall2024-info/staff-list/jaime_frey.yml @@ -0,0 +1,14 @@ +name: "Jaime Frey" +date: 2018-11-19T10:47:58+10:00 +draft: false +image: "images/jaime_frey.jpg" +title: "Senior Systems Software Developer" +#website: "" +institution: "University of Wisconsin-Madison" +status: Staff +weight: 5 +chtc: + title: HTCondor Core Developer +organizations: + - path + - chtc \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/janet_stathas.yml b/preview-fall2024-info/staff-list/janet_stathas.yml new file mode 100644 index 000000000..ecb521df4 --- /dev/null +++ b/preview-fall2024-info/staff-list/janet_stathas.yml @@ -0,0 +1,14 @@ +name: "Janet Stathas" +date: 2020-10-27T10:47:58+10:00 +draft: false +image: "images/janet_stathas.jpg" +title: "Project Manager" +institution: "Morgridge Institute for Research" +#website: "" +linkedinurl: "" +status: Staff +weight: 5 +organizations: + - path + - chtc + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/jason_patton.yml b/preview-fall2024-info/staff-list/jason_patton.yml new file mode 100644 index 000000000..f0f91494c --- /dev/null +++ b/preview-fall2024-info/staff-list/jason_patton.yml @@ -0,0 +1,12 @@ +name: "Jason Patton" +date: 2018-11-19T10:47:58+10:00 +draft: false +image: "images/jason_patton.png" +title: "Software Integration Developer" +#website: "" +institution: "University of Wisconsin-Madison" +weight: 5 +status: Staff +organizations: + - path + - chtc \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/jeff_dost.yml b/preview-fall2024-info/staff-list/jeff_dost.yml new file mode 100644 index 000000000..dc905c664 --- /dev/null +++ b/preview-fall2024-info/staff-list/jeff_dost.yml @@ -0,0 +1,6 @@ +name: "Jeff Dost" +image: "images/jeff_dost.jpg" +title: "Program Analyst" +institution: "University of California San Diego" +organizations: + - path \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/jeff_peterson.yml b/preview-fall2024-info/staff-list/jeff_peterson.yml new file mode 100644 index 000000000..57b0768d4 --- /dev/null +++ b/preview-fall2024-info/staff-list/jeff_peterson.yml @@ -0,0 +1,13 @@ +name: "Jeff Peterson" +date: 2018-11-19T10:47:58+10:00 +draft: false +image: "images/jeff_peterson.jpg" +title: "System Administrator" +institution: "Morgridge Institute" +status: Staff +website: http://opensciencegrid.org +weight: 5 +organizations: + - path + - chtc + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/jeronimo_bezerra.yml b/preview-fall2024-info/staff-list/jeronimo_bezerra.yml new file mode 100644 index 000000000..a6e1c28db --- /dev/null +++ b/preview-fall2024-info/staff-list/jeronimo_bezerra.yml @@ -0,0 +1,6 @@ +name: "Jeronimo Bezerra" +image: "images/jeronimo_bezerra.jpeg" +title: "Senior Systems Administrator" +institution: "Florida International University" +organizations: + - path \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/joe_bartkowiak.yml b/preview-fall2024-info/staff-list/joe_bartkowiak.yml new file mode 100644 index 000000000..79ddf5dd7 --- /dev/null +++ b/preview-fall2024-info/staff-list/joe_bartkowiak.yml @@ 
-0,0 +1,11 @@ +image: images/joe_bartkowiak.jpg +institution: University of Wisconsin Madison +title: Systems Administrator +name: Joe Bartkowiak +shortname: jbartkowiak +status: Staff +website: null +organizations: + - path + - chtc + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/joe_reuss.yml b/preview-fall2024-info/staff-list/joe_reuss.yml new file mode 100644 index 000000000..62d5305d8 --- /dev/null +++ b/preview-fall2024-info/staff-list/joe_reuss.yml @@ -0,0 +1,14 @@ +image: images/joe_reuss.jpeg +institution: University of Wisconsin-Madison +title: Software Engineer +name: Joe Reuss +status: Past +website: null +pelican : + title: Software Engineer + weight: 8 +organizations: + - path + - chtc + - osg + - pelican \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/john_knoeller.yml b/preview-fall2024-info/staff-list/john_knoeller.yml new file mode 100644 index 000000000..efcd15338 --- /dev/null +++ b/preview-fall2024-info/staff-list/john_knoeller.yml @@ -0,0 +1,14 @@ +name: "John TJ Knoeller" +date: 2018-11-19T10:47:58+10:00 +draft: false +image: "images/john_knoeller.jpg" +title: "Systems Software Developer" +status: Staff +#website: "" +institution: "University of Wisconsin-Madison" +weight: 5 +chtc: + title: HTCondor Core Developer +organizations: + - path + - chtc \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/john_parsons.yml b/preview-fall2024-info/staff-list/john_parsons.yml new file mode 100644 index 000000000..70cece8d2 --- /dev/null +++ b/preview-fall2024-info/staff-list/john_parsons.yml @@ -0,0 +1,8 @@ +image: images/john_parsons.jpeg +institution: University of Wisconsin Madison +title: System Administrator Intern +name: John Parsons +status: Past +website: null +organizations: + - chtc \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/john_thiltges.yml b/preview-fall2024-info/staff-list/john_thiltges.yml new file mode 100644 index 000000000..88e7afb06 --- /dev/null +++ b/preview-fall2024-info/staff-list/john_thiltges.yml @@ -0,0 +1,13 @@ +name: "John Thiltges" +shortname: jthiltges +date: 2018-11-19T10:47:58+10:00 +draft: false +image: "images/john_thiltges.jpg" +title: "Systems Administrator" +institution: "University of Nebraska-Lincoln" +#website: "" +linkedinurl: "" +weight: 5 +organizations: + - path + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/jordan_sklar.yml b/preview-fall2024-info/staff-list/jordan_sklar.yml new file mode 100644 index 000000000..d9710a24f --- /dev/null +++ b/preview-fall2024-info/staff-list/jordan_sklar.yml @@ -0,0 +1,12 @@ +name: "Jordan Sklar" +image: "images/jordan_sklar.jpg" +title: "Student Science Writer" +institution: "Morgridge Institute for Research" +website: null +weight: 3 +status: Student +organizations: + - path + - chtc + - osg + - pelican diff --git a/preview-fall2024-info/staff-list/josh_drake.yml b/preview-fall2024-info/staff-list/josh_drake.yml new file mode 100644 index 000000000..95da05fee --- /dev/null +++ b/preview-fall2024-info/staff-list/josh_drake.yml @@ -0,0 +1,14 @@ +name: "Josh Drake" +date: 2021-07-20T09:00:00+10:00 +draft: false +image: "images/josh_drake.jpg" +title: "Institutional PI" +institution: "Indiana University" +website: https://cacr.iu.edu/about/people/Josh_Drake.html +osg: + title: OSG Information Security Officer + promoted: true + weight: 6 +organizations: + - path + - osg \ No newline at end of file diff --git 
a/preview-fall2024-info/staff-list/josh_edwards.yml b/preview-fall2024-info/staff-list/josh_edwards.yml new file mode 100644 index 000000000..6b5f23d26 --- /dev/null +++ b/preview-fall2024-info/staff-list/josh_edwards.yml @@ -0,0 +1,9 @@ +image: images/josh_edwards.jpeg +institution: Indiana University +title: Security Analyst +name: Josh Edwards +status: Staff +website: null +organizations: + - path + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/judith_stephen.yml b/preview-fall2024-info/staff-list/judith_stephen.yml new file mode 100644 index 000000000..f58644ca7 --- /dev/null +++ b/preview-fall2024-info/staff-list/judith_stephen.yml @@ -0,0 +1,6 @@ +name: "Judith Stephen" +image: "images/judith_stephen.jpeg" +title: "Systems Administrator" +institution: "University of Chicago" +organizations: + - path \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/julio_ibarra.yml b/preview-fall2024-info/staff-list/julio_ibarra.yml new file mode 100644 index 000000000..f1b716568 --- /dev/null +++ b/preview-fall2024-info/staff-list/julio_ibarra.yml @@ -0,0 +1,6 @@ +name: "Julio Ibarra" +image: "images/julio_ibarra.jpg" +title: "Institutional PI" +institution: "Florida International University" +organizations: + - path \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/justin_hiemstra.yml b/preview-fall2024-info/staff-list/justin_hiemstra.yml new file mode 100644 index 000000000..4e3386d48 --- /dev/null +++ b/preview-fall2024-info/staff-list/justin_hiemstra.yml @@ -0,0 +1,12 @@ +image: images/justin_hiemstra.jpg +institution: Morgridge Institute For Research +title: Research Software Engineer +name: Justin Hiemstra +status: Staff +website: null +pelican: + weight: 5 +organizations: + - chtc + - osg + - pelican diff --git a/preview-fall2024-info/staff-list/kent_cramer_iii.yml b/preview-fall2024-info/staff-list/kent_cramer_iii.yml new file mode 100644 index 000000000..ebd3e4c1b --- /dev/null +++ b/preview-fall2024-info/staff-list/kent_cramer_iii.yml @@ -0,0 +1,7 @@ +image: images/kent_cramer.jpeg +institution: Morgridge Institute For Research +title: Network Infrastructure Support Specialist +name: Kent Cramer III +status: Staff +organizations: + - chtc diff --git a/preview-fall2024-info/staff-list/kristina_zhao.yml b/preview-fall2024-info/staff-list/kristina_zhao.yml new file mode 100644 index 000000000..c65e8951d --- /dev/null +++ b/preview-fall2024-info/staff-list/kristina_zhao.yml @@ -0,0 +1,25 @@ +name: Kristina Zhao +title: Fellow +institution: Morgridge Institute for Research +status: Student +organizations: + - chtc +image: images/kristina_zhao.jpg + + +fellowship: + name: Integrating PyTorch and Pelican + description: | + PyTorch is one of the most popular machine learning frameworks. + An important aspect of using it is the data engineering: how + is input data fed into the model during training? Going from + “tutorial scale” problems to cutting-edge research requires + drastically different techniques around data handling. + + For this project, we aim to better integrate Pelican + into the PyTorch community, providing both technical + mechanisms (implementing the fsspec interface for Pelican) + and documentation by providing tutorials and recipes for + scaling PyTorch-based training using a combination of HTCondor + and Pelican. 
+ mentor: Emma Turetsky and Ian Ross diff --git a/preview-fall2024-info/staff-list/lili_bicoy.yml b/preview-fall2024-info/staff-list/lili_bicoy.yml new file mode 100644 index 000000000..3afdf47a6 --- /dev/null +++ b/preview-fall2024-info/staff-list/lili_bicoy.yml @@ -0,0 +1,12 @@ +image: images/lili_bicoy.jpg +institution: Morgridge Institute For Research +title: Student Science Writer +name: Lili Bicoy +status: Past +website: null +pelican: + weight: 17 +organizations: + - chtc + - osg + - pelican \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/marissa_zhang.yaml b/preview-fall2024-info/staff-list/marissa_zhang.yaml new file mode 100644 index 000000000..b2ae31bb6 --- /dev/null +++ b/preview-fall2024-info/staff-list/marissa_zhang.yaml @@ -0,0 +1,8 @@ +image: images/default.jpg +institution: University of Wisconsin-Madison +title: System Administrator Intern +name: Marissa (Yujia) Zhang +status: Student +website: null +organizations: + - chtc diff --git a/preview-fall2024-info/staff-list/matevz_tadel.yml b/preview-fall2024-info/staff-list/matevz_tadel.yml new file mode 100644 index 000000000..5833a4188 --- /dev/null +++ b/preview-fall2024-info/staff-list/matevz_tadel.yml @@ -0,0 +1,10 @@ +image: images/matevz_tadel.jpg +institution: University of California San Diego +title: Project Scientist +name: Matevz Tadel +status: Staff +website: null +pelican: + weight: 10 +organizations: + - pelican \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/mats_rynge.yml b/preview-fall2024-info/staff-list/mats_rynge.yml new file mode 100644 index 000000000..6d7e7b14d --- /dev/null +++ b/preview-fall2024-info/staff-list/mats_rynge.yml @@ -0,0 +1,13 @@ +name: "Mats Rynge" +shortname: rynge +date: 2018-11-19T10:47:58+10:00 +draft: false +image: "images/mats_rynge.jpg" +title: "Systems Integrator" +institution: "University of Southern California - Information Sciences Institute" +#website: "" +linkedinurl: "" +weight: 5 +organizations: + - path + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/matt_westphall.yml b/preview-fall2024-info/staff-list/matt_westphall.yml new file mode 100644 index 000000000..9905020a0 --- /dev/null +++ b/preview-fall2024-info/staff-list/matt_westphall.yml @@ -0,0 +1,10 @@ +image: images/matt_westphall.jpeg +institution: University of Wisconsin-Madison +title: Research Cyberinfrastructure Specialist +name: Matt Westphall +status: Staff +website: null +organizations: + - path + - chtc + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/matyas_selmeci.yml b/preview-fall2024-info/staff-list/matyas_selmeci.yml new file mode 100644 index 000000000..0f1b5d21a --- /dev/null +++ b/preview-fall2024-info/staff-list/matyas_selmeci.yml @@ -0,0 +1,17 @@ +name: "Mátyás Selmeci" +shortname: matyasselmeci +date: 2020-09-18T15:46:09-05:00 +draft: false +image: "images/matyas_selmeci.jpg" +title: "Software Integration Developer" +institution: "University of Wisconsin–Madison" +status: Staff +linkedinurl: "" +weight: 5 +pelican: + weight: 15 +organizations: + - path + - chtc + - osg + - pelican \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/max_hartke.yml b/preview-fall2024-info/staff-list/max_hartke.yml new file mode 100644 index 000000000..7a7d25096 --- /dev/null +++ b/preview-fall2024-info/staff-list/max_hartke.yml @@ -0,0 +1,9 @@ +image: images/max_hartke.jpg +institution: University of Wisconsin-Madison +title: Student Programming Intern +name: Max Hartke 
+status: Past +website: null +organizations: + - path + - chtc \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/michael_collins.yml b/preview-fall2024-info/staff-list/michael_collins.yml new file mode 100644 index 000000000..30368c476 --- /dev/null +++ b/preview-fall2024-info/staff-list/michael_collins.yml @@ -0,0 +1,11 @@ +name: Michael Collins +shortname: mcollins +title: Systems Administrator +active: green +institution: Morgridge Institute for Research +website: +image: images/michael_collins.png +status: Past +organizations: + - chtc + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/mihir_manna.yml b/preview-fall2024-info/staff-list/mihir_manna.yml new file mode 100644 index 000000000..486c804a0 --- /dev/null +++ b/preview-fall2024-info/staff-list/mihir_manna.yml @@ -0,0 +1,9 @@ +image: images/mihir_manna.jpeg +institution: University of Wisconsin-Madison +title: System Administrator Intern +name: Mihir Manna +status: Past +website: null +organizations: + - path + - chtc \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/miron_livny.yml b/preview-fall2024-info/staff-list/miron_livny.yml new file mode 100644 index 000000000..f22c183b5 --- /dev/null +++ b/preview-fall2024-info/staff-list/miron_livny.yml @@ -0,0 +1,26 @@ +name: "Miron Livny" +shortname: miron +date: 2018-11-19T10:47:58+10:00 +draft: false +image: "images/miron_livny.png" +title: "PATh PI" +website: "https://wid.wisc.edu/people/miron-livny/" +institution: "University of Wisconsin–Madison" +promoted: true +weight: 1 +status: Leadership +description: Livny is a Professor of Computer Science and the lead of the PATh project. +chtc: + title: Director +osg: + title: OSG Technical Director and PI + promoted: true + weight: 1 +pelican: + title: Co-Principal Investigator + weight: 2 +organizations: + - path + - chtc + - osg + - pelican \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/molly_mccarthy.yml b/preview-fall2024-info/staff-list/molly_mccarthy.yml new file mode 100644 index 000000000..fa99b1f01 --- /dev/null +++ b/preview-fall2024-info/staff-list/molly_mccarthy.yml @@ -0,0 +1,12 @@ +name: "Molly McCarthy" +image: "images/molly_mccarthy.jpg" +title: "Student Web Developer" +institution: "Morgridge Institute for Research" +website: null +weight: 3 +status: Past +organizations: + - path + - chtc + - osg + - pelican \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/neha_talluri.yml b/preview-fall2024-info/staff-list/neha_talluri.yml new file mode 100644 index 000000000..8c69a14bd --- /dev/null +++ b/preview-fall2024-info/staff-list/neha_talluri.yml @@ -0,0 +1,20 @@ +name: Neha Talluri +title: Fellow +institution: Morgridge Institute for Research +status: Student +organizations: + - chtc +image: images/neha_talluri.jpg + +fellowship: + name: Where in the world am I + description: | + In PATh, an important part of the infrastructure is the “glidein”, a client that + starts at a remote location and provides computational cycles for research. + In the past, glideins have relied on configuration at remote locations to + determine their location but this often results in missing or incorrect + information. This project will focus on enhancing glideins so that they + can detect and report where they are running in the world, possibly including + data like geolocation and institutional owner. After a successful summer, + the student fellow will gain skills in Python, bash, and layer 3 networking. 
+ mentor: Jason Patton diff --git a/preview-fall2024-info/staff-list/pascal_paschos.yml b/preview-fall2024-info/staff-list/pascal_paschos.yml new file mode 100644 index 000000000..d2b72c090 --- /dev/null +++ b/preview-fall2024-info/staff-list/pascal_paschos.yml @@ -0,0 +1,10 @@ +name: "Pascal Paschos" +date: 2020-09-28T05:00:01-05:00 +draft: false +image: "images/pascal_paschos.png" +title: "Senior Computational Scientist" +institution: "University of Chicago" +#website: "" +weight: 5 +organizations: + - path \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/patrick_brophy.yml b/preview-fall2024-info/staff-list/patrick_brophy.yml new file mode 100644 index 000000000..633b649d1 --- /dev/null +++ b/preview-fall2024-info/staff-list/patrick_brophy.yml @@ -0,0 +1,24 @@ +name: Patrick Brophy +title: Fellow +institution: Morgridge Institute for Research +status: Student +organizations: + - chtc +image: images/patrick_brophy.jpg + +fellowship: + name: Expanded Pelican Origin Monitoring + description: | + The Pelican origin service is responsible for exporting objects in the backend + storage to the data federation. As it is the “entry point” for the data, understanding + the load on the origin and its activities is key to keeping the federation healthy. + Pelican takes monitoring data from the web server component and feeds it into the popular + Prometheus software to store time series about the activity. This project would focus on: + - Implementing new monitoring probes to complement the existing information. + - Forwarding the raw, unsummarized data to an ElasticSearch database for further analysis. + - Designing visualizations to provide administrators with an overview of the origin’s activities. + - Implementing alerts when there are health issues with the origin. + + After a successful summer, the student fellow will gain skills in using the Go + language, the Prometheus monitoring system (and other Cloud Native technologies), and web design. + mentor: Haoming Meng diff --git a/preview-fall2024-info/staff-list/pratham_patel.yml b/preview-fall2024-info/staff-list/pratham_patel.yml new file mode 100644 index 000000000..ec78da9ca --- /dev/null +++ b/preview-fall2024-info/staff-list/pratham_patel.yml @@ -0,0 +1,19 @@ +name: Pratham Patel +title: Fellow +institution: Morgridge Institute for Research +status: Student +organizations: + - chtc +image: images/pratham_patel.jpg + +fellowship: + name: Enhancing container image build system + description: | + Container images are a widely used technology to package and distribute + software and services for use in systems such as Docker or Kubernetes. + The PATh project builds hundreds of these images on a weekly basis but + the build system needs improvement to support more images and additional + use cases. This project will focus on taking the existing system and + adding configurable, per-image build options. After a successful summer, + the student fellow will gain skills in Docker containers, GitHub actions, and Bash. 
+ mentor: Brian Lin diff --git a/preview-fall2024-info/staff-list/rachel_lombardi.yml b/preview-fall2024-info/staff-list/rachel_lombardi.yml new file mode 100644 index 000000000..7a85404c6 --- /dev/null +++ b/preview-fall2024-info/staff-list/rachel_lombardi.yml @@ -0,0 +1,15 @@ +name: "Rachel Lombardi" +date: 2021-11-23T19:31:00-05:00 +draft: false +image: "images/rachel_lombardi.jpg" +title: "Research Computing Facilitator" +institution: "University of Wisconsin–Madison" +status: Staff +is_facilitator: 1 +#website: "" +linkedinurl: "" +weight: 5 +organizations: + - path + - chtc + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/rich_wellner.yml b/preview-fall2024-info/staff-list/rich_wellner.yml new file mode 100644 index 000000000..fac85a9ae --- /dev/null +++ b/preview-fall2024-info/staff-list/rich_wellner.yml @@ -0,0 +1,10 @@ +image: images/rich_wellner.jpg +institution: San Diego Supercomputer Center +title: SDx Director +name: Rich Wellner +status: Staff +website: null +pelican: + weight: 11 +organizations: + - pelican \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/rishideep_rallabandi.yml b/preview-fall2024-info/staff-list/rishideep_rallabandi.yml new file mode 100644 index 000000000..6393519bd --- /dev/null +++ b/preview-fall2024-info/staff-list/rishideep_rallabandi.yml @@ -0,0 +1,9 @@ +image: images/rishideep_rallabandi.jpg +institution: University of Wisconsin-Madison +title: Student Programming Intern +name: Rishideep Rallabandi +status: Past +website: null +organizations: + - path + - chtc \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/rob_gardner.yml b/preview-fall2024-info/staff-list/rob_gardner.yml new file mode 100644 index 000000000..d3ce96ad7 --- /dev/null +++ b/preview-fall2024-info/staff-list/rob_gardner.yml @@ -0,0 +1,13 @@ +name: Rob Gardner +shortname: robrwg +image: images/rob_gardner.jpg +institution: University of Chicago +title: Institutional PI +website: https://efi.uchicago.edu/people/profile/rob-gardner/ +osg: + title: OSG Collaboration Support Lead and OSG Council Chair + promoted: true + weight: 4 +organizations: + - path + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/ryan_boone.yml b/preview-fall2024-info/staff-list/ryan_boone.yml new file mode 100644 index 000000000..f333efd40 --- /dev/null +++ b/preview-fall2024-info/staff-list/ryan_boone.yml @@ -0,0 +1,20 @@ +name: Ryan Boone +title: Fellow +institution: Morgridge Institute for Research +status: Student +organizations: + - chtc +image: images/ryan_boone.jpg + +fellowship: + name: Grid Exerciser + description: | + The OSPool is a very large, very dynamic, heterogeneous high throughput system composed of execute + points from dozens of campuses all over the United States. Sometimes, something will go wrong + at one of these many sites, or one network, or one storage point, and it is difficult to determine + where the problem is. This project proposes the design and construction of a “Grid Exerciser”, + which consists of intentionally sending sample jobs to targeted locations on the OSPool to verify + correct operation and sufficient performance. The project will also have a reporting and + visualization component so that the voluminous results can be understood by a human in a + concise manner.
+ mentor: Cole Bollig and Rachel Lombardi diff --git a/preview-fall2024-info/staff-list/ryan_jacobs.yml b/preview-fall2024-info/staff-list/ryan_jacobs.yml new file mode 100644 index 000000000..292289be2 --- /dev/null +++ b/preview-fall2024-info/staff-list/ryan_jacobs.yml @@ -0,0 +1,10 @@ +image: images/ryan_jacob.jpg +institution: University of Wisconsin-Madison +title: System Administrator Intern +name: Ryan Jacob +status: Past +website: null +organizations: + - path + - chtc + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/shawn_mckee.yml b/preview-fall2024-info/staff-list/shawn_mckee.yml new file mode 100644 index 000000000..db7043a4c --- /dev/null +++ b/preview-fall2024-info/staff-list/shawn_mckee.yml @@ -0,0 +1,9 @@ +name: Shawn McKee +shortname: smckee +title: Network Area Coordinator +active: green +institution: University of Michigan-Ann Arbor +website: https://lsa.umich.edu/physics/people/research-scientists/smckee.html +image: images/shawn_mckee.jpg +organizations: + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/shirley_obih.yml b/preview-fall2024-info/staff-list/shirley_obih.yml new file mode 100644 index 000000000..6d608aa35 --- /dev/null +++ b/preview-fall2024-info/staff-list/shirley_obih.yml @@ -0,0 +1,8 @@ +image: images/shirley_obih.jpg +institution: Morgridge Institute For Research +title: Communications Specialist +name: Shirley Obih +status: Past +website: null +organizations: + - chtc \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/showmic_islam.yml b/preview-fall2024-info/staff-list/showmic_islam.yml new file mode 100644 index 000000000..d8c6a7c51 --- /dev/null +++ b/preview-fall2024-info/staff-list/showmic_islam.yml @@ -0,0 +1,9 @@ +name: "Showmic Islam" +image: "images/showmic_islam.jpg" +title: "Research Facilitator" +#website: "" +institution: "University of Nebraska-Lincoln" +weight: 5 +organizations: + - path + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/susan_sons.yml b/preview-fall2024-info/staff-list/susan_sons.yml new file mode 100644 index 000000000..a4a643c92 --- /dev/null +++ b/preview-fall2024-info/staff-list/susan_sons.yml @@ -0,0 +1,9 @@ +name: Susan Sons +shortname: HedgeMage +title: Security Analyst +active: green +institution: Indiana University +website: https://cacr.iu.edu/about/people/susan-sons.html +image: images/susan_sons.jpg +organizations: + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/tae_kidd.yml b/preview-fall2024-info/staff-list/tae_kidd.yml new file mode 100644 index 000000000..110281fba --- /dev/null +++ b/preview-fall2024-info/staff-list/tae_kidd.yml @@ -0,0 +1,13 @@ +image: images/tae_kidd.jpg +institution: Morgridge Institute For Research +title: Project Manager +name: Tae Kidd +status: Staff +website: null +pelican: + title: Project Manager + weight: 4 +organizations: + - path + - chtc + - pelican \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/theng_vang.yml b/preview-fall2024-info/staff-list/theng_vang.yml new file mode 100644 index 000000000..21a163165 --- /dev/null +++ b/preview-fall2024-info/staff-list/theng_vang.yml @@ -0,0 +1,12 @@ +name: Theng Vang +shortname: theng +title: System Administrator +active: green +institution: University of Wisconsin-Madison +website: +image: images/theng_vang.jpg +status: Staff +organizations: + - path + - chtc + - osg \ No newline at end of file diff --git 
a/preview-fall2024-info/staff-list/thinh_nguyen.yml b/preview-fall2024-info/staff-list/thinh_nguyen.yml new file mode 100644 index 000000000..55fb6d830 --- /dev/null +++ b/preview-fall2024-info/staff-list/thinh_nguyen.yml @@ -0,0 +1,20 @@ +name: Thinh Nguyen +title: Fellow +institution: Morgridge Institute for Research +status: Student +organizations: + - chtc +image: images/thinh_nguyen.jpg + +fellowship: + name: ML for failure classification in the OSPool + description: | + The OSPool runs hundreds of thousands of jobs every day on dozens of + different sites, each unique in their own way. Naturally, there are + many hundreds of failures, most of which the system works around, but + with added latency to workflow completion. This project would attempt + to automatically classify failures from job logs to detect common + patterns and highlight places for humans to look to fix common failures + with the most payoff. Students working on this project will gain + experience applying ML techniques to real world problems. + mentor: Justin Hiemstra diff --git a/preview-fall2024-info/staff-list/tim_cartwright.yml b/preview-fall2024-info/staff-list/tim_cartwright.yml new file mode 100644 index 000000000..d07f09501 --- /dev/null +++ b/preview-fall2024-info/staff-list/tim_cartwright.yml @@ -0,0 +1,20 @@ +name: "Tim Cartwright" +shortname: osg-cat +date: 2020-09-21T05:00:01-05:00 +draft: false +image: "images/tim_cartwright.jpg" +title: "Research Services Manager" +institution: "University of Wisconsin–Madison" +website: http://pages.cs.wisc.edu/~cat/ +status: Staff +weight: 5 +chtc: + title: OSG Deputy Director/XO +osg: + title: CC* Coordinator + promoted: true + weight: 5 +organizations: + - path + - chtc + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/tim_theisen.yml b/preview-fall2024-info/staff-list/tim_theisen.yml new file mode 100644 index 000000000..702b0fc68 --- /dev/null +++ b/preview-fall2024-info/staff-list/tim_theisen.yml @@ -0,0 +1,16 @@ +name: "Tim Theisen" +date: 2018-11-19T10:47:58+10:00 +draft: false +image: "images/tim_theisen.png" +title: "Senior Systems Software Developer" +status: Staff +institution: "University of Wisconsin-Madison" +weight: 5 +chtc: + title: Release Manager +osg: + title: Release Manager +organizations: + - path + - chtc + - osg \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/todd_miller.yml b/preview-fall2024-info/staff-list/todd_miller.yml new file mode 100644 index 000000000..579c578c3 --- /dev/null +++ b/preview-fall2024-info/staff-list/todd_miller.yml @@ -0,0 +1,13 @@ +name: "Todd L Miller" +date: 2018-11-19T10:47:58+10:00 +draft: false +image: "images/todd_miller.png" +title: "Senior Systems Software Developer" +status: Staff +institution: "University of Wisconsin-Madison" +weight: 5 +chtc: + title: HTCondor Core Developer +organizations: + - path + - chtc \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/todd_tannenbaum.yml b/preview-fall2024-info/staff-list/todd_tannenbaum.yml new file mode 100644 index 000000000..40fd96c48 --- /dev/null +++ b/preview-fall2024-info/staff-list/todd_tannenbaum.yml @@ -0,0 +1,16 @@ +name: "Todd Tannenbaum" +date: 2018-11-19T10:47:58+10:00 +draft: false +image: "images/todd_tannenbaum.jpg" +title: "Software Development co-lead" +#website: "" +institution: "University of Wisconsin–Madison" +promoted: true +weight: 3 +status: Leadership +description: Tannenbaum is a Researcher and HTCondor Technical Lead at UW-Madison, and co-lead of PATh 
Software Development. +chtc: + title: HTCondor Software Lead +organizations: + - path + - chtc \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/wil_cram.yml b/preview-fall2024-info/staff-list/wil_cram.yml new file mode 100644 index 000000000..dc5278479 --- /dev/null +++ b/preview-fall2024-info/staff-list/wil_cram.yml @@ -0,0 +1,19 @@ +name: Wil Cram +title: Fellow +institution: Morgridge Institute for Research +status: Student +organizations: + - chtc +image: images/wil_cram.jpg + +fellowship: + name: Schedd performance analysis for human + description: | + The condor_schedd is a single threaded program, and when it is overloaded, + it is difficult for administrators to understand why. There are some + statistics about what it is doing, but there is no clear way to present + this information in a useful way to an administrator. Students working + on this project would build visualizations of complex data, and work + with end users and facilitators to tune output for real world human + consumption. + mentor: Greg Thain diff --git a/preview-fall2024-info/staff-list/william_swanson.yml b/preview-fall2024-info/staff-list/william_swanson.yml new file mode 100644 index 000000000..2177eeb11 --- /dev/null +++ b/preview-fall2024-info/staff-list/william_swanson.yml @@ -0,0 +1,11 @@ +name: William Swanson +image: images/william_swanson.jpg +title: Research Cyberinfrastructure Specialist +institution: "University of Wisconsin\u2013Madison" +status: Staff +pelican: + weight: 16 +organizations: + - chtc + - osg + - pelican \ No newline at end of file diff --git a/preview-fall2024-info/staff-list/yuxiao_qu.yml b/preview-fall2024-info/staff-list/yuxiao_qu.yml new file mode 100644 index 000000000..f70f73fd5 --- /dev/null +++ b/preview-fall2024-info/staff-list/yuxiao_qu.yml @@ -0,0 +1,8 @@ +image: images/yuxiao.jpg +institution: Morgridge Institute For Research +title: Research Software Engineer +name: Yuxiao Qu +status: Past +website: null +organizations: + - chtc diff --git a/preview-fall2024-info/staff/.htaccess b/preview-fall2024-info/staff/.htaccess new file mode 100644 index 000000000..49643e4f5 --- /dev/null +++ b/preview-fall2024-info/staff/.htaccess @@ -0,0 +1,13 @@ +AuthUserFile /p/condor/public/developers/dev-webpage-passwd +AuthName "Condor Developers" +AuthType Basic + +require valid-user + + +AddHandler cgi-script .pl +DefaultType text/html + +#PerlHandler HTML::Mason::ApacheHandler +#PerlSetVar MasonCompRoot /s/www/html/condor/developers +#PerlSetVar MasonDataDir /p/condor/public/mason diff --git a/preview-fall2024-info/staff/docs/Adding_News_Articles.html b/preview-fall2024-info/staff/docs/Adding_News_Articles.html new file mode 100644 index 000000000..2565aa9f3 --- /dev/null +++ b/preview-fall2024-info/staff/docs/Adding_News_Articles.html @@ -0,0 +1,558 @@ + + + + + + +Adding News Articles + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+

+ Adding News Articles +

+ +

Table of Contents

+ + +

Using Markdown

+ +

You will be using Markdown to write all news articles. Markdown is a popular markup +language that is converted to HTML before being displayed on the website.

+ +

A good cheatsheet can be found here; it contains the markdown syntax and examples of how it looks when converted to HTML.

+ +

Adding Article To the Website

+ +

After you have written your article in the text editor of your choice and are ready to have it on the website, you will first need to create a preview branch.

+ +

All of our websites have a preview location where you can view changes before adding them to the main website. I will use PATh for the example below, but this is the same process for CHTC, HTCondor and OSG as well.

Project  | Github Repo                                                   | Preview URL Prefix
-------- | ------------------------------------------------------------- | -----------------------------------------
CHTC     | https://github.com/CHTC/chtc-website-source                   | https://chtc.github.io/web-preview/
HTCondor | https://github.com/htcondor/htcondor-web                      | https://htcondor.com/web-preview/
OSG      | https://github.com/opensciencegrid/opensciencegrid.github.io  | https://opensciencegrid.org/web-preview/
PATh     | https://github.com/path-cc/path-cc.github.io                  | https://path-cc.io/web-preview/
  1. Go to the Github repo and create a preview branch
     • The branch name must start with ‘preview-’ followed by a descriptive term.
       • Example: You write an article about HTC and Genes, so you name the branch ‘preview-gene-article’.
     • [Image: Create a Github Branch]
  2. Add your news article
     1. Check that you are in your new branch
        • The previous step will put you in your new preview branch. You can check by looking at the branch name displayed.
     2. Go into the news article directory
     3. Add a new file with the title ‘YYYY-MM-DD-title.md’
        • Example: For the HTC and Genes article -> ‘2021-12-25-htc-and-genes.md’ if you are going to publish the article on Christmas 2021.
     4. Copy and paste in the template:

        ---
        title:              # Article Title
        date: 9999-12-31    # Article Date - In format YYYY-MM-DD - Article will not show on website until Article Date >= Current Date
        excerpt:            # Article Excerpt - An abstract of the article
        image_src:          # Path to the image to be displayed in article cards
        image_alt:          # A description of this image
        author:             # Article Author
        published: false    # If this article should be on the website, change to true when ready to publish
        ---

        Content

     5. Fill in all the front matter and replace ‘Content’ with your article (a filled-in sketch follows this list). [Image: Demo article filled in]
  3. Review your preview
     • Look for your article preview at the project’s Preview URL Prefix followed by the branch name.
     • Example for PATh: https://path-cc.io/web-preview/preview-helloworld
       • Project Preview URL Prefix: https://path-cc.io/web-preview/
       • Branch name: preview-helloworld [Image: Demo Preview]
  4. Create a Pull Request
     • When the article is ready to go live you must create a pull request from your branch into ‘master’.
     • Comment the preview URL in the Pull Request for easy review. [Image: Create a PR]
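For reference, here is a hypothetical filled-in front matter for the gene article used as the example above (every value is illustrative, not a real article):

   ---
   title: "Using HTC to Study Genes"
   date: 2021-12-25
   excerpt: "How researchers used high throughput computing to analyze gene data."
   image_src: "/images/2021-12-25-htc-and-genes.jpg"
   image_alt: "Illustration of a DNA strand"
   author: "Jane Doe"
   published: true
   ---

   The article text, written in Markdown, goes here.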

Using Images

+ +

Adding Images

+ +

Images can be added using either Markdown or HTML.

+ +
  1. Markdown
     • To add an image in Markdown, use the syntax ![Alternate Caption](/image/path).
     • You can add classes to adjust how the image appears by placing {: .<class-name> } on the line above.
       • All classes that can be used can be found here, but the ones you are most likely to use are the float and image classes.
     • Example: The markdown below shows the demo image with the class float-right, which positions the image to the right of the text:

       {: .float-right }
       ![Demonstration Image](/images/docs/demo_image.jpg)

  2. HTML
     • Images added with HTML use the ‘img’ tag.
     • The syntax for this is:

       <img class="optional-class" src="/path/to/image" alt="Description of image for the visually impaired">

     • Using HTML gives you more options, such as figure captions:

       <figure>
           <img src="/path/to/image" alt="Description">
           <figcaption>The image caption</figcaption>
       </figure>

Reducing Image Size

+ +

High-definition images take up space that slows down the website when it loads, so it is important to reduce their footprint before adding them to the website.

+ +

To keep the image size reasonable follow the rules below.

+ +
  • <= 1000 pixels wide
    • This is the maximum website width, so any images wider will have unused resolution.
  • Convert to jpg and reduce to a reasonable size
    • This is up to you and changes picture to picture. Some pictures look fine when compressed, some don’t.
    • A reasonable target is ~200kb.
  • (If you prefer the command line to Photoshop, see the sketch after this list.)
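If Photoshop (used in the example below) is unavailable, a command-line tool can perform the same reduction. A minimal sketch, assuming ImageMagick is installed (file names are illustrative):

   # Resize to at most 1000 pixels wide (the ">" flag only shrinks larger images)
   # and re-encode as a JPEG; adjust -quality until the size is near the ~200kb target.
   convert original.png -resize "1000x>" -quality 75 compressed.jpg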

Example

+ +

2MB Image

+ +

We will reduce this 2MB image to demonstrate.

+ +
  1. Pull up the image in Photoshop. If you don’t have Photoshop, contact IT, as you do have free access.
  2. Go to Export As… [Image: Export As location on Mac]
  3. Update the values to reduce the size. [Image: Photoshop export dialog with updated values]
  4. Use your new compressed image in the article. [Image: Compressed Image]

Positioning Images

+ +

To position images on the page you must use classes. For markdown this means including {: .<class> } above the image syntax, and for HTML this means adding class="<class>" inside the img tag.

+ +

Potential classes that can be used to position the image come from Bootstrap Utilities.

+ +

The ones you will find the most helpful are:

+
  1. Floats
  2. Image Specific
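To tie the two syntaxes together, here is the same demo image floated to the right in each form (the float-right class comes from the Bootstrap utilities above; the path is illustrative):

   {: .float-right }
   ![Demonstration Image](/images/docs/demo_image.jpg)

   <img class="float-right" src="/images/docs/demo_image.jpg" alt="Demonstration Image">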
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/staff/docs/Schedule_Calendar.html b/preview-fall2024-info/staff/docs/Schedule_Calendar.html new file mode 100644 index 000000000..87eda76ea --- /dev/null +++ b/preview-fall2024-info/staff/docs/Schedule_Calendar.html @@ -0,0 +1,493 @@ + + + + + + +Schedule Calendar Guide + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+

+ Schedule Calendar Guide +

+ + + + + +

Moving From Previous AFS File

+ +

This section shows how one might transfer from entering their times in AFS to entering their times into a Google calendar.

+ +

Save a copy of your schedule file; the Google calendar will overwrite it

+ +

If you did not do this and now need it back, Cannon has them all saved as of 2022-07-15.

+ +

Create an ics file from your current file in AFS

+ +

Login to moria

+ +
cd /p/condor/public/html/htcondor/developers/schedules
source generate_personal_ics.sh <your_file_name>
+ +

If an error is reported in your schedule file you can fix it or ignore it and that event just won’t be added.

+ +

Grab the ics file from this build and import it into the calendar you create below.

+ +

This can be done on the import page.

+ +

Don’t forget to also create your new schedule file which holds your schedule meta-data.

+ +

Creating The Schedule File

+ +

This section describes how to create your yaml file; you can find a verbose template file on AFS or use the one below.

+ +

<filename>.yml

+
Name: "name"
ShortName: "shortname"
corehours: "corehours"
DailyEmail: "Yes" # ( or omit line entirely )
Email: "email"
Office: "location"
Phone: "phone" # Office and/or Cell - This is read as a string so format how you want
calendarurl: "calendarurl"
default:
  starttime: "starttime"
  endtime: "endtime"
  status: "status"
+
+
File Details
+ +
  • Name: First Last
  • ShortName: (Optional, defaults to First Name if not specified) Should be unique and obviously you. If your name is “George Washington” and George Foreman also works in your group, “GeorgeW” would be a good choice.
  • corehours: A description of your core hours that is displayed. Format is not important. Example is “9:00 AM to 5:00 PM”
  • DailyEmail: If “Yes” then you will receive a daily email with who is out; otherwise the line should be omitted entirely.
  • Email: Your preferred email address. Defaults to filename@cs.wisc.edu, so you will likely want to change this
  • Office: Your office location. Example => “4261 CS”
  • Phone: Your phone number(s). Example => “+1 608 265 5736 (office) +1 608 576 0351 (cell)”
  • calendarurl: The url to your outage calendar. Details on obtaining this are found below.
  • starttime: Your typical start time, in military format. Example => “09:00”
  • endtime: Your typical end time, in military format. Example => “17:00”
  • status: Your status during these hours. If you are unsure use “Office”.
  • default[Monday, Tuesday, Wednesday, Thursday, Friday]: Overwrites the default for that day. Use the same format as default.
+ +
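As a concrete reference, here is a hypothetical filled-in schedule file for the “GeorgeW” example above (all values are illustrative; the per-day override shown for Friday follows the default[Day] note in the list):

   Name: "George Washington"
   ShortName: "GeorgeW"
   corehours: "9:00 AM to 5:00 PM"
   DailyEmail: "Yes"
   Email: "gwashington@cs.wisc.edu"
   Office: "4261 CS"
   Phone: "+1 608 555 0100 (office)"
   calendarurl: "https://calendar.google.com/calendar/ical/example/basic.ics"
   default:
     starttime: "09:00"
     endtime: "17:00"
     status: "Office"
   Friday:
     starttime: "09:00"
     endtime: "13:00"
     status: "WFH"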

Important

+ +

All of these data strings have to be enclosed in double quotation marks to be valid yaml. This can be seen in the template file.

+ +

Creating Your ICAL URL

+ +

To power your outage calendar you need to create a Google calendar which is used solely to populate your outages.

+ +
  1. Go to https://calendar.google.com/ and sign in with your preferred account. You can use @morgridge.org and @wisc.edu.
  2. Create a new calendar
     • Name and Description do not matter. [Image: Add Container Image]
  3. Go into Calendar settings and retrieve the Secret Address
     • Go to calendar Settings. [Image: Go to Calendar Settings]
     • Get the secret calendar url (it will warn you not to give this out). [Image: Get Secret Calendar URL]
  4. Paste this address into your yaml file as the calendarurl

Populating Your Days Off

+ +

Event Title

+ +

The event title should be one of the statuses bolded below. These statuses are used to key the type of outage, so the event title should contain nothing but an approved status.

+ +
  • Travel: Working, but not at the office. Perhaps a conference
  • Vacation: Taking vacation (“vacation” and “personal holiday” on the leave report)
  • Sick: Taking sick leave (“sick leave” on the leave report)
  • Holiday: Taking floating holiday (“legal holiday” on the leave report)
  • Furlough: State- or UW-mandated furlough (as required). Includes both fixed (“mandatory”) and floating time.
  • Off: Days not worked under part-time employment
  • WFH: Work From Home
+ +

Event Description

+ +

Any description of the outage you would like to add can go in the event description.

+ +

Marking Event Time

+ +
Marking Full day/days Out
+ +

To mark full day outages, create an event with the “All day” attribute ticked (this is used in the demo above). Populate the title and description as expected.

+ +

Do not use the recurring event feature for multiple outage days.

+ +
Marking Partial Outages
+ +

To mark partial time you must do two different things.

+ +
  1. Append the number of hours the outage is taking to the title, separated by a colon.
     • For example, if you have a four hour doctor appointment you would mark SICK:4
     • For example, if you leave for vacation half a day early you would mark VACATION:4
  2. Mark the time you are *in* the office on Google
     • This is non-intuitive, but when you are marking time you mark the time you are in.
     • For example, if you are normally in 9-5 and are leaving 4 hours early, you would mark your event to go from 9:00 AM to 1:00 PM.
+ +
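Putting the two rules together, a hypothetical half-day vacation entry would look like this in Google Calendar:

   Title:       VACATION:4
   Time:        9:00 AM – 1:00 PM (the four hours you are in the office)
   Description: Leaving a half day early for a trip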

Example

+ +

If you mark your title in Google as “Sick” and the description as “Wisdom Tooth Surgery and Recovery”, the schedule output will look as follows.

+ +

Google Event Demo

+ +

Google Event Demo Schedule App

+ + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/staff/index.html b/preview-fall2024-info/staff/index.html new file mode 100644 index 000000000..2e2a036ec --- /dev/null +++ b/preview-fall2024-info/staff/index.html @@ -0,0 +1,348 @@ + + + + + + +CHTC Staff Page + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+
+
+
+

+ CHTC Staff Page +

+ +

Docs:

+ + +

Schedule Calendar

+ + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/tackling-strongly-correlated-quantum-systems.html b/preview-fall2024-info/tackling-strongly-correlated-quantum-systems.html new file mode 100644 index 000000000..a33b25d6b --- /dev/null +++ b/preview-fall2024-info/tackling-strongly-correlated-quantum-systems.html @@ -0,0 +1,446 @@ + + + + + + +Tackling Strongly Correlated Quantum Systems on OSPool + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Tackling Strongly Correlated Quantum Systems on OSPool +

+

Duke University Associate Professor of Physics Shailesh Chandrasekharan and his graduate student Venkitesh Ayyar are +using the OSPool to tackle notoriously difficult problems in quantum systems.

+ +
+
+
+ +
Shailesh Chandrasekharan, courtesy photo
+
+
+
+
+ +
+
Venkitesh Ayyar, courtesy photo
+
+
+
+ +

These quantum systems are the physical systems of our universe, being investigated at the fundamental level where +elemental units carrying energy behave according to the laws of quantum mechanics. +In many cases, these units might be referred to as particles or more generally as “quantum degrees of freedom.” +The most exciting physics arises when these units are strongly correlated: the behavior of each one depends on the +system as a whole; they cannot be taken and studied independently. +Such systems arise naturally in many areas of fundamental physics, ranging from condensed matter (many materials +fabricated in laboratories contain electrons that are strongly correlated and show exotic properties) to nuclear and +particle physics.

+ +

The proton, one of the particles inside an atom’s nucleus, is itself a strongly correlated bound state involving many +quarks and gluons. +Understanding its properties is an important research area in nuclear physics. +The origin of mass and energy in the universe could be the result of strong correlations between fundamental quantum +degrees.

+ +

“Often we can write down the microscopic theory that describes a physical system. +For example, we believe we know how quarks and gluons interact with each other to produce a proton. +But then to go from there to calculate, for instance, the spin of the proton or its structure is non-trivial,” said +Chandrasekharan. +“Similarly, in a given material we have a good grasp of how electrons hop from one atom to another. +However, from that theory to compute the conductivity of a strongly correlated material is very difficult. +The final answer—that helps us understand things better—requires a lot of computation. +Typically the computational cost grows exponentially with the number of interacting quantum degrees of freedom.”

+ +

According to Chandrasekharan, the main challenge is to take this exponentially hard problem and convert it to something +that scales as a polynomial and can be computed on a classical computer. +“This step is often impossible for many strongly correlated quantum systems, due to the so-called +sign problem which arises due to quantum mechanics,” added +Chandrasekharan. +“Once the difficult sign problem is solved, we can use Monte Carlo calculations to obtain answers. +Computing clusters like the OSG can be used at that stage.”

+ +

Chandrasekharan has proposed an idea, called the fermion bag approach, +that has solved numerous sign problems that seemed unsolvable in systems containing fermions (electrons and quarks are +examples of fermions). +In order to understand a new mechanism for the origin of mass in the universe, Ayyar is specifically using the OSG to +study an interacting theory of fermions using the fermion bag approach.

+ +
+ +
Illustration of a fermion bag configuration. Image credit: Shailesh Chandrasekharan
+
+ +

“We compute correlation functions on lattices and look at their behavior as the lattice size increases,” Ayyar explained. +In the presence of a mass, the correlation functions decay exponentially. +“Ideally, we would want to perform computations on very large lattices (>100x100x100). +Each calculation involves computing the inverse of large matrices millions of times. +The matrix size scales with the lattice size and so the time taken increases very quickly (from days to weeks to months). +This is what limits the size of the lattice used in our computation and the precision of the quantities calculated.” +In a recent publication, Ayyar and Chandrasekharan performed computations on lattices of sizes up to 28x28x28, and +more recently they have been able to push these to lattices of size 40x40x40.

+ +

Since their computation is parallelizable, they can run several calculations at the same time. +Ayyar says this makes the OSG perfect for their work. +“Instead of running a job for 100 days sequentially,” he noted, “we can run 100 jobs simultaneously for one day to get +the same information. +This not only helps us speed up our calculation several times, but we also get very high precision.”

+ +

Ayyar uses simple scripts to submit a large number of jobs and monitor their progress. +One challenge he faced was the check-pointing of jobs. +“Some of our jobs run long, say two to six days, and we found these getting terminated before completion due to the +queuing system,” Ayyar said. +To solve this, he developed what he calls ‘manual check-pointing’ to execute jobs in pieces. +“This extends the completed processes and submits them so that long-running processes can be completed. +Being able to control the memory and disk-space requirements on the target nodes has proved to be extremely useful.”

+ +

Ayyar also noted that many individual research groups cannot afford the luxury of having thousands of computing nodes. +“This kind of resource sharing on the OSG has helped computational scientists like us attempt calculations that could +not be done before,” he added. +“For example, we are now attempting computations on lattices of size 60x60x60. +One sweep should only take a few hours on each core.”

+ +

Chandrasekharan points out that past technology breakthroughs like the kind that revolutionized processor chip +manufacturing have largely been based on basic quantum mechanics learned in the 1940s and 1950s. +“We still need to understand the complexity that can be produced when many quantum degrees of freedom interact with each +other strongly,” said Chandrasekharan. +“The physics we learn could be quite rich.”

+ +

He says this next phase of research is already happening in nanoelectronics. +“If the computational quantum many-body challenge that we face today is solved, it may help revolutionize the next +generation of technology and give us a better understanding of the physics.”

+ +

Ayyar and Chandrasekharan recently submitted a paper based on their work using the OSG. +Titled Massive fermions without fermion bilinear condensates, +it has been published in the journal Physical Review D of the American Physical Society.

+ +

– Greg Moore

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/technologies.html b/preview-fall2024-info/technologies.html new file mode 100644 index 000000000..201fcf1a6 --- /dev/null +++ b/preview-fall2024-info/technologies.html @@ -0,0 +1,348 @@ + + + + + + +Technologies + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Technologies +

+

The CHTC offers a suite of open-source software tools that manage HTC +workloads and enable organizations to form distributed HTC pools. The +HTCondor Software Suite (HTCSS) +is the product of over three decades of +research and development at the Computer Sciences Department of the +University of Wisconsin-Madison. It has been adopted by academic and +commercial entities around the world in support of their HTC workloads.

+ + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/the-pelican-project.html b/preview-fall2024-info/the-pelican-project.html new file mode 100644 index 000000000..9cdab1e66 --- /dev/null +++ b/preview-fall2024-info/the-pelican-project.html @@ -0,0 +1,371 @@ + + + + + + +The Pelican Project: Building a universal plug for scientific data-sharing + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ The Pelican Project: Building a universal plug for scientific data-sharing +

+

From its founding, the Morgridge Institute for Research has driven the idea that open sharing of research computing resources will be a great enabler of scientific discovery, powering everything from black hole astronomy to stem cell biology.

+ +

Increasingly, the principle of sharing is being applied not only to computing resources, but to the wealth of data those projects are producing. Resources such as high-throughput computing and the OSG Consortium have been incorporating more tools for scientists to share their raw data for further exploration.

+ +

This principle is now getting traction on a national policy scale. The White House Office of Science and Technology Policy (OSTP) established new requirements in 2022 that any research supported by federal funds must be made available to the public without embargoes or paywalls.

+ +

This mandate applies not only to published findings, but to the core data those findings are based upon. Within the scientific community, the approach is referred to as the “FAIR” principles, which means that scientific data should be “findable, accessible, interoperable and reusable.”

+ +

Obviously, applying this new standard to data is as much a technical challenge as it is a cultural one. A new project at the Morgridge, led by research computing investigators Brian Bockelman and Miron Livny, is working toward creating a software platform that can facilitate the sharing of diverse research datasets.

+ +

Nicknamed “Pelican,” the project is supported through a $7 million grant from the National Science Foundation (NSF). The award (OAC-2331489) will strive to make data produced by researchers, from single-investigator labs to international collaborations, more accessible for computing and remote clients for viewing. Pelican supports and extends the work Bockelman and Livny have been doing as part of the OSG Consortium for over a decade.

+ +

Bockelman says that public research data-sharing has been a growing movement the past decade, but the COVID-19 pandemic served as a potent catalyst. The pandemic made the benefits of sharing abundantly clear, including the development of a vaccine at an unprecedented pace — 6 months compared to a typical multi-year process.

+ +

“Our philosophy is that not only should your research paper be public and readable, but your data should be as well,” Bockelman says. “If scientists just say, ‘here are the results in a pretty graph,’ and don’t share the underlying dataset, we lose a lot of value when others can’t access the data, can’t interpret it, or use it for their own research.”

+ +

Bockelman says there are some other core benefits that may come from the open science push. By making data more readily accessible, it should improve the reproducibility of experiments and potentially reduce scientific fraud. It can also narrow the gap between the “haves” and “have-nots” in the research world by providing data access regardless of institutional resources.

+ +

Bockelman likens the Pelican project to developing a “universal adapter plug” that can accommodate all different types of data. Just like homes have standard outlets that work for all different household appliances, that same approach should help individual scientists plug into a sharable data platform regardless of the nature of their data.

+ +

One of the first proving grounds for Pelican will be its participation within the National Discovery Cloud for Climate, an effort to bring together compute, data, and network resources to democratize access and advance the climate-related science and engineering. Bockelman says the Pelican project will help optimize this data sharing effort with the climate science community and provide a proof of concept for other research areas.

+ +

But ultimately, the best benefit may be enhancing public trust in high-impact science.

+ +

“Even for people who may not go digging into the data, they want to know that science has been done responsibly, especially for fields where it directly affects their lives,” Bockelman says. “Climate is a great example of where the science can really drive regulations that affect people. Getting data out as open and following the FAIR principles … is part of that relationship between the scientific community and the society at large.”

+ +

Bockelman says making data accessible is more than just downloading from a webserver. Pelican works to establish approaches that help people utilize the data effectively from anywhere in the nation’s computing infrastructure — essential so anyone from a tribal college to the largest university can understand and interpret the climate data.

+ +

The original memo was written in 2022 by then OSTP Director Alondra Nelson, and today the “Nelson memo” is viewed as a watershed document in federal research policy.

+ +

“When research is widely available to other researchers and the public, it can save lives, provide policy makers with the tools to make critical decisions, and drive more equitable outcomes across every sector of society,” Nelson wrote. “The American people fund tens of billions of dollars of cutting-edge research annually. There should be no delay or barrier between the American public and the returns on their investments in research.”

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/tribalcollege.html b/preview-fall2024-info/tribalcollege.html new file mode 100644 index 000000000..2b826c43d --- /dev/null +++ b/preview-fall2024-info/tribalcollege.html @@ -0,0 +1,393 @@ + + + + + + +Tribal College and CHTC pursue opportunities to expand computing education and infrastructure + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Tribal College and CHTC pursue opportunities to expand computing education and infrastructure +

+

Salish Kootenai College and CHTC take steps toward bringing underrepresented communities to cyberinfrastructure.

+ +

Access to cyberinfrastructure (CI) is the bedrock foundation essential for students and researchers determined to contribute to science. +That’s why Lee Slater, +the Cyberinfrastructure Facilitator at Salish Kootenai College (SKC), a tribal community college in northwest Montana, first brought +up the “missing millions.” The term was coined after the National Science Foundation (NSF) reported +that users and providers of the CI as a whole do not accurately represent society. Underrepresented racial and gender demographics were largely missing from +the field. “[The missing millions] just don’t have access to high performance computing platforms and so they’re not contributing greatly to the scientific +body of knowledge that other privileged students have access to,” Slater explained. “It’s a real serious deficit for these students. One of the goals we’re +trying to get accomplished is to bring these educational and research platforms to students and faculty to really enrich the experience they have as students.”

+ +

SKC is located on an indigenous reservation known as the Flathead Reservation, which includes territory in four western states. Established in 1855, the reservation +is home to the Confederated Salish and Kootenai Tribes. SKC — with just over 600 students — makes up a +small but vital portion of the much larger reservation. The college consists largely of tribal descendants or members, making up almost 80 percent of the +school population.

+ +
+ TCU Salish Kootenai College in Montana. +
TCU Salish Kootenai College in Montana. +
+
+ +

The Center for High Throughput Computing (CHTC) Director Miron Livny traveled +to Montana this past October to meet with Salish Kootenai College faculty and staff. The four-day trip was coordinated by International Networking +Coordinator Dale Smith from the University of Oregon, who also works for the American Indian Higher Education Consortium. +The visit was meant for Livny to experience one of the nation’s tribal colleges and universities (TCUs) and to further the discourse between CHTC and SKC. +“The main goal was for him to see our infrastructure, meet the faculty and see research opportunities,” Slater recalled.

+ +

SKC’s biggest and most immediate computing goal is to provide the access and training to utilize a web platform for JupyterHub that would be available +for faculty and student use. The Jupyter Notebook connects with an OSPool Access Point, where students can place their workloads and data and which +automates the execution of jobs and data movement across associated resources. Slater believes this would be beneficial, as many SKC faculty members do +computing and data analysis within their specialties. “The fact that we could have a web platform with JupyterHub that students could access and faculty +could access would really be a great facilitation,” Slater explained.

+ +

Slater would also like to collaborate with other TCUs, train faculty in computing software, and increase SKC’s overall cyberinfrastructure capabilities. SKC Chief Information Officer (CIO) Al Anderson would like to leverage storage capacity for a faculty researcher who is examining the novel behavior of elk on the National Bison Range. This work requires taking a vast number of photographs that then must be processed and stored. “We found that we have this storage issue — right now they’re using portable hard drives and it’s just a mess,” Anderson said.

+ +

Engagements like this are an early but important step in bringing underserved communities to cyberinfrastructure, and thus to science and research. The NSF “Missing Millions” report focused on the need to democratize access to computing and showed a deficiency of engagement with institutions created for marginalized groups. Institutions like historically black colleges and universities (HBCUs) and TCUs tend to lack cyberinfrastructure capabilities, which can be hard to build without engagement from outside institutions. SKC’s engagement with CHTC is an example of the steps both are taking to address this deficiency.

+ +

Longer-term goals for the college largely focus on education. “We’re a small school, traditionally we have more educational needs than really heavy research needs,” Slater said. Anderson agreed, stating, “I think a lot of our focus is the educational side of computing and how to get people hooked into those things.”

+ +

Anderson and Slater are also focused on building relationships with faculty and discovering what they need to educate their students. They believe hearing from the SKC community should come first and foremost. “We’re still in that formative stage of asking, what do we need to support?” Anderson explained. “Through these conversations we’re slowly discovering.”

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/ucsd-external-release.html b/preview-fall2024-info/ucsd-external-release.html new file mode 100644 index 000000000..ad62217ea --- /dev/null +++ b/preview-fall2024-info/ucsd-external-release.html @@ -0,0 +1,364 @@ + + + + + + +PATh Extends Access to Diverse Set of High Throughput Computing Research Programs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ PATh Extends Access to Diverse Set of High Throughput Computing Research Programs +

+

Finding the right road to research results is easier when there is a clear PATh to follow. The Partnership to Advance Throughput Computing (PATh)—a partnership between the OSG Consortium and the University of Wisconsin-Madison’s Center for High Throughput Computing (CHTC) supported by the National Science Foundation (NSF)—has cleared the way for science and engineering researchers for years with its commitment to advancing distributed high throughput computing (dHTC) technologies and methods.

+ +

HTC involves running a large number of independent computational tasks over long periods of time—from hours and days to weeks or months. dHTC tools leverage automation and build on distributed computing principles to save researchers with large ensembles incredible amounts of time by harnessing the computing capacity of thousands of computers in a network—a feat that could take years to complete with conventional computing.

+ +

Recently PATh launched the PATh Facility, a dHTC service meant to handle HTC workloads in support and advancement of NSF-funded open science. The facility was announced earlier this year via a Dear Colleague Letter issued by the NSF, which identified a diverse set of eligible research programs spanning 14 domain science areas, including geoinformatics, computational methods in chemistry, cyberinfrastructure, bioinformatics, astronomy, arctic research and more. Through this 2022-2023 fiscal year pilot project, the NSF awards credits for access to the PATh Facility, and researchers can request computing credits associated with their NSF awards. There are two ways to request credit: 1) within new proposals or 2) with existing awards, via an email request for additional credits to participating program officers.

+ +

“It is a remarkable program because it spans almost the entirety of the NSF’s directorates and offices,” said San Diego Supercomputer Center (SDSC) Director Frank Würthwein, who also serves as executive director of the OSG Consortium.

+ +

Access to the PATh Facility offers researchers approximately 35,000 modern cores and up to 44 A100 GPUs. Recently SDSC, located at UC San Diego, added PATh Facility hardware on its Expanse supercomputer for use by researchers with PATh credits. According to SDSC Deputy Director Shawn Strande: “Within the first two weeks of operations, we saw researchers from 10 different institutions, including one minority serving institution, across nearly every field of science. The beauty of the PATh model of system integration is that researchers have access as soon as the resource is available via OSG. PATh democratizes access by lowering barriers to doing research on advanced computing resources.”

+ +

While the PATh credit ecosystem is still growing, any PATh Facility capacity not used for credit will be available to the Open Science Pool (OSPool) to benefit all open science under a Fair-Share allocation policy. “For researchers familiar with the OSPool, running HTC workloads on the PATh Facility should feel like second nature,” said Christina Koch, PATh’s research computing facilitator.

+ +

“Like the OSPool, the PATh Facility is nationally spanning, geographically distributed and ideal for HTC workloads. But while resources on the OSPool belong to a diverse range of campuses and organizations that have generously donated their resources to open science, the allocation of capacity in the PATh Facility is managed by the PATh project itself,” said Koch.

+ +

PATh will eventually span six national sites: SDSC at UC San Diego, CHTC at the University of Wisconsin-Madison, the Holland Computing Center at the University of Nebraska-Lincoln, Syracuse University’s Research Computing group, the Texas Advanced Computing Center at the University of Texas at Austin and Florida International University’s AMPATH network in Miami.

+ +

PIs may contact credit-accounts@path-cc.io with questions about PATh resources, using HTC, or estimating credit needs. More details also are available on the PATh credit accounts web page.

+ +
+ +
+ A diverse set of PATh national and international users benefit from the resource, and the recent launch of the PATh Facility further supports HTC workloads in an effort to advance NSF-funded open science. The colors on the chart correspond to the total number of core hours – nearly 884,000 – utilized by researchers at participating universities on PATh Facility hardware located at SDSC. Credit: Ben Tolo, SDSC +
+
+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/usgs-invasivespecies.html b/preview-fall2024-info/usgs-invasivespecies.html new file mode 100644 index 000000000..78860c03e --- /dev/null +++ b/preview-fall2024-info/usgs-invasivespecies.html @@ -0,0 +1,371 @@ + + + + + + +Protecting ecosystems with HTC + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ Protecting ecosystems with HTC +

+

Researchers at the USGS are using HTC to pinpoint potential invasive species for the United States.

+ +
+ Satellite image collage graphic +
From left to right: Mississippi River Delta, Colorado Rocky Mountains, Kansas’s Milford Lake. Images by USGS on Unsplash.
+
+ +

Benjamin Franklin famously advised that an ounce of prevention is worth a pound of cure, and researcher Richard Erickson has taken this advice to heart in his mission to protect our lakes and wildlife from invasive species. As a research ecologist at the United States Geological Survey’s (USGS) Upper Midwest Environmental Sciences Center, Erickson uses computation to identify invasive species before they pose a threat to U.S. ecosystems.

+ +

Instrumental to his preventative mission are the HTCondor Software Suite (HTCSS) and consulting from UW-Madison’s Center for High Throughput Computing (CHTC), which have been integral to the USGS’s in-house computing infrastructure. Equipped with the management capabilities of HTCSS and guidance from CHTC, Erickson recently completed a high-throughput horizon scan of over 8000 different species in less than two days.

+ +

Explaining how his team was able to accomplish such a feat in merely one weekend, Erickson reasons: “High throughput computing software allows [big problems] to be broken into small jobs. Rather than having to worry about everything, I just have to worry about a small thing, and then high throughput computing does the small thing many times over, to solve big problems through small steps.”

+ +

Erickson’s big problem first began to take shape in 2020 when the U.S. Fish and Wildlife Service (FWS) provided the USGS with a list of over 8000 species currently being bought and sold in the United States, from Egyptian Geese, to Algerian hedgehogs, to Siberian weasels. If these animals proliferate in U.S. environments, they could potentially threaten native species, the ecosystem as a whole, and the societal and economic value associated with it. Erickson’s job? To determine which species are a threat, and to what areas — a tall order when faced with 8000 unique species and roughly 900 different ecological regions across the United States.

+ +

With HTC, Erickson could approach this task by breaking it down into small, manageable steps. Each species was independent of the others, meaning that the colossal collection of 8000 plants and animals could be organized into 8000 different jobs for HTCSS to run in parallel. Each job contained calculations comparing the US and non-US environments across sixteen different climate metrics. Individually, the jobs took anywhere from under thirty minutes to over two hours to run.

+ +

To analyze this type of data, the team created their own R package, climatchR. The package was released to the public in early September, and the team plans to make their HTCondor code publicly available after it undergoes USGS review.

+ +

But the HTC optimization didn’t end there. Because the project also required several complex GIS software dependencies, the group used Docker to build a container that could hold the various R and GIS dependencies in the context of a preferred operating system. Such containers make the software portable and consistent between diverse users and their computers, and can also be easily distributed by HTCSS to provide a consistent and custom environment for each computed job running across a cluster.

+ +

By the end of their computing run, the 8000 jobs had used roughly a year of computing in less than two days. The output included a climate score between zero and ten for each of the 8000 species, corresponding to how similar a species’ original climate is to the climates of the United States.

+ +

Currently, different panels of experts are reviewing species with climate scores above 6 to determine which of them could jeopardize US ecosystems. This expert insight will inform FWS’s regulation and management of the species traded in the United States, ultimately preventing the arrival of those that are likely to be invasive.

+ +

Invasive species disrupt ecological interactions, contributing to the population decline and extinction of native species. But beyond their environmental consequences, these non-native species impact property values, tourism activities, and agricultural yields. Hopefully, the results of Erickson’s high-throughput horizon screen will prevent these costs before they’re endured — all by using HTC to solve big problems, through small steps.

+ +

+ +

Erickson co-authored an open-access tutorial to help other environmental scientists and biologists who are getting started with HTCondor. +Erickson’s team hopes to make the results from this project publicly available in 2022.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/README.md b/preview-fall2024-info/uw-research-computing/README.md new file mode 100644 index 000000000..d3393fc0b --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/README.md @@ -0,0 +1,42 @@ +# Guide + +## Creating dropdowns + +```html + + + +
+
+

+ +

+
+
+ !!!This is the content of the dropdown!!! +
+
+
+
+``` + +### Example + +```html +
+
+

+ +

+
+
+ !!!This is the content of the dropdown!!! +
+
+
+
+``` \ No newline at end of file diff --git a/preview-fall2024-info/uw-research-computing/account-details.html b/preview-fall2024-info/uw-research-computing/account-details.html new file mode 100644 index 000000000..7dc13d13f --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/account-details.html @@ -0,0 +1,413 @@ + + + + + + +How to Request a CHTC Account + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ How to Request a CHTC Account +

+ +

The following sections detail the processes for requesting a new CHTC account, or for continuing to use an existing CHTC account. CHTC services are free to use in support of the University of Wisconsin - Madison’s research and teaching mission.

+ +

Current Member of UW - Madison

+ +

If you are a current student, staff, or faculty at UW - Madison, you can request an account +by completing the Account Request Form. A staff member from CHTC will follow +up with next steps.

+ +

All accounts require an active NetID and a faculty sponsor (typically the PI leading your research project).

+ +

Graduating from UW - Madison

+ +

We understand that some users may need to continue carrying out their computational +analyses after graduation and the subsequent expiration of their NetID.

+ +

Once you are no longer enrolled in or employed by the University, you can continue +to use your CHTC account as an “External Collaborator”. Follow the instructions in +the section below to have your faculty advisor sponsor your continued access to +CHTC.

+ +

We highly recommend reaching out to CHTC staff before your NetID expires, if possible.

+ +
    +
  • Our policy is that CHTC accounts are deactivated and user data is erased after a user +is no longer actively using their account (~1 year of inactivity). It is your responsibility +to maintain your data and important files in a location that is not CHTC’s file systems.
+ +

External Collaborator

+ +

If you are not a current member of UW - Madison, you can gain access to CHTC provided +that you are sponsored by a faculty member of UW - Madison. To begin the account +request process, have your Faculty Sponsor email CHTC (chtc@cs.wisc.edu) and provide:

+ +
    +
  1. Your name,
  2. The reason you need (continued) access to CHTC resources,
  3. The amount of time they would like to sponsor your account,
  4. Your city/country of residence, and
  5. Your institution.
+ +

CHTC staff will then follow up with next steps to create or extend your account.

+ +
    +
  • Your faculty sponsor can sponsor your account for up to one year at a time. If +you need continued access past one year, your faculty sponsor must contact us and +re-confirm that you should have continued access.
  • Our policy is that CHTC accounts are deactivated and user data is erased after a user +is no longer actively using their account (~1 year of inactivity). It is your responsibility +to maintain your data and important files in a location that is not CHTC’s file systems.
+ +
+
+ + + + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/apptainer-build.html b/preview-fall2024-info/uw-research-computing/apptainer-build.html new file mode 100644 index 000000000..2b869097d --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/apptainer-build.html @@ -0,0 +1,835 @@ + + + + + + +Building an Apptainer Container + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Building an Apptainer Container +

+ +

This guide describes the general process for creating an Apptainer container. +Specifically, we discuss the components of the “definition file” and how that file is used to construct or “build” the container itself.

+ +

For instructions on using and building Apptainer containers, see our Use Apptainer Containers and Using Apptainer Containers on HPC guides.

+ + + + + +

The Apptainer Definition File

+ +

The instructions for how Apptainer should build a container are located in the definition file (typically suffixed with .def). +The following table summarizes the sections that are routinely used when constructing a container:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
SectionDescription
“Header”Choose an existing container to start from.
%filesAdd existing files (e.g., pre-downloaded source code) to use in the container.
%postInstallation commands for adding libraries/programs to the container.
%environmentAutomatically set environment variables when the container is started to help find installed programs.
%labelsAdd information or metadata to help identify the container and its contents.
%helpAdd text to help others use the container and its contents.
+ +

With the exception of the “Header”, sections in the definition file begin with a line starting with %name_of_section and all subsequent lines belong to that section until the end of the file or the next %section line is encountered. +Typically the contents of a section are indented to help visually distinguish the different parts.

+ +
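For orientation, here is a sketch (illustrative only; the file names and packages are placeholders drawn from the examples later in this guide) of how these sections fit together in a single definition file:

Bootstrap: docker
From: ubuntu:22.04

%files
    my-source-code.tar.gz /opt/my-source-code.tar.gz

%post
    apt update -y
    apt install -y gcc make wget

%environment
    export PATH="/opt/my-program/bin:$PATH"

%labels
    Author Bucky Badger

%help
    Text to help others use the container and its contents.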

Additional sections can be specified, though not all may be functional when using the container on CHTC systems. +For additional information on Apptainer definition files, see the Apptainer documentation. +The manual page provides a full reference on the different sections of the definition file.

+ +
+

Note that the %runscript section is ignored when the container is executed on the High Throughput system.

+
+ +

Header section

+ +

This must be the first section of the definition file. +The header specifies the container image that Apptainer should start with. +Apptainer will load this container image before attempting to execute the build commands.

+ +

Most users will use

+ +
Bootstrap: docker
+From: user/repo:tag
+
+ +

where user/repo:tag is any valid address to a Docker-based container registry. +For example,

+ +
Bootstrap: docker
+From: rocker/tidyverse:4.1.3
+
+ +

would use the Rocker tidyverse v4.1.3 container hosted on DockerHub as the base for the current build.

+ +

Alternatively,

+ +
Bootstrap: docker
+From: nvcr.io/nvidia/tensorflow:24.02-tf2-py3
+
+ +

would use the NVIDIA TensorFlow 2 v24.02 container hosted on the NVIDIA Container Registry (nvcr).

+ +
+

If you just want to convert an existing Docker container into an Apptainer container, you do not need to use a definition file. +Instead, you can directly run the apptainer build command using the Docker address, as described below.

+
+ +

Files section

+ +

The %files section is used to copy files from the machine that is running Apptainer (the “host”) into the container that Apptainer is building. +This section is typically used when you have the source code saved on the host and want to extract/compile/install it inside of the container image.

+ +
+

While the container is being built on the host system, by default it does not have direct access to files located on the host system. +The %files section serves as the bridge between the host system and the container being built.

+
+ +

The syntax for use is

+ +
%files
+    file_on_host file_in_container
+
+ +

where file_on_host is in the same directory as the .def definition file, and where file_in_container will be copied to the container’s root (/) by default. +You can instead provide absolute paths to the files on the host or in the container, or both. +For example:

+ +
%files
+    /home/username/my-source-code.tar.gz /opt/my-program-build/my-source-code.tar.gz
+
+ +
+

If the directories in the path in the container do not already exist, they will be created.

+
+ +

Post section

+ +

The %post section contains any and all commands to be executed when building the container. +Typically this involves first installing packages using the operating system’s package manager and then compiling/installing your custom programs. +Environment variables can be set as well, but they will only be active during the build (use the %environment section if you need them active during run time).

+ +

For example, if using an Ubuntu-based container, you should be able to use the apt package manager to install your program’s dependencies.

+ +
%post
+    apt update -y
+    apt install -y gcc make wget
+
+ +

Note that we have used the -y option for apt to pre-emptively agree to update apt and to install the gcc, make, and wget packages. Otherwise, the apt command will prompt you to confirm the executions via the command line. But since the Apptainer build process is executed non-interactively, you will be unable to enter a response via the command line, and the commands will eventually time out and the build will fail.

+ +

Once you install the dependencies you need using the operating system’s package manager, you can use those packages to obtain and install your desired program. +For example, the following commands will install the GNU Units command units.

+ +
    mkdir -p /opt/units-source
+    cd /opt/units-source
+    wget https://ftp.gnu.org/gnu/units/units-2.23.tar.gz
+    tar -xzf units-2.23.tar.gz
+    cd units-2.23
+    ./configure
+    make 
+    make install
+
+ +

If using the default installation procedure, your program should be installed in a location known to, and detectable by, the operating system. If not, you may need to manually set environment variables so that your program can be found.

+ +

Environment section

+ +

The %environment section can be used to automatically set environment variables when the container is actually started.

+ +

For example, if you installed your program in a custom location /opt/my-program and the binaries are in the bin/ folder, you could use this section to add that location to your PATH environment variable:

+ +
%environment
+    export PATH="/opt/my-program/bin:$PATH"
+
+ +
+

Effectively, this section can be used like a .bashrc or .bash_profile file.

+
+ +

Labels section

+ +

The %labels section can be used to provide custom metadata about the container, which can make it easier for yourself and others to identify the nature and provenance of a container.

+ +

The syntax for this section is

+ +
%labels
+    LabelNameA LabelValueA
+    LabelNameB LabelValueB
+
+ +

where LabelName is the name of the label, and LabelValue is the corresponding value. +For example,

+ +
%labels
+    Author Bucky Badger
+    ContactEmail bbadger@wisc.edu
+    Name Bucky's First Container
+
+ +

will generate the metadata in the container showing the Author as Bucky Badger, the ContactEmail as bbadger@wisc.edu, and the container Name as Bucky's First Container.

+ +

For an existing container, you can inspect the metadata with the command apptainer inspect my_container.sif.

+ +
+

For a container with the %labels in the above example, you should see the following output:

+ +
$ apptainer inspect my_container.sif
+
+Author: Bucky Badger
+ContactEmail: bbadger@wisc.edu
+Name: Bucky's First Container
+
+ +

along with some automatically generated labels.

+
+ +

Help section

+ +

The %help section can be used to provide custom help text about how to use the container. +This can make it easier for yourself and others to interact and use the container.

+ +

For example,

+ +
%help
+    This container is based on Ubuntu 22.04 and has the GNU Units command installed. 
+    You can use the command `units` inside this container to convert from one unit of measurement to another.
+    For example,
+        $ units '1 GB' 'MB'
+    returns
+                * 1000
+                / 0.001
+
+ +

For an existing container, you can inspect the help text with the command apptainer run-help my-container.sif.

+ +

The Apptainer Container Image

+ +

The actual container image, which can be executed by Apptainer as a stand-alone operating system, is stored in a .sif file.* +The instructions for constructing the .sif file are provided by the .def definition file, as described above. +Basically, the .sif file is a compression of all of the files in the stand-alone operating system that comprises a “container”. +Apptainer can use this one file to reconstitute the container at runtime.

+ +

* sif stands for “Singularity Image File”; Apptainer is an open-source fork of the original program, Singularity.

+ +

Building the container

+ +

To create the .sif file from the .def file, you need to run the command

+ +
apptainer build my-container.sif my-container.def
+
+ +

Here the syntax is to provide the name of the .sif file that you want to create and then provide the name of the existing .def definition file.

+ +
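For the GNU Units example from the %post section above (assuming the definition file was saved as units.def), the command would be:

apptainer build units.sif units.def

The resulting units.sif file is the image used in the interactive test shown later in this guide.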
+

Don’t run the apptainer build command on the login server! +Building the container image can be an intensive process and can consume the resources of the login server.

+ +
    +
  • On the High Throughput system, first start an interactive build job as described in our Use Apptainer Containers guide.
  • On the High Performance system, first launch an interactive Slurm session as described here.
+
+ +

Converting a Docker image to an Apptainer container image

+ +

You can directly convert an existing Docker container into an Apptainer container image without having to provide a definition file. +To do so, use the command

+ +
apptainer build my-container.sif docker://user/repo:tag
+
+ +

where user/repo:tag is any valid address to a Docker-based container registry. (For example, rocker/tidyverse:4.1.3 from DockerHub or nvcr.io/nvidia/tensorflow:24.02-tf2-py3 from NVIDIA Container Registry.)

+ +

Testing the container interactively

+ +

After building your container, we strongly recommend that you start it up and check that your program has been installed correctly. +Assuming that you are in an interactive session (i.e., not on the login server), then you can run

+ +
apptainer shell my-container.sif
+
+ +

This command should log you into a terminal that is backed by the container’s operating system.

+ +
+

On the High Throughput system, you can instead submit an interactive job that uses the .sif file as the container_image. +In this case, you do not need to run any apptainer commands, as HTCondor has automatically done so before you load into the interactive session.

+
+ +

Then you can check that the files are in the right place, or that your program can be found. An easy way to check whether your program is at least recognized by the container is to try to print the program’s help text.

+ +

For example,

+ +
[username@hostname ~]$ apptainer shell units.sif
+Apptainer> units --help
+
+Usage: units [options] ['from-unit' 'to-unit']
+
+<additional output truncated>
+
+ +
+

By default, only your current directory will be mounted into the container, meaning the only files you can see from the host system are those in the directory where you ran the command.

+ +

Furthermore, the interactive container session may inherit environment variables from your terminal session on the host system, which may conflict with the container environment. +In this case, use the -e option to use a “clean” environment for the interactive session: apptainer shell -e my-container.sif.

+
+ +

Special Considerations for Building Your Container

+ + + +
    +

    Non-interactive

    + +

    Because the container build is a non-interactive process, all commands within the .def file must be able to execute without user intervention.

    +

    Be prepared to troubleshoot

    + +

A consequence of the non-interactive build is that when something goes wrong, the build process will fail without creating a .sif file. That in turn means that when the build is restarted, it does so completely from scratch.

    + +

    It is rare to correctly write your .def file such that the container builds successfully on your first try! +Do not be discouraged - examine the build messages to determine what went wrong and use the information to correct your .def file, then try again.

    +

    Multi-stage build

    + +

    It is possible to have a multi-stage build. +In this scenario, you have two .def files. +You use the first one to construct an intermediary .sif file, which you can then use as the base for the second .def file. +In the second .def file, you can specify

    + +
    Bootstrap: localimage
    +From: path/to/first.sif
    +
    +

    .sif files can be large

    + +

    If you are installing a lot of programs, the final .sif image can be large, on the order of 10s of gigabytes. +Keep that in mind when requesting disk space. +On the High Throughput system, we encourage you to place your container image on the /staging system.

    +

    Files cannot be created or modified after the container has been built

    + +

    While you can read and execute any file within the container, you will not be able to create or modify files in the container once it has been built. +The exception is if the location is “mounted” into the container, which means that there is a corresponding location on the host system where the files will be stored. +Even then, you will only be allowed to create/modify files in that location if you would be able to normally without a container.

    + +

This behavior is intentional, as otherwise it would be possible for users to modify files on the host machine’s operating system, which would be a significant security, operations, and privacy risk.

    +

    Manually set a HOME directory

    + +

Some programs create .cache directories and may attempt to do so in the user’s “HOME” directory. When executing in a container, however, the user typically does NOT have a “HOME” directory. In this case, some programs default to creating the directory in the root / directory. This will not work, for the reasons given in the previous item.

    + +

    One workaround may be to manually set the HOME environment variable after the container has started. +On CHTC systems, the following should address this issue:

    + +
    export HOME=$(pwd)
    +
    + +

If this does not address the issue, examine the error messages and consult the program documentation for how to configure the program to use an alternate location for cache or temporary directories.

    +
+ +
+
+ + + + +
+ + + + + + +
General Guides
+
+ + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/apptainer-hpc.html b/preview-fall2024-info/uw-research-computing/apptainer-hpc.html new file mode 100644 index 000000000..7c5a84cea --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/apptainer-hpc.html @@ -0,0 +1,639 @@ + + + + + + +Using Apptainer Containers on HPC + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Using Apptainer Containers on HPC +

+ +

Introduction

+ +

Similar to Docker containers, Apptainer environments allow users to prepare portable software and computing environments that can be sent to many jobs. +This means your jobs will run in a more consistent environment that is easily reproducible by others.

+ + + +

Create a Definition File

+ +

The definition (.def) file contains the instructions for what software to install while building the container. +CHTC provides example definition files in the software folder of our Recipes GitHub repository. We strongly recommend that you use one of the existing examples as the starting point for creating your own container.

+ +

To create your own container using Apptainer, you will need to create a definition (.def) file. +We encourage you to read our Building an Apptainer Container guide to learn more about the components of the Apptainer definition file.

+ +

Regarding MPI

+ +

We are still in the process of developing guidance for deploying MPI-based software in containers on the High Performance system. +The instructions in this guide should work for single-node jobs. +Multi-node jobs require the MPI installed in the container to integrate with Slurm and/or the cluster installation of MPI, and we are still exploring how to do so.

+ +

Start an Interactive Session

+ +

Building a container can be a computationally intense process. +As such, we require that you only build containers while in an interactive session. +On the High Performance system, you can use the following command to start the interactive session:

+ +
srun --mpi=pmix -n4 -N1 -t 240 -p int --pty bash
+
+ +

Build Your Container

+ +

We recommend working from your /scratch directory when first building your container.

+ +
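For example, assuming your scratch directory follows the usual /scratch/$USER pattern:

cd /scratch/$USER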

Once the interactive session starts, set the Apptainer temporary directory:

+ +
export APPTAINER_TMPDIR=/scratch/$USER/apptainer/tmp
+mkdir -p $APPTAINER_TMPDIR
+
+ +

To build a container, Apptainer uses the instructions in the .def file to create a .sif file. The .sif file is the compressed collection of all the files that comprise the container.

+ +

To build your container, run this command:

+ +
apptainer build my-container.sif image.def
+
+ +

Feel free to rename the .sif file as you desire; for the purposes of this guide we are using my-container.sif.

+ +

As the command runs, a variety of information will be printed to the terminal regarding the container build process. +Unless something goes wrong, this information can be safely ignored. +Once the command has finished running, you should see INFO: Build complete: my-container.sif. +Using the ls command, you should now see the container file my-container.sif.

+ +

If the build command fails, examine the output for error messages that may explain why the build was unsuccessful. +Typically there is an issue with a package installation, such as a typo or a missing but required dependency. +Sometimes there will be an error during an earlier package installation that doesn’t immediately cause the container build to fail. +But, when you test the container, you may notice an issue with the package.

+ +

If you are having trouble finding the error message, edit the definition file and remove (or comment out) the installation commands that come after the package in question. +Then rebuild the image, and now the relevant error messages should be near the end of the build output.

+ +

For more information on building Apptainer containers, see our Building an Apptainer Container guide.

+ +

Test Your Container

+ +

Once your container builds successfully, it is important to test it to make sure you have all software, packages, and libraries installed correctly.

+ +

To test your container, use the command

+ +
apptainer shell -e my-container.sif
+
+ +

You should see your command prompt change to Apptainer>.

+ +

The shell command logs you into a terminal “inside” the container, with access to the libraries, packages, and programs that were installed in the container following the instructions in your image.def file. +(The -e option is used to prevent this terminal from trying to use the host system’s programs.)

+ +

While “inside” the container, try to run your program(s) that you installed in the container. +Typically it is easiest to try to print your program’s “help” text, e.g., my-program --help. +If using a programming language such as python3 or R, try to start an interactive code session and load the packages that you installed.

+ +

If you installed your program in a custom location, consider using ls to verify the files are in the right location. +You may need to manually set the PATH environment variable to point to the location of your program’s executable binaries. +For example,

+ +
export PATH=/opt/my-program/bin:$PATH
+
+ +

Consult the “Special Considerations” section of our Building an Apptainer Container guide for additional information on setting up and testing your container.

+ +

When you are finished running commands inside the container, run the command exit to exit the container. +Your prompt should change back to something like [username@spark-a006 directory]$. +If you are satisfied with the container that you built, run the exit command again to exit the interactive Slurm session.

+ +

Use an Apptainer Container in HPC Jobs

+ +

Now that you have the container image saved in the form of the .sif file, you can use it as the environment for running your HPC jobs.

+ +

For execution on a single node, we recommend adding the following commands to your sbatch script:

+ +
export TMPDIR=/scratch/$USER/apptainer_tmp/${SLURM_JOB_ID}
+mkdir -p $TMPDIR
+
+srun apptainer exec -e \
+    --bind /home/$USER \
+    --bind /scratch/$USER \
+    --bind $TMPDIR \
+    my-container.sif my-job-script.sh
+
+rm -rf $TMPDIR
+
+ +

All of the commands that you want to execute using the container should go into my-job-script.sh.

+ +
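For example, a minimal my-job-script.sh might look like the following sketch, where my-analysis.py stands in for whatever program you installed in the container:

#!/bin/bash
# These commands run inside the container; /home/$USER, /scratch/$USER,
# and $TMPDIR are visible because of the --bind options in the sbatch
# script above.
python3 my-analysis.py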

Then submit your job to Slurm as usual, as described in our Submitting and Managing Jobs Using SLURM guide.

+ +

On multiple nodes

+ +

We are still in the early stages of deploying containers on the High Performance system. A complicating factor is the construction of the .def file to deploy MPI on the system to allow for execution across multiple nodes. If you are interested in multi-node execution using containers, contact a facilitator for more information.

+ +
+
+ + + + +
+ + + + + + +
HPC Guides
+
+ + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + + + + + + + + + + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/apptainer-htc-advanced-example.html b/preview-fall2024-info/uw-research-computing/apptainer-htc-advanced-example.html new file mode 100644 index 000000000..05ec67922 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/apptainer-htc-advanced-example.html @@ -0,0 +1,902 @@ + + + + + + +Advanced Apptainer Example - SUMO + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Advanced Apptainer Example - SUMO +

+ +

Sometimes the program you want to use does not have a pre-existing container that you can build on top of. +Then you will need to install the program and its dependencies inside of the container. +In this example, we will show how to install the program SUMO in a container, as an illustration of how to build a container more-or-less from scratch.

+ + + +

1. Choose the Base Container Image

+ +

First, you will need to choose a base image for the container. +Consult the documentation for the program you want to install to make sure you select a compatible operating system.

+ +

For this example, we will use the most recent LTS version of Ubuntu from Docker. +The beginning of the image.def file should look like this:

+ +
Bootstrap: docker
+From: ubuntu:22.04
+
+ +

2. Add the Installation Commands

+ +

All of the installation commands that you want Apptainer to execute during the container build step are provided in the %post section of the definition file.

+ +

Setting up non-interactive installation

+ +

First, you may need to inform programs that you are executing commands in a non-interactive environment. Otherwise, issues can arise when installing packages in a container that would not occur when installing manually in the terminal.

+ +

On the HTC system in particular, the /tmp directory inside of the container needs to be given global read/write permissions. +This can be done by adding the following line at the start of the %post section:

+ +
   chmod 777 /tmp
+
+ +

Similarly, some packages require that the user answer interactive prompts for selecting various options. +Since the Apptainer build is non-interactive, this can cause the package installation to hang. +While this isn’t an issue in the present example, the issue can be avoided by adding the following line near the start of the %post section:

+ +
   DEBIAN_FRONTEND=noninteractive
+
+ +

Note that this particular command only applies to Debian-based container images, such as Ubuntu.

+ +

The image.def so far looks like this:

+ +
Bootstrap: docker
+From: ubuntu:22.04
+
+%post
+    chmod 777 /tmp
+    DEBIAN_FRONTEND=noninteractive
+
+ +

Install dependencies

+ +

First, you should install the dependencies that your program requires.

+ +

Following the program’s instructions, we can install the dependencies with the following set of commands.

+ +
apt-get update -y
+apt-get install -y \
+    git \
+    cmake \
+    python3 \
+    g++ \
+    libxerces-c-dev \
+    libfox-1.6-dev \
+    libgdal-dev \
+    libproj-dev \
+    libgl2ps-dev \
+    python3-dev \
+    swig \
+    default-jdk \
+    maven \
+    libeigen3-dev
+
+ +

Note that we are using the built-in package manager (apt) of Ubuntu, since that is the base operating system we chose to build on top of. +If you choose a different operating system, you may need to use a different package manager.

+ +

In this case, the first command is apt-get update which will update the list of available packages. +This is necessary to get the latest versions of the packages in the following apt-get install command.

+ +

The apt-get install command will install the dependencies required by the SUMO program.

+ +
+

Note that these installation commands do not use sudo, as Apptainer already has permissions to install programs in the container.

+
+ +

The image.def file now looks like this:

+ +
Bootstrap: docker
+From: ubuntu:22.04
+
+%post
+    chmod 777 /tmp
+    DEBIAN_FRONTEND=noninteractive
+
+    apt-get update -y
+    apt-get install -y \
+        git \
+        cmake \
+        python3 \
+        g++ \
+        libxerces-c-dev \
+        libfox-1.6-dev \
+        libgdal-dev \
+        libproj-dev \
+        libgl2ps-dev \
+        python3-dev \
+        swig \
+        default-jdk \
+        maven \
+        libeigen3-dev
+
+ +

Compile the program

+ +

After installing the dependencies, you can provide the commands for actually compiling your program.

+ +

We now add the commands for compiling the SUMO program itself:

+ +
    git clone --recursive https://github.com/eclipse/sumo
+    export SUMO_HOME=/sumo
+    mkdir sumo/build/cmake-build && cd sumo/build/cmake-build
+    cmake ../..
+    make
+
+ +

The %post section is now complete and will install SUMO and its dependencies in the container at build time.

+ +

The image.def file now looks like this:

+ +
Bootstrap: docker
+From: ubuntu:22.04
+
+%post
+    chmod 777 /tmp
+    DEBIAN_FRONTEND=noninteractive
+
+    apt-get update -y
+    apt-get install -y \
+        git \
+        cmake \
+        python3 \
+        g++ \
+        libxerces-c-dev \
+        libfox-1.6-dev \
+        libgdal-dev \
+        libproj-dev \
+        libgl2ps-dev \
+        python3-dev \
+        swig \
+        default-jdk \
+        maven \
+        libeigen3-dev
+    
+    git clone --recursive https://github.com/eclipse/sumo
+    export SUMO_HOME=/sumo
+    mkdir sumo/build/cmake-build && cd sumo/build/cmake-build
+    cmake ../..
+    make
+
+ +

3. Add Environment Variables

+ +

While the %post section now contains all of the instructions for installing and compiling your desired program, +you likely need to add commands for setting up the environment so that the shell recognizes your program. +This is typically the case if your program compiled successfully but you still get a “command not found” error when you try to execute it.

+ +

To set environment variables automatically when your container runs, you need to add them to the %environment section before you build the container.

+ +

For example, in the %post section there is the command export SUMO_HOME=/sumo, which sets the environment variable SUMO_HOME to the location of the sumo directory. +This environment variable, however, is only active during the installation phase of the container build, and will not be set when the container is actually run. +Thus, we need to set SUMO_HOME and update PATH with the location of the SUMO bin folder by using the %environment section.

+ +

We therefore add the following lines to the image.def file:

+ +
%environment
+    export SUMO_HOME=/sumo
+    export PATH=/sumo/bin:$PATH
+
+ +

These environment variables will be set when the container starts, so that the sumo command is automatically found when we try to execute it.

+ +

Summary

+ +

The full image.def file for this advanced example is now:

+ +
Bootstrap: docker
+From: ubuntu:22.04
+
+%post
+    chmod 777 /tmp
+    DEBIAN_FRONTEND=noninteractive
+
+    apt-get update -y
+    apt-get install -y \
+        git \
+        cmake \
+        python3 \
+        g++ \
+        libxerces-c-dev \
+        libfox-1.6-dev \
+        libgdal-dev \
+        libproj-dev \
+        libgl2ps-dev \
+        python3-dev \
+        swig \
+        default-jdk \
+        maven \
+        libeigen3-dev
+
+    git clone --recursive https://github.com/eclipse/sumo
+    export SUMO_HOME=/sumo
+    mkdir sumo/build/cmake-build && cd sumo/build/cmake-build
+    cmake ../..
+    make
+
+%environment
+    export SUMO_HOME=/sumo
+    export PATH=/sumo/bin:$PATH
+
+ +

We can now build the container using this definition file.

+ +
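For example, inside an interactive session, building the image (here named sumo.sif, our choice of file name) and performing a quick check that the sumo command is found might look like:

apptainer build sumo.sif image.def
apptainer shell -e sumo.sif
Apptainer> sumo --help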
    +
  • For more information on the components of an Apptainer definition (.def) file and container image file (.sif), see our Building an Apptainer Container guide.
  • For information on building and using the container on the HTC system, see our Use Apptainer Containers guide.
+ +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/apptainer-htc.html b/preview-fall2024-info/uw-research-computing/apptainer-htc.html new file mode 100644 index 000000000..f9c6f6520 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/apptainer-htc.html @@ -0,0 +1,1062 @@ + + + + + + +Use Apptainer Containers + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Use Apptainer Containers +

+ +

Introduction

+ +

HTCondor supports the use of Apptainer (formerly known as Singularity) environments for jobs on the High Throughput Computing system.

+ +

Similar to Docker containers, Apptainer environments allow users to prepare portable software and computing environments that can be sent to many jobs. +This means your jobs will run in a more consistent environment that is easily reproducible by others.

+ +

Container jobs are able to take advantage of more of CHTC’s High Throughput resources because the operating system where the job is running does not need to match the operating system where the container was built.

+ + + +

Quickstart

+ +

Use an existing container

+ +

If you or a group member have already created the Apptainer .sif file, or are using a container from reputable sources such as the OSG, follow these steps to use it in an HTCondor job.

+ +

1. Add the container .sif file to your submit file

+ +

If the .sif file is in a /home directory:

+ +
container_image = path/to/my-container.sif
+
+ +

If the .sif file is in a /staging directory:

+ +
container_image = file:///staging/path/to/my-container.sif
+
+ +

If the .sif file is in a /staging directory AND you are using +WantFlocking or +WantGliding:

+ +
container_image = osdf:///chtc/staging/path/to/my-container.sif
+
+ +

Jump to more information

+ +

2. Test your container job

+ +

As always with the High Throughput system, submit a single test job and confirm that your job behaves as expected. +If there are issues with the job, you may need to modify your executable, or even (re)build your own container.

+ +

Build your own container

+ +

If you need to create your own container for the software you want to use, follow these steps. +For more information on any particular step, jump to the corresponding section later in this guide.

+ + + +

1. Create a definition file

+ +

The definition (.def) file contains the instructions for what software to install while building the container. +CHTC provides example definition files in the software folder of our Recipes GitHub repository. Choose from one of the existing examples, or create your own using the instructions later in this guide.

+ +

Jump to more information

+ +

2. Start an interactive build job

+ +

Start an interactive build job (an example submit file build.sub is provided below). +Be sure to include your .def file in the transfer_input_files line, or else create the file once the interactive job starts using a command line editor.

+ +

Then submit the interactive build job with

+ +
condor_submit -i build.sub
+
+ +

Jump to more information

+ +

3. Build your container

+ +

While in an interactive build job, run the command

+ +
apptainer build my-container.sif image.def
+
+ +

If the container build finishes successfully, then the container image (.sif) file is created. +This file is used for actually executing the container.

+ +

Jump to more information

+ +

4. Test your container

+ +

While still in the interactive build job, run the command

+ +
apptainer shell -e my-container.sif
+
+ +

This command will start the container and log you into it, allowing you to test your software commands.

+ +

Once you are done testing the container, enter

+ +
exit
+
+ +

once to exit the container.

+ +

Jump to more information

+ +

5. Move the container .sif file to staging

+ +

Once you are satisfied that your container is built correctly, copy your .sif file to your staging directory.

+ +
mv my-container.sif /staging/$USER
+
+ +

Once the file has transferred, exit the interactive job with

+ +
exit
+
+ +

Jump to more information

+ +

Once you’ve built the container, use the instructions above to use the container in your HTCondor job.

+ +

Create a Definition File

+ +

To create your own container using Apptainer, you will need to create a definition (.def) file. +For the purposes of this guide, we will call the definition file image.def.

+ +

CHTC provides example definition files in the software folder of our Recipes GitHub repository. We strongly recommend that you use one of the existing examples as the starting point for creating your own container.

+ +

If the software you want to use is not in the CHTC Recipes repository, you can create your own container. Here is the general process for creating your own definition file for building your custom container:

+ +
    +

    Consult your software’s documentation

    + +

    Determine the requirements for installing the software you want to use. +In particular you are looking for (a) the operating systems it is compatible with and (b) the prerequisite libraries or packages.

    +

    Choose a base container

    + +

    The base container should at minimum use an operating system compatible with your software. +Ideally the container you choose also has many of the prerequisite libraries/programs already installed.

    +

    Create your own definition file

    + +

    The definition file contains the installation commands needed to set up your software. +We encourage you to read our Building an Apptainer Container guide to learn more about the components of the Apptainer definition file. +An advanced example of a definition file is provided in our Advanced Apptainer Example - SUMO guide.

    +
+ +

A simple definition file

+ +

As a simple example, here is the .def file that uses an existing container with python installed inside (python:3.11, from DockerHub), +and furthermore installs the desired packages cowsay and tqdm:

+ +
Bootstrap: docker
+From: python:3.11
+
+%post
+    python3 -m pip install cowsay tqdm
+
+ +

Remember that the .def file contains the instructions for creating your container and is not itself the container. +To use the software defined within the .def file, you will need to first “build” the container and create the .sif file, as described in the following sections.

+ +
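As a preview of the build-and-test workflow described below, a quick way to verify this particular container (once built) would be to start a shell in it and try importing the installed packages:

apptainer shell -e my-container.sif
Apptainer> python3 -c "import cowsay, tqdm"

If the import completes without error, the packages were installed correctly.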

Jump back to Quickstart

+ +

Start an Interactive Build Job

+ +

Building a container can be a computationally intense process. +As such, we require that you only build containers while in an interactive build job. +On the High Throughput system, you can use the following submit file build.sub:

+ +

+
# build.sub
+# For building an Apptainer container
+
+universe = vanilla
+log = build.log
+
+# In the latest version of HTCondor on CHTC, interactive jobs require an executable.
+# If you do not have an existing executable, use a generic linux command like hostname as shown below.
+executable = /usr/bin/hostname
+
+# If you have additional files in your /home directory that are required for your container, add them to the transfer_input_files line as a comma-separated list.
+transfer_input_files = image.def
+
+requirements = (HasCHTCStaging == true)
+
++IsBuildJob = true
+request_cpus = 4
+request_memory = 16GB
+request_disk = 16GB
+
+queue
+
+ +

Note that this submit file assumes you have a definition file named image.def in the same directory as the submit file.

+ +

Once you’ve created the submit file, you can submit an interactive job with the command

+ +
condor_submit -i build.sub
+
+ +
+

Apptainer .sif files can be fairly large, especially if you have a complex software stack. +If your interactive job abruptly fails during the build step, you may need to increase the value of request_disk in your submit file. +In this case, the .log file should have a message about the reason the interactive job was interrupted.

+
+ +

Jump back to Quickstart

+ +

Build Your Container

+ +

Once the interactive build job starts, confirm that your image.def was transferred to the current directory.

+ +

To build a container, Apptainer uses the instructions in the .def file to create a .sif file. The .sif file is the compressed collection of all the files that comprise the container.

+ +

To build your container, run this command:

+ +
apptainer build my-container.sif image.def
+
+ +

Feel free to rename the .sif file as you desire; for the purposes of this guide we are using my-container.sif.

+ +

As the command runs, a variety of information will be printed to the terminal regarding the container build process. +Unless something goes wrong, this information can be safely ignored. +Once the command has finished running, you should see INFO: Build complete: my-container.sif. +Using the ls command, you should now see the container file my-container.sif.

+ +

If the build command fails, examine the output for error messages that may explain why the build was unsuccessful. +Typically there is an issue with a package installation, such as a typo or a missing but required dependency. +Sometimes there will be an error during an earlier package installation that doesn’t immediately cause the container build to fail. +But, when you test the container, you may notice an issue with the package.

+ +

If you are having trouble finding the error message, edit the definition file and remove (or comment out) the installation commands that come after the package in question. +Then rebuild the image, and now the relevant error messages should be near the end of the build output.

+ +

Once the image is built, it is important to test it to make sure you have all software, packages, and libraries installed correctly.

+ +

For more information on building Apptainer containers, see our Building an Apptainer Container guide.

+ +

Jump back to Quickstart

+ +

Test Your Container

+ +

Once your container builds successfully, we highly encourage you to immediately test the container while still in the interactive build session.

+ +

To test your container, use the command

+ +
apptainer shell -e my-container.sif
+
+ +

You should see your command prompt change to Apptainer>.

+ +

The shell command logs you into a terminal “inside” the container, with access to the libraries, packages, and programs that were installed in the container following the instructions in your image.def file. +(The -e option is used to prevent this terminal from trying to use the host system’s programs.)

+ +

While “inside” the container, try to run your program(s) that you installed in the container. +Typically it is easiest to try to print your program’s “help” text, e.g., my-program --help. +If using a programming language such as python3 or R, try to start an interactive code session and load the packages that you installed.
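For example, a quick test session might look like the following; my-program and the numpy check are placeholders for whatever you actually installed:

Apptainer> my-program --help
Apptainer> python3 -c "import numpy; print(numpy.__version__)"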

+ +

If you installed your program in a custom location, consider using ls to verify the files are in the right location. +You may need to manually set the PATH environment variable to point to the location of your program’s executable binaries. +For example,

+ +
export PATH=/opt/my-program/bin:$PATH
+
+ +

Consult the “Special Considerations” section of our Building an Apptainer Container guide for additional information on setting up and testing your container.

+ +

When you are finished running commands inside the container, run the command exit to exit the container. +Your prompt should change back to something like [username@build4000 ~]$.

+ +

Jump back to Quickstart

+ +

Move the Container .sif File to Staging

+ +

Since Apptainer .sif files are routinely more than 1GB in size, we recommend that you transfer my-container.sif to your /staging directory. +It is usually easiest to move the container file directly to staging while still in the interactive build job:

+ +
mv my-container.sif /staging/$USER
+
+ +

If you do not have a /staging directory, you can skip this step and the .sif file will be automatically transferred back to the login server when you exit the interactive job. +We encourage you to request a /staging directory, especially if you plan on running many jobs using this container. +See our Managing Large Data in Jobs guide for more information on using staging.

+ +

Jump back to Quickstart

+ +

Use an Apptainer Container in HTC Jobs

+ +

Now that you have the container image saved in the form of the .sif file, you can use it as the environment for running your HTCondor jobs. In your submit file, specify the image file using the container_image command. HTCondor will automatically transfer the .sif file and execute your executable file inside the container; you do not need to include any apptainer commands in your executable file.

+ +

If the .sif file is located on the login server, you can use

+ +
container_image = my-container.sif
+
+ +

although we generally don’t recommend this, since .sif files are large and should instead be located in staging.

+ +

Therefore, we recommend using

+ +
container_image = file:///staging/path/to/my-container.sif
+
+ +

The full submit file otherwise looks normal. For example:

+ +
# apptainer.sub
+
+# Provide HTCondor with the name of your .sif file and universe information
+container_image = file:///staging/path/to/my-container.sif
+
+executable = myExecutable.sh
+
+# Include other files that need to be transferred here.
+# transfer_input_files = other_job_files
+
+log = job.log
+error = job.err
+output = job.out
+
+requirements = (HasCHTCStaging == true)
+
+# Make sure you request enough disk for the container image in addition to your other input files
+request_cpus = 1
+request_memory = 4GB
+request_disk = 10GB      
+
+queue
+
+ +

Then use condor_submit with the name of your submit file:

+ +
condor_submit apptainer.sub
+
+ +

If you are using +WantFlocking or +WantGliding as described in our Scale Beyond Local HTC Capacity guide, then you should instead use

+ +
container_image = osdf:///chtc/staging/path/to/my-container.sif
+
+ +

to enable transferring of the .sif file via the OSDF to compute capacity beyond CHTC.

+ +

Jump back to Quickstart

+ +
+

From the user’s perspective, a container job is practically identical to a regular job. The main difference is that instead of running on the execute point’s default operating system, the job runs inside the container.

+ +

When you submit a job to HTCondor using a submit file with container_image set, HTCondor automatically handles the process of obtaining and running the container. +The process looks roughly like

+ +
    +
  • Claim a machine that satisfies the submit file requirements
  • +
  • Pull (or transfer) the container image
  • +
  • Transfer input files, executable to working directory
  • +
  • Run the executable script inside the container, as the submit user, with key directories mounted inside (such as the working directory, /staging directories, etc.)
  • +
  • Transfer output files back to the submit server
  • +
+ +

For testing purposes, you can replicate the behavior of a container job with the following command. +First, start an interactive job. +Then run this command but change my-container.sif and myExecutable.sh to the names of the .sif and .sh files that you are using:

+ +
apptainer exec \
+        --scratch /tmp \
+        --scratch /var/tmp \
+        --workdir $(pwd) \
+        --pwd $(pwd) \
+        --bind $(pwd) \
+        --no-home \
+        --containall \
+        my-container.sif \
+        /bin/bash myExecutable.sh 1> job.out 2> job.err
+
+ +
+ + +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/archived/hpc-transition-2023.html b/preview-fall2024-info/uw-research-computing/archived/hpc-transition-2023.html new file mode 100644 index 000000000..fd65fc5e1 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/archived/hpc-transition-2023.html @@ -0,0 +1,504 @@ + + + + + + +Transitioning to a New HPC Cluster + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Transitioning to a New HPC Cluster +

+ +

The Center for High Throughput Computing’s High Performance Cluster is being +replaced by a new High Performance System! All components of the system (execute +nodes, network, shared file system) are new and we expect an improved experience for our HPC users.

+ +

ALL USERS OF THE EXISTING HPC SYSTEM WILL NEED TO MIGRATE TO THIS NEW CLUSTER. Importantly, access to the existing cluster will be phased out in early 2023. CHTC staff are here to assist with your transition.

+ +

Highlights

+ +
    +
  • The existing HPC cluster is being replaced by a new cluster. After February 2023 +ALL users will lose access to the existing cluster, and all user files will be +deleted.
  • +
  • Custom software will need to be reinstalled and jobs will need to be tested on +the new cluster.
  • +
  • The univ2 partition is being renamed, and partition policies have changed.
  • +
  • Users should avoid using mpirun and instead should use srun to execute their +MPI code. +
    +

    Note: At this time, interactive jobs on the “Spark” HPC Cluster cannot be used to run MPI code.

    +
    +
  • +
  • File system conventions have changed - jobs will now use /scratch/$USER to run, +and /home/$USER will be mainly used for software installations and reference +files.
  • +
+ +

Important Dates

+ +
    +
  • Mid January 2023: New cluster available for general use
  • +
  • February 28, 2023: Jobs will no longer run on the old cluster
  • +
  • March 15, 2023: Access to hpclogin1.chtc.wisc.edu login node and old file +system removed, Data for all users will be deleted on the old HPC system.
  • +
+ +

What You Need to Do

+ +

Move Data

+ +

Back up files from the old cluster to another system (e.g. your laptop), copy +files you are actively working with to the new cluster, and delete all data off +the old HPC system. All files in /home and /software will be deleted off the +existing system starting March 15, 2023.
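For example, you might pull a copy of a project directory from the old cluster down to your laptop with rsync before deleting it; the username and paths here are placeholders:

rsync -av username@hpclogin1.chtc.wisc.edu:~/my-project ./my-project-backup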

+ +

Log In and Quota Check

+ +

Confirm you can access the new cluster by logging into the new login node.

+ +

Prepare and Submit Test Jobs

+ +

After logging in, prepare and submit a few test jobs to confirm that your work +will run, paying attention to these important changes:

+ +
    +
  1. Appropriate usage of /home and /scratch: +
      +
    • Jobs should be run out of /scratch/$USER. Your scratch directory has a quota of 100GB disk space and 250,000 items
    • +
    • Only use your /home directory for software installations and general job files and templates. +Your /home directory has a quota of 20GB disk space and 250,000 items.
    • +
    • The /software directory is being phased out.
    • +
    +
  2. +
  3. +

    Build software with new modules: users will need to reinstall and/or rebuild their software on the new HPC cluster. Users may encounter different versions of common tools on the new cluster, so it is important to try installing your software early to ensure compatibility. If a software package or library that is necessary for your installation is not available, contact CHTC staff (see our get help page).

    +
  4. +
  5. +

    Change MPI execution: Our systems administrators now recommend using srun with the --mpi=pmix flag, instead of mpirun or mpiexec, to execute MPI code. It should look like this: srun --mpi=pmix mpi_program

    +
  6. +
  7. Change #SBATCH options: The new cluster has different partition names and different sized nodes. The main general use partition is now called shared instead of univ2 or univ3. Because most of our nodes now have 128 cores, requesting multiple nodes is not advantageous if your job uses fewer than 128 cores. For similar reasons, we now recommend requesting memory per core instead of memory per node, using the --mem-per-cpu flag with units of MB. Here are our recommendations for different sized jobs (a complete example script follows the table):
  8. +
+ + + + + + + + + + + + + + + + + + +
Job sizeRecommended #SBATCH flags
32-128 coresExample for 32 cores:
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=32 # recommend multiples of 16
+#SBATCH --mem-per-cpu=4000
96 - 256 coresSplit over a few nodes, for example for 160 cores:
+#SBATCH --nodes=2
+#SBATCH --ntasks-per-node=80 # designate cores per node
+#SBATCH --mem-per-cpu=4000
+ OR:
+#SBATCH --nodes=2
+#SBATCH --ntasks=160 # designate overall cores
+#SBATCH --mem-per-cpu=4000
128 or 256 cores (whole nodes)Example for 256 cores:
+#SBATCH --nodes=2
+#SBATCH --ntasks-per-node=128
+#SBATCH --mem-per-cpu=4000
+ +
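Putting these recommendations together, a minimal single-node test job script might look like the following sketch; the time limit, core count, and program name are placeholders to adjust for your own work:

#!/bin/bash
#SBATCH --partition=shared
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=32
#SBATCH --mem-per-cpu=4000
#SBATCH --time=01:00:00

# Run out of /scratch/$USER per the new file system conventions
cd /scratch/$USER

# Launch MPI code with srun rather than mpirun
srun --mpi=pmix ./mpi_program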

Optimizing Jobs (Optional)

+ +

The new cluster has very fast local disk space on each node. If your code is able to use local space for certain parts of its calculations, or is able to sync data between local spaces, it may be advantageous to use this disk to speed up your jobs. It is located at the following path on each node:

+ +
/local/$USER
+
+ +
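If your workflow can take advantage of this, a hypothetical pattern inside a job script might be (the file and program names are placeholders):

# Stage input to the fast node-local disk, compute there, then copy results back
mkdir -p /local/$USER/$SLURM_JOB_ID
cp /scratch/$USER/big_input.dat /local/$USER/$SLURM_JOB_ID/
cd /local/$USER/$SLURM_JOB_ID
./run_calculation big_input.dat
cp results.dat /scratch/$USER/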

New Cluster Specifications

+ +

Execute Nodes

+ +

We have 40 general use execute nodes, representing 5,120 cores of capacity. Server specs (Dell PowerEdge R6525):

+
    +
  • 128 cores using the AMD Epyc 7763 processor
  • +
  • 512GB of memory
  • +
  • 1.5TB of local (not shared) fast NVME disk
  • +
+ +

Operating System: CentOS Stream 8

+ +

Scheduler: SLURM 22.05.6

+ + +
+
+ + + + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/archived/software-overview-htc.html b/preview-fall2024-info/uw-research-computing/archived/software-overview-htc.html new file mode 100644 index 000000000..35da210da --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/archived/software-overview-htc.html @@ -0,0 +1,66 @@ + + +

+While our High Throughput system has little in the way of pre-installed software, +we've created resources to help users set up the software they want to use for running their jobs. +

+ +{% capture content %} + +

Quickstart

+

CHTC Recipes Repository

+

Containers

+ +{% endcapture %} +{% include /components/directory.html title="Table of Contents" %} + +

Quickstart

+ +

+Click the button that corresponds to the language/program/software that you want to use. +More information is provided in the Recipes repository and Containers sections. +

+ + + + +
+ +

CHTC Recipes Repository

+ +

+CHTC provides examples for software and workflows for use on our systems in our "Recipes" repository on Github: +https://github.com/CHTC/recipes. +

+ +

Containers

+ +

+Many of the recipes in our Recipes repository involve building your own container. +In this section, we provide a brief introduction into how to use containers for setting up your own software to run on the High Throughput system. +

+ + \ No newline at end of file diff --git a/preview-fall2024-info/uw-research-computing/archived/tensorflow-singularity-wait.html b/preview-fall2024-info/uw-research-computing/archived/tensorflow-singularity-wait.html new file mode 100644 index 000000000..f36553d2a --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/archived/tensorflow-singularity-wait.html @@ -0,0 +1,483 @@ + + + + + + +Running Tensorflow Jobs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Running Tensorflow Jobs +

+ +

This guide describes how to use a pre-built Tensorflow environment +(implemented as a Singularity container) +to run Tensorflow jobs in CHTC and on the OS Pool.

+ +

Overview

+ +

Typically, software in CHTC jobs is installed or compiled locally by +individual users and then brought along to each job, either using the +default file transfer or our SQUID web server. However, another option +is to use a container system, where the software is installed in a +container image. CHTC (and the OS Pool) have capabilities to access and +start containers and run jobs inside them. One container option +available in CHTC is Docker; another is +Singularity.

+ +

In CHTC, our Singularity support consists of running jobs inside a pre-made Singularity container with an installation of Tensorflow. This Singularity setup is very flexible: it is accessible both in CHTC and on the OS Pool, and can be used to run Tensorflow with either CPUs or GPUs. This guide starts with a basic CPU example, but then goes on to describe how to use the Singularity Tensorflow container for GPUs, and also how to run on the OS Pool.

+ +
    +
  1. Basic Tensorflow Job Template
  2. +
  3. Using Tensorflow on GPUs
  4. +
  5. Using Tensorflow on the OS Pool
  6. +
+ +

+ +

1. Basic Tensorflow Job Template

+ +

The submit file for jobs that use the Tensorflow singularity container +will look similar to other CHTC jobs, except for the additional +Singularity options seen below.

+ +

Submit File

+ +
# Typical submit file options
+universe = vanilla
+log = $(Cluster).$(Process).log
+error = $(Cluster).$(Process).err
+output = $(Cluster).$(Process).out
+
+# Fill in with your own script, arguments and input files
+# Note that you don't need to transfer any software
+executable = run_tensorflow.sh
+arguments =
+transfer_input_files = 
+
+# Singularity settings
++SingularityImage = "/cvmfs/singularity.opensciencegrid.org/opensciencegrid/tensorflow:latest"
+Requirements = HAS_SINGULARITY == True
+
+# Resource requirements
+request_cpus = 1
+request_memory = 2GB
+request_disk = 4GB
+
+# Number of jobs
+queue 1
+
+ +

Sample Executable (Wrapper Script)

+ +

Your job will be running inside a container that has Tensorflow +installed, so there should be no need to set any environment variables.

+ +
#!/bin/bash
+
+# your own code here 
+python test.py
+
+ +

+ +

2. CPUs vs GPUs

+ +

The submit file above uses a CPU-enabled version of Tensorflow. In order to take advantage of GPUs, make the following changes to that submit file:

+ +
    +
  • +

    Request GPUs in addition to CPUs:

    + +
    request_gpus = 1
    +
    +
  • +
  • +

    Change the Singularity image to tensorflow with GPUs:

    + +
    +SingularityImage = "/cvmfs/singularity.opensciencegrid.org/opensciencegrid/tensorflow-gpu:latest"
    +
    +
  • +
  • +

    Add a GPU card requirement to the requirements line:

    + +
    requirements = HAS_SINGULARITY == True && CUDACapability >= 3
    +
    +
  • +
+ +

For more information about GPUs and how GPU jobs work in CHTC, see our +GPU Jobs guide.

+ +
+

Limited GPU availability in CHTC
+This Singularity/Tensorflow functionality is not yet available on CHTC's newer GPUs with a sufficiently high CUDA Capability. Therefore, for now, the best way to use this Singularity/Tensorflow environment with GPUs is by running jobs on the OS Pool (see below). We are working on having Singularity support on all CHTC GPUs soon.
+This Singularity/Tensorflow functionality is not yet available on +CHTC's newer GPUs with a sufficiently high CUDA Capability. +Therefore, for now, the best way to use this Singularity/Tensorflow +environment with GPUs is by running jobs on the OS Pool (see +below). We are working on having Singularity support on all CHTC GPUs +soon.

+
+ +

+ +

3. Running on OS Pool

+ +

This Tensorflow environment can also be run on the OS Pool either as the CPU or GPU version.

+ +

For more details on accessing the OS Pool, see our guide for running +outside CHTC, sections 3 and 4.

+ +
+
+ + + + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/campus_map.html b/preview-fall2024-info/uw-research-computing/campus_map.html new file mode 100644 index 000000000..8c2f8d5f0 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/campus_map.html @@ -0,0 +1,2154 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + \ No newline at end of file diff --git a/preview-fall2024-info/uw-research-computing/check-quota.html b/preview-fall2024-info/uw-research-computing/check-quota.html new file mode 100644 index 000000000..d8d12204a --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/check-quota.html @@ -0,0 +1,724 @@ + + + + + + +Check Disk Quota and Usage + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Check Disk Quota and Usage +

+ +

The following commands will allow you to monitor the amount of disk +space you are using in your home directory on our (or another) submit node and to determine the +amount of disk space you have been allotted (your quota).

+ +

If you also have a /staging directory on the HTC system, see our +staging guide for +details on how to check your quota and usage. +
+The default quota allotment on CHTC submit nodes is 20 GB with a hard +limit of 30 GB (at which point you cannot write more files).
+
+Note: The CHTC submit nodes are not backed up, so you will want to +copy completed jobs to a secure location as soon as a batch completes, +and then delete them on the submit node in order to make room for future +jobs. If you need more disk space to run a single batch or concurrent +batches of jobs, please contact us (Get Help!). We have multiple ways of dealing with large disk space +requirements to make things easier for you.

+ +

If you wish to change your quotas, please see Request a Quota Change.

+ +

1. Checking Your User Quota and Usage

+ +

From any directory location within your home directory, type +quota -vs. See the example below:

+ +
[alice@submit]$ quota -vs
+Disk quotas for user alice (uid 20384): 
+     Filesystem   space   quota   limit   grace   files   quota   limit   grace
+      /dev/sdb1  12690M  20480M  30720M            161k       0       0        
+
+ +

The output will list your total data usage (the space column), your soft quota, and your hard limit, at which point your jobs will no longer be allowed to save data. With the -s flag, values are displayed in human-readable units (e.g., 20480M for the 20 GB quota); without it, each value is given in 1-kilobyte blocks, so you can divide each number by 1024 to get megabytes (MB), and again for gigabytes (GB). (It also lists information for files, but we don't typically allocate disk space by file count.)

+ +

2. Checking the Size of Directories and Contents

+ +

Move to the directory you'd like to check and type du . After several moments (longer if your directory contents are large), the command will add up the sizes of the directory contents and output the total size of each contained directory in units of kilobytes, with the total size of that directory listed last. See the example below:

+ +
[alice@submit]$ du ./
+4096    ./dir/subdir/file.txt
+4096    ./dir/subdir
+7140    ./dir
+74688   .
+
+ +

As for quota usage above, you can divide each value by 1024 to get +megabytes, and again for gigabytes.

+ +

Using du with the -h or --human-readable flag will display the same values with only a few significant digits and a K, M, or G to denote the byte units. The -s or --summarize flag will total up the size of the current directory without listing the size of its contents. You can also specify which directory you'd like to query, without moving to it, by adding the relative filepath after the flags. See the example below, run from a home directory that contains the directory dir:

+ +
[alice@submit]$ du -sh dir
+7.1K    dir
+
+ +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/checkpointing.html b/preview-fall2024-info/uw-research-computing/checkpointing.html new file mode 100644 index 000000000..266c49b76 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/checkpointing.html @@ -0,0 +1,795 @@ + + + + + + +Checkpointing Jobs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Checkpointing Jobs +

+ +

What is Checkpointing?

+ +

Checkpointing is a technique that provides fault tolerance for a user’s analysis. It consists of saving snapshots of a job’s progress so that, if interrupted, the job can resume from where it left off rather than starting over from the beginning. We highly encourage checkpointing as a solution for jobs that will exceed the 72-hour default maximum runtime on the HTC system.

+ +

This section is about jobs capable of periodically saving checkpoint information, and how to make HTCondor store that information safely, in case it’s needed to continue the job on another machine or at a later time.

+ +

There are two types of checkpointing: exit driven and eviction driven. In the vast majority of cases, exit driven checkpointing is preferred over eviction driven checkpointing. Therefore, this guide focuses on how to use exit driven checkpointing for your analysis.

+ +

Note that not all software, programs, or code are capable of creating checkpoint files and knowing how to resume from them. Consult the manual for your software or program to determine if it supports checkpointing features. Some manuals refer to this ability as a “checkpoint” feature, as the ability to “resume” mid-analysis if a job is interrupted, or as a “checkpoint/restart” capability. Contact a Research Computing Facilitator if you would like help determining if your software, program, or code is able to checkpoint.

+ +

Why Checkpoint?

+ +

Checkpointing allows a job to automatically resume from approximately where it left off instead of having to start over if interrupted. This behavior is advantageous for jobs limited by a maximum runtime policy (72 hours on the HTC system). It is also advantageous for jobs submitted to backfill resources with no runtime guarantee (e.g. for +WantFlocking or +WantGliding jobs) where the compute resources may also be more prone to hardware or networking failures.

+ +

For example, for jobs limited by a runtime policy, checkpointing enables HTCondor to exit a job and automatically requeue it before the maximum runtime limit is reached. Such jobs can therefore run for extended periods of time, across multiple requeues, until the analysis completes. This avoids the costly setback of losing results mid-way through an analysis because a runtime limit was hit.

+ +

Process of Exit Driven Checkpointing

+ +

Using exit driven checkpointing, a job is specified to time out after a user-specified amount of time with an exit code value of 85 (more on this below). Upon hitting this time limit, HTCondor transfers any checkpoint files listed in the submit file attribute transfer_checkpoint_files to a directory called /spool. This directory acts as a storage location for these files in case the job is interrupted. HTCondor then knows that jobs with exit code 85 should be automatically requeued, and will transfer the checkpoint files in /spool to your job’s working directory prior to restarting your executable.

+ +

The process of exit driven checkpointing relies heavily on the use of exit codes to determine the next appropriate steps for HTCondor to take with a job. In general, exit codes are used to report program status, such as whether an analysis is still running, encountered an error, or completed successfully. HTCondor recognizes exit code 85 as indicating a checkpointing job and therefore knows to handle these jobs differently than non-checkpointing jobs.

+ +

Requirements for Exit Driven Checkpointing

+ +

Requirements for your code or software:

+ +
    +
  • Checkpoint: The software, program, or code you are using must be able to generate checkpoint files (i.e. snapshots of the progress made thus far) and know how to resume from them.
  • +
  • Resume: Your code must be able to recognize checkpoint files and resume from them, instead of from the original input data, when it is restarted.
  • +
  • Exit: Jobs should exit with an exit code value of 85 after successfully creating checkpoint files. Additionally, jobs need to be able to exit with a non-85 value when they encounter an error or after writing the final outputs.
  • +
+ +

In some cases, these requirements can be achieved by using a wrapper script. This means that your executable may be a script, rather than the code that is writing the checkpoint. An example wrapper script that enables some of these behaviors is below.

+ +

Contact a Research Computing Facilitator for help determining if your job is capable of using checkpointing.

+ +

Changes to the Submit File

+ +

Several modifications to the submit file are needed to enable HTCondor’s checkpointing feature.

+ +
    +
  • The line checkpoint_exit_code = 85 must be added. HTCondor recognizes code 85 as indicating a checkpointing job. This means HTCondor knows to end a job with this code but then to requeue it repeatedly until the analysis completes.
  • +
  • The value of when_to_transfer_output should be set to ON_EXIT.
  • +
  • The name of the checkpoint files or directories to be transferred to /spool should be specified using transfer_checkpoint_files.
  • +
+ +

Optional: In some cases, it is necessary to write a wrapper script to tell a job when to time out and exit. In cases such as this, the executable will need to be changed to the name of that wrapper script. An example of a wrapper script that enables a job to checkpoint and exit with the proper exit codes can be found below.

+ +

An example submit file for an exit driven checkpointing job looks like:

+ +
# exit-driven-example.submit
+
+executable                  = exit-driven.sh
+arguments                   = argument1 argument2
+
+checkpoint_exit_code        = 85
+transfer_checkpoint_files   = my_output.txt, temp_dir, temp_file.txt
++is_resumable = true
+
+should_transfer_files       = yes
+when_to_transfer_output     = ON_EXIT
+
+output                      = example.out
+error                       = example.err
+log                         = example.log
+
+request_cpus                = 1
+request_disk                = 2 GB
+request_memory              = 2 GB 
+
+queue 1
+
+ +

Example Wrapper Script for Checkpointing Job

+ +

As previously described, it may be necessary to use a wrapper script to tell your job when and how to exit as it checkpoints. An example of a wrapper script that tells a job to exit every 4 hours looks like:

+ +
#!/bin/bash
+ 
+timeout 4h do_science arg1 arg2
+ 
+timeout_exit_status=$?
+ 
+if [ $timeout_exit_status -eq 124 ]; then
+    exit 85
+fi
+ 
+exit $timeout_exit_status
+
+ +

Let’s take a moment to understand what each section of this wrapper script is doing:

+ +
#!/bin/bash
+
+timeout 4h do_science argument1 argument2
+# The `timeout` command will stop the job after 4 hours (4h). 
+# This number can be increased or decreased depending on how frequently your code/software/program
+# creates checkpoint files and how long it takes to create/resume from these files.
+# Replace `do_science argument1 argument2` with the execution command and arguments for your job.
+
+timeout_exit_status=$?
+# Uses the bash notation of `$?` to call the exit value of the last executed command 
+# and to save it in a variable called `timeout_exit_status`. 
+
+
+
+if [ $timeout_exit_status -eq 124 ]; then
+    exit 85
+fi
+
+exit $timeout_exit_status
+
+# The `timeout` command exits with code `124` when it stops a program for
+# exceeding the time limit. The portion above replaces exit code `124` with
+# code `85`. HTCondor recognizes code `85` and knows to end a job with this
+# code once the time specified by `timeout` has been reached. Upon exiting,
+# HTCondor saves the files from jobs with exit code `85` in the temporary
+# directory within `/spool`. Once the files have been transferred, HTCondor
+# automatically requeues that job and fetches the files found in `/spool`.
+# If an exit code of `124` is not observed (for example, if the program
+# finished running or encountered an error), HTCondor will end the job and
+# will not automatically requeue it.
+
+
+ +

The ideal timeout frequency for a job is every 1-5 hours, with a maximum of 10 hours. For jobs that checkpoint and time out in under an hour, it is possible that a job may spend more time on checkpointing procedures than on moving forward with the analysis. Beyond 10 hours, jobs that checkpoint and time out are less able to take advantage of submitting jobs outside of CHTC to run on other campus resources or on the OSPool.

+ +

Checking the Progress of Checkpointing Jobs

+ +

Always test a single checkpointing job before scaling up to identify odd or unintentional behaviors in your analysis.

+ +

To determine if your job is successfully creating and saving checkpoint files, you can investigate checkpoint files once they have been transferred to /spool.

+ +

You can explore the checkpointed files in /spool by navigating to /var/lib/condor/spool. The directories in this folder are named using the last four digits of a job’s cluster ID, with leading zeros removed. Subfolders are labeled with the process ID for each job. For example, to investigate the checkpoint files for 17870068.220, the files in /spool would be found in folder 68 in a subdirectory called 220.
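Following that layout, you could list the checkpoint files for job 17870068.220 with:

ls /var/lib/condor/spool/68/220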

+ +

It is also possible to intentionally evict a running job and have it rematch to an execute server, in order to test whether your code successfully resumes from checkpoint files. To do this, use condor_vacate_job <JobID>. This command evicts your job and returns it to the “Idle” state in the queue. The job will begin running again once it rematches to an execute server, allowing you to check whether it correctly resumes from its checkpoint files or incorrectly starts the analysis over.

+ +

More Information

+ +

More information on checkpointing HTCondor jobs can be found in HTCondor’s manual: https://htcondor.readthedocs.io/en/latest/users-manual/self-checkpointing-applications.html. This documentation describes additional features available to checkpointing jobs, as well as additional examples, such as a Python checkpointing job.

+ +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/chtc-services.html b/preview-fall2024-info/uw-research-computing/chtc-services.html new file mode 100644 index 000000000..33ef988c4 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/chtc-services.html @@ -0,0 +1,413 @@ + + + + + + +CHTC Services for Research Computing + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ CHTC Services for Research Computing +

+ +

This page outlines CHTC services offered to UW - Madison affiliates and general guidelines for their use. Users with existing accounts should also refer to our User Expectations pages for more specific limits and guidelines for using CHTC services. To apply for a CHTC account, fill out this form: Getting Started

+ +

Overview

+ +

CHTC’s Research Computing activities aim to empower the research and teaching mission of the University of Wisconsin - Madison by providing access to scalable computing and data capacity.

+ +

Access to standard CHTC services is free of charge.

+ +

Who can use CHTC services?

+ +

Access to CHTC services is available to:

+ +
    +
  • Current UW Madison affiliates (faculty, students, staff, post-docs)
  • +
  • Current UW System affiliates
  • +
  • Collaborators of UW Madison affiliates, where collaborator access benefits the work of the UW - Madison affiliate, e.g. recently graduated students, collaborators on multi-institution grants
  • +
+ +

Computing

+ +

CHTC operates two large-scale computing systems.

+ +

High Throughput Computing

+ +
    +
  • Roughly 15k CPU cores, 100+ GPUs
  • +
  • Additional capacity available via campus HTC systems and the national OSPool.
  • +
  • Single user: 10s - 1000s of tasks (jobs) running at once
  • +
  • Scheduled using HTCondor
  • +
+ +

High Performance Computing (SPARK)

+
    +
  • About 8k CPU cores
  • +
  • Infiniband networking for multi-node capability
  • +
  • Single user: up to 10 jobs running or 720 cores in use
  • +
  • Jobs are scheduled using SLURM job scheduling software
  • +
+ +

CHTC computing capacity is allocated via a fair-share scheduling algorithm. For groups that require additional or dedicated computing capacity, there is the option to purchase hardware. See our description of buy-in options here: CHTC Buy-In Overview

+ +

Data

+ +

CHTC provides space for data, software and other files that are being used for active computational work. Our file systems have no backup or other redundancy and should not be used as a storage solution. Researchers are assigned a default space quota when their account is created that can be increased upon request. For needs greater than 2TB or for longer-term projects, contact us to sign a data use memorandum of understanding.

+ +

Software

+ +

CHTC systems support software that runs on Linux. Whether or not licensed software +can be run on CHTC depends significantly on the type of license.

+ +

Both CHTC computing systems support containers.

+ +

Citing CHTC

+ +

In order to track our scientific impact we ask that users cite a DOI in all publications that have benefited from our services. See Citing CHTC for more details.

+ +
+
+ + + + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/cite-chtc.html b/preview-fall2024-info/uw-research-computing/cite-chtc.html new file mode 100644 index 000000000..1e8da09f0 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/cite-chtc.html @@ -0,0 +1,406 @@ + + + + + + +Citing CHTC Resources + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Citing CHTC Resources +

+ +

In a Publication

+ +

In order to track our scientific impact we ask that users cite the following DOI in all publications +that have benefited from our services.

+ +
+

Center for High Throughput Computing. (2006). Center for High Throughput Computing. doi:10.21231/GNT1-HW21

+
+ +

For your convenience, a BibTeX file containing the above reference has been provided.
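If you prefer to copy the reference directly, an entry along the following lines should be equivalent; the entry key and field layout here are illustrative, and the provided BibTeX file is authoritative:

@misc{chtc2006,
  author = {{Center for High Throughput Computing}},
  title  = {Center for High Throughput Computing},
  year   = {2006},
  doi    = {10.21231/GNT1-HW21}
}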

+ +

(Last updated Feb, 2023)

+ +

Appropriate acknowledgement of OSG resources is described here

+ +


+ +

For a Grant Proposal

+ +

(Feel free to modify the below text, use only certain paragraphs, or contact us for more input or customizable letters of support.)

+ +

The University of Wisconsin-Madison (UW-Madison) campus is an excellent match for meeting the computational needs of this project. Existing UW-Madison technology infrastructure supported by the CHTC can be readily leveraged, including CPU capacity, network connectivity, storage availability, and middleware connectivity. The UW-Madison has invested in the CHTC as the primary provider of shared, computing resources to campus researchers. All standard CHTC services are provided free-of-charge to UW-Madison researchers, their projects, and collaborators. But perhaps most important, the UW-Madison has significant staff experience and core competency in deploying, managing, and using computational technology.

+ +

The CHTC is home to over 20 full-time staff with a proven track record of making compute middleware work for scientists. Far beyond just being familiar with the deployment and use of such software, UW staff has been intimately involved in its design and implementation. Dedicated Research Computing Facilitators are available to provide training to all CHTC users and are available to consult on computational practices for achieving the best scientific throughput. As always, CHTC will be happy to provide consulting to ensure optimal use of its facilities, and development of robust, reproducible methods for scalable computing.

+ +

The UW-Madison maintains multiple compute clusters (including the largest of these operated by CHTC) across campus that are managed using either HTCondor or SLURM with support from CHTC. These clusters are connected by HTCondor technology to share resources with each other and with other institutions around the world via OSG services. Local computing capacity directly enabled by CHTC includes:

+ +
    +
  • +

    High-Throughput Computing (HTC) resources totaling about 30,000 CPU cores in support of research. Temporary file space for large individual files can support up to hundreds of terabytes of total working data. For single computing runs needing significant memory on a single server, the CHTC maintains several multi-core servers with terabytes of memory.

    + +
      +
    • When on-campus resources are fully utilized, CHTC leverages OSG services to provision additional opportunistic resources from multiple external sites.
    • +
    +
  • +
  • +

    A High-Performance Computing (HPC) cluster consisting of roughly 7,000 tightly coupled cores. Compute nodes have 64 or 128 cores each, and 512 GB RAM, and are networked with 200 Gbps Infiniband, with access to a shared file system and resources managed via Slurm.

    +
  • +
  • +

    An origin server where users can make research data available through the Open Science Data Federation and an on-campus cache server which allows external jobs to cache files locally.

    +
  • +
+ +

In the last year, CHTC made possible the use of more than 40,000 core years of computing work for campus researchers, supporting over 300 projects across a wide range of research domains. Temporary storage space for large files can support up to hundreds of terabytes of total working data. Should these resources not be sufficient for the project, the CHTC can also engage computing resources from across the campus grid and the OS Pool, an NSF-supported and expanding alliance of more than 100 universities, national laboratories, scientific collaborations, and software developers.

+ +

The UW–Madison network currently comprises a 200Gbps backbone and WAN connectivity with 160Gbps to the Discovery building. The equipment is located on the “Research Backbone Network”, which allows for friction-free access (e.g., no middlebox devices such as firewalls on the data path) to the nation’s research and education networks. Redundancy is built into the network and its supporting infrastructure. An equitable funding model assures that network resources are kept current. The UW has been fundamental to the establishment of the Broadband Optical Research Education And Science network (BOREAS). This Regional Optical Network (RON) connects to the CIC OmniPoP in Chicago, providing a high-speed gateway to various research networks, including Internet2, ESNet, CERN, and other global research networks. BOREAS, along with our participation in the Northern Tier Network Consortium, provides various options to connect at very high speeds to research partners with shared or dedicated bandwidth.

+ +

(Last updated May 3, 2024)

+ +
+
+ + + + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/conda-installation.html b/preview-fall2024-info/uw-research-computing/conda-installation.html new file mode 100644 index 000000000..1f13e4b85 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/conda-installation.html @@ -0,0 +1,1058 @@ + + + + + + +Create a Portable Python Installation with Miniconda + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Create a Portable Python Installation with Miniconda +

+ +

Quickstart: Conda

+ + + +

Build a container with Conda packages installed inside:

+ +
    +
  1. How to build your own container
  2. +
  3. Example container recipes for Conda
  4. +
  5. Use your container in your HTC jobs
  6. +
+ +

Option B

+ +

Create your own portable copy of your Conda packages:

+ +
    +
  1. Follow the instructions in our guide
  2. +
+ +
+

This approach may be sensitive to the operating system of the execution point. +We recommend building a container instead, but are keeping these instructions as a backup.

+
+ + + +

More information

+ +

The above instructions are intended for cases where you have package(s) that need to be installed using conda install. Miniconda can be used to install Python and R and their corresponding packages. But if you only need to install Python or R, and do not otherwise need a conda install command to set up the packages, you should see the instructions specifically for setting up Python or R, because there is less chance of obscure errors when building your container.

+ +

When building or using a Miniconda container, you do not need to create or activate a conda environment. +For the build process, you skip directly to the conda install commands you want to run. +Similarly, when executing a script in a Miniconda container, the packages are loaded when the container starts.

+ +

Executable

+ +

If you are planning to execute a python .py script using your Miniconda container, you can follow the instructions in the Python guide.

+ +

If you are planning to execute a .R script using your Miniconda container, you can follow the instructions in the R guide.

+ +

Otherwise, you can use a bash .sh script as the submit file executable:

+ +
#!/bin/bash
+
+<your commands go here>
+
+ +

where the contents of the file are the commands that you want to execute using your conda environment. +You do not and should not try to activate the conda environment in the executable if you are using a container.

+ +

Specifying Exact Dependency Versions

+ +

An important part of improving reproducibility and consistency between runs +is to ensure that you use the correct/expected versions of your dependencies.

+ +

When you run a command like conda install numpy, conda tries to install +the most recent version of numpy. For example, numpy version 1.18.2 +was released on March 17, 2020. To install exactly this version of numpy, you +would run conda install numpy=1.18.2 +(the same works for pip, if you replace = with ==). We +recommend installing with an explicit version to make sure you have exactly +the version of a package that you want. This is often called +“pinning” or “locking” the version of the package.
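For example, to pin the numpy version mentioned above with either tool (substitute your own package and version):

conda install numpy=1.18.2
pip install numpy==1.18.2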

+ +

If you want a record of what is installed in your environment, or want to +reproduce your environment on another computer, conda can create a file, usually +called environment.yml, that describes the exact versions of all of the +packages you have installed in an environment. +This file can be re-used by a different conda command to recreate that +exact environment on another computer.

+ +

To create an environment.yml file from your currently-activated environment, run

+ +
[alice@submit]$ conda env export > environment.yml
+
+ +

This environment.yml will pin the exact version of every dependency in your +environment. This can sometimes be problematic if you are moving between +platforms because a package version may not be available on some other platform, +causing an “unsatisfiable dependency” or “inconsistent environment” error. +A much less strict pinning is

+ +
[alice@submit]$ conda env export --from-history > environment.yml
+
+ +

which only lists packages that you installed manually, and does not pin their +versions unless you yourself pinned them during installation. +If you need an intermediate solution, it is also possible to manually edit +environment.yml files; see the +conda environment documentation +for more details about the format and what is possible. +In general, exact environment specifications are simply not guaranteed to be +transferable between platforms (e.g., between Windows and Linux). +We strongly recommend using the strictest possible pinning available to you.

+ +

To create an environment from an environment.yml file, run

+ +
[alice@submit]$ conda env create -f environment.yml
+
+ +

By default, the name of the environment will be whatever the name of the source +environment was; you can change the name by adding a -n <name> option to the +conda env create command.

+ +

If you use a source control system like git, we recommend checking your +environment.yml file into source control and making sure to recreate it +when you make changes to your environment. +Putting your environment under source control gives you a way to track how it +changes along with your own code.

+ +

If you are developing software on your local computer for eventual use on +the CHTC pool, your workflow might look like this:

+
    +
  1. Set up a conda environment for local development and install packages as desired +(e.g., conda create -n science; conda activate science; conda install numpy).
  2. +
  3. Once you are ready to run on the CHTC pool, create an environment.yml file +from your local environment (e.g., conda env export > environment.yml).
  4. +
  5. Move your environment.yml file from your local computer to the submit machine +and create an environment from it (e.g., conda env create -f environment.yml), +then pack it for use in your jobs, as per +Create Software Package.
  6. +
+ +

More information on conda environments can be found in +their documentation.

+ + + +

Option B: Create your own portable copy

+ +

1. Create a Miniconda installation

+ +

On the submit server, +download the latest Linux miniconda installer and run it.

+ +
[alice@submit]$ wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
+[alice@submit]$ sh Miniconda3-latest-Linux-x86_64.sh
+
+ +

Accept the license agreement and default options. At the end, you can choose whether or +not to “initialize Miniconda3 by running conda init?” +We recommend that you enter “yes”. +Once you’ve completed the installer, you’ll be prompted to restart your terminal. +Log out and log back in, and conda will be ready to use to set up your software.

+ +
+

If you choose “no” you’ll want to save the eval command shown by the installer so that you can reactivate the +Miniconda installation when needed in the future.

+
+ +

2. Create a conda “environment” with your software

+ +
+

(If you are using an environment.yml file as described +later, you should instead create +the environment from your environment.yml file. If you don’t have an +environment.yml file to work with, follow the install instructions in this +section. We recommend switching to the environment.yml method of creating +environments once you understand the “manual” method presented here.)

+
+ +

Make sure that you’ve activated the base Miniconda environment if you haven’t +already. Your prompt should look like this:

+ +
(base)[alice@submit]$ 
+
+ +

To create an environment, use the conda create command and then activate the +environment:

+ +
(base)[alice@submit]$ conda create -n env-name
+(base)[alice@submit]$ conda activate env-name
+
+ +

Then, run the conda install command to install the different packages and +software you want to include in the installation. How this should look is often +listed in the installation examples for software +(e.g. Qiime2, +Pytorch).

+ +
(env-name)[alice@submit]$ conda install pkg1 pkg2
+
+ +

Some Conda packages are only available via specific Conda channels, which serve as repositories for hosting and managing packages. If Conda is unable to locate the requested packages using the example above, you may need to have Conda search other channels. More details are available at https://docs.conda.io/projects/conda/en/latest/user-guide/concepts/channels.html.

+ +

Packages may also be installed via pip, but you should only do this +when there is no conda package available.

+ +

Once everything is installed, deactivate the environment to go back to the +Miniconda “base” environment.

+ +
(env-name)[alice@submit]$ conda deactivate
+
+ +

For example, if you wanted to create an installation with pandas and +matplotlib and call the environment py-data-sci, you would use this sequence +of commands:

+ +
(base)[alice@submit]$ conda create -n py-data-sci
+(base)[alice@submit]$ conda activate py-data-sci
+(py-data-sci)[alice@submit]$ conda install pandas matplotlib
+(py-data-sci)[alice@submit]$ conda deactivate
+(base)[alice@submit]$ 
+
+ +
+

More about Miniconda

+ +

See the official conda documentation for +more information on creating and managing environments with conda.

+
+ +

3. Create Software Package

+ +

Make sure that your job’s Miniconda environment is created, but deactivated, so +that you’re in the “base” Miniconda environment:

+ +
(base)[alice@submit]$ 
+
+ +

Then, run this command to install the conda pack tool:

+ +
(base)[alice@submit]$ conda install -c conda-forge conda-pack
+
+ +

Enter y when it asks you to install.

+ +

Finally, use conda pack to create a zipped tar.gz file of your environment +(substitute the name of your conda environment where you see env-name), +set the proper permissions for this file using chmod, and check the size of +the final tarball:

+ +
(base)[alice@submit]$ conda pack -n env-name --dest-prefix='$ENVDIR'
+(base)[alice@submit]$ chmod 644 env-name.tar.gz
+(base)[alice@submit]$ ls -sh env-name.tar.gz
+
+ +

When this step finishes, you should see a file in your current directory named +env-name.tar.gz

+ +

4. Check Size of Conda Environment Tar Archive

+ +

The tar archive, env-name.tar.gz, created in the previous step will be used as input for subsequent job submission. As with all job input files, you should check the size of this Conda environment file. If it is >100MB in size, you should NOT transfer the tarball using transfer_input_files. Instead, you should plan to use either CHTC’s web proxy SQUID or our large data filesystem, staging. Please contact a research computing facilitator at chtc@cs.wisc.edu to determine the best option for your jobs.

+ +

More information is available at File Availability with Squid Web Proxy +and Managing Large Data in HTC Jobs.

+ +

5. Create a Job Executable

+ +

The job will need to go through a few steps to use this “packed” conda environment; +first, setting the PATH, then unzipping the environment, then activating it, +and finally running whatever program you like. The script below is an example +of what is needed (customize as indicated to match your choices above).

+ +
#!/bin/bash
+
+# have job exit if any command returns with non-zero exit status (aka failure)
+set -e
+
+# replace env-name on the right hand side of this line with the name of your conda environment
+ENVNAME=env-name
+# if you need the environment directory to be named something other than the environment name, change this line
+export ENVDIR=$ENVNAME
+
+# these lines handle setting up the environment; you shouldn't have to modify them
+export PATH
+mkdir $ENVDIR
+tar -xzf $ENVNAME.tar.gz -C $ENVDIR
+. $ENVDIR/bin/activate
+
+# modify this line to run your desired Python script and any other work you need to do
+python3 hello.py
+
+ +

6. Submit Jobs

+ +

In your submit file, make sure to have the following:

+ +
    +
  • Your executable should be the bash script you created in step 5.
  • +
  • Remember to transfer your Python script and the environment tar.gz file via transfer_input_files. Since the tar.gz file will almost certainly be larger than 100MB, please email us about different tools for delivering the installation to your jobs, likely our SQUID web proxy. A sketch of a complete submit file follows this list.
  • +
+ + + + +
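Putting this together, a minimal submit file might look like the following sketch. The file names and resource requests are placeholders, and a tarball over 100MB should be delivered via SQUID or staging rather than listed in transfer_input_files:

# conda_job.sub
executable = run_conda_job.sh

# hello.py and env-name.tar.gz are placeholders for your own files
transfer_input_files = hello.py, env-name.tar.gz

log = job.log
error = job.err
output = job.out

request_cpus = 1
request_memory = 2GB
request_disk = 4GB

queue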
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/condor_q.html b/preview-fall2024-info/uw-research-computing/condor_q.html new file mode 100644 index 000000000..aa3371d2f --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/condor_q.html @@ -0,0 +1,960 @@ + + + + + + +Learn About Your Jobs Using condor_q + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Learn About Your Jobs Using condor_q +

+ +

The condor_q command can be used for much more than just +checking on whether your jobs are running or not! Read on to learn how +you can use condor_q to answer many common questions about running +jobs.

+ +

Summary

+ +
    +
  • condor_q: Show my jobs that have been submitted on this server.
    +Useful options: +
      +
    • -nobatch: Starting in HTCondor version 8.6.0, installed in July 2016, data is displayed in a compact mode (one line per cluster). With this option, output will be displayed in the old format (one line per process).
    • +
    • -all: Show all the jobs submitted on the submit server.
    • +
    • -hold: Show only jobs in the "on hold" state and the reason for it. Held jobs are those that encountered an error and could not finish; action from the user is typically needed to resolve the problem.
    • +
    • -better-analyze JobId: Analyze a specific job and show the reason why it is in its current state.
    • +
    • -run: Show your running jobs and related info, like how much +time they have been running, in which machine, etc.
    • +
    • -dag: Organize condor_q output by DAG.
    • +
    • -long JobId: Show all information related to that job.
    • +
    • -af Attr1 Attr2 ...: List specific attributes of jobs, using +autoformat.
    • +
    +
  • +
+ +

Examples and Further Explanation

+ +

+ +

1. Default condor_q output

+ +

As of July 19, 2016, the default condor_q output will show a single +user's jobs, grouped in "batches", as shown below:

+ +
[alice@submit]$ condor_q
+OWNER   BATCH_NAME        SUBMITTED   DONE   RUN    IDLE   HOLD  TOTAL JOB_IDS
+alice   CMD: sb          6/22 13:05      _     32      _      _      _ 14297940.23-99
+alice   DAG: 14306351    6/22 13:47     27    113     65      _    205 14306411.0 ...
+alice   CMD: job.sh      6/22 13:56      _      _     12      _      _ 14308195.6-58
+alice   DAG: 14361197    6/22 16:04    995      1      _      _   1000 14367836.0
+
+ +

HTCondor will automatically group jobs into "batches" for this +display. However, it's also possible for you to specify groups of jobs +as a "batch" yourself. You can either:

+ +
    +
  • +

    Add the following line to your submit file:

    + +
     batch_name = "CoolJobs" 
    +
    +
  • +
  • +

    Use the -batch-name option with condor_submit:

    + +
    [alice@submit]$ condor_submit submit_file.sub -batch-name CoolJobs
    +
    +
  • +
+ +

Either option will create a batch of jobs with the label "CoolJobs".

+ +

+ +

2. View all jobs.

+ +

To display more detailed condor_q output (where each job is listed on a +separate line), you can use the batch name or any existing grouping +constraint (ClusterId or other "-constraint" options - see +below for more on constraints) and the -nobatch flag.

+ +

Looking at a batch of jobs with the same ClusterId would look like +this:

+ +
[alice@submit]$ condor_q -nobatch 195
+
+ ID     OWNER    SUBMITTED     RUN_TIME ST PRI SIZE CMD
+195.10  alice    6/22 13:00   0+00:00:00 H  0    0.0 job.sh
+195.14  alice    6/22 13:00   0+00:01:44 R  0    0.0 job.sh
+195.16  alice    6/22 13:00   0+00:00:26 R  0    0.0 job.sh
+195.39  alice    6/22 13:00   0+00:00:05 R  0    0.0 job.sh
+195.40  alice    6/22 13:00   0+00:00:00 I  0    0.0 job.sh
+195.41  alice    6/22 13:00   0+00:00:00 I  0    0.0 job.sh
+195.53  alice    6/22 13:00   0+00:00:00 I  0    0.0 job.sh
+195.57  alice    6/22 13:00   0+00:00:00 I  0    0.0 job.sh
+195.58  alice    6/22 13:00   0+00:00:00 I  0    0.0 job.sh
+
+9 jobs; 0 completed, 0 removed, 5 idle, 3 running, 1 held, 0 suspended
+
+ +

This was the default view for condor_q from January 2016 until July +2016.

+ +

+ +

3. View jobs from all users.

+ +

By default, condor_q will just show you information about your +jobs. To get information about all jobs in the queue, type:

+ +
[alice@submit]$ condor_q -all
+
+ +

This will show a list of all job batches in the queue. To see a list of +all jobs (individually, not in batches) for all users, combine the +-all and -nobatch options with condor_q. This was the default view +for condor_q before January 2016.

+ +

+ +

4. Determine why jobs are on hold.

+ +

If your jobs have gone on hold, you can see the hold reason by running:

+ +
[alice@submit]$ condor_q -hold
+
+ +

or

+ +
[alice@submit]$ condor_q -hold JobId 
+
+ +

The first will show you the hold reasons for all of your jobs that +are on hold; the second will show you the hold reason for a specific +job. The hold reason is sometimes cut off; try the following to see the +entire hold reason:

+ +
[alice@submit]$ condor_q -hold -af HoldReason
+
+ +

If you aren't sure what your hold reason means, email +chtc@cs.wisc.edu.

+ +

+ +

5. Find out why jobs are idle

+ +

condor_q has an option to describe why a job hasn't matched and +started running. Find the JobId of a job that hasn't started running +yet and use the following command:

+ +
$ condor_q -better-analyze JobId
+
+ +

After a minute or so, this command should print out some information +about why your job isn't matching and starting. This information is not +always easy to understand, so please email us with the output of this +command if you have questions about what it means.

+ +

+ +

6. Find out where jobs are running.

+ +

To see which computers your jobs are running on, use:

+ +
[alice@submit]$ condor_q -nobatch -run
+428.0   alice        6/22  17:27   0+00:07:17 slot1_12@e313.chtc.wisc.edu
+428.1   alice        6/22  17:27   0+00:07:11 slot1_8@e376.chtc.wisc.edu
+428.2   alice        6/22  17:27   0+00:07:16 slot1_15@e451.chtc.wisc.edu
+428.3   alice        6/22  17:27   0+00:07:16 slot1_17@e277.chtc.wisc.edu
+428.5   alice        6/22  17:27   0+00:07:16 slot1_9@e351.chtc.wisc.edu
+428.7   alice        6/22  17:27   0+00:07:16 slot1_1@e373.chtc.wisc.edu
+428.8   alice        6/22  17:27   0+00:07:16 slot1_5@e264.chtc.wisc.edu
+
+ +

+ +

7. View jobs by DAG.

+ +

If you have submitted multiple DAGs to the queue, it can be hard to tell +which jobs belong to which DAG. The -dag option to condor_q will +sort your queue output by DAG:

+ +
[alice@submit]$ condor_q -nobatch -dag
+ ID      OWNER/NODENAME   SUBMITTED     RUN_TIME ST PRI SIZE CMD               
+460.0   alice        11/18 16:51   0+00:00:17 R  0   0.3  condor_dagman -p 0
+462.0    |-0           11/18 16:51   0+00:00:00 I  0   0.0  print.sh
+463.0    |-1           11/18 16:51   0+00:00:00 I  0   0.0  print.sh
+464.0    |-2           11/18 16:51   0+00:00:00 I  0   0.0  print.sh
+461.0   alice        11/18 16:51   0+00:00:09 R  0   0.3  condor_dagman -p 0
+465.0    |-0           11/18 16:51   0+00:00:00 I  0   0.0  print.sh
+466.0    |-1           11/18 16:51   0+00:00:00 I  0   0.0  print.sh
+467.0    |-2           11/18 16:51   0+00:00:00 I  0   0.0  print.sh
+
+8 jobs; 0 completed, 0 removed, 6 idle, 2 running, 0 held, 0 suspended
+
+ +

+ +

8. View all details about a job.

+ +

Each job you submit has a series of attributes that are tracked by +HTCondor. You can see the full set of attributes for a single job by +using the "long" option for condor_q like so:

+ +
[alice@submit]$ condor_q -l JobId 
+...
+Iwd = "/home/alice/analysis/39909"
+JobPrio = 0
+RequestCpus = 1
+JobStatus = 1
+ClusterId = 19997268
+JobUniverse = 5
+RequestDisk = 10485760
+RequestMemory = 4096
+DAGManJobId = 19448402
+...
+
+ +

Attributes that are often useful for checking on jobs are:

+ +
    +
  • Iwd: the job's submission directory on the submit node
  • +
  • UserLog: the log file for a job
  • +
  • RequestMemory, RequestDisk: how much memory and disk you've +requested per job
  • +
  • MemoryUsage: how much memory the job has used so far
  • +
  • JobStatus: numerical code indicating whether a job is idle, +running, or held
  • +
  • HoldReason: why a job is on hold
  • +
  • DAGManJobId: for jobs managed by a DAG, this is the JobId of the +parent DAG
  • +
+ +

+ +

9. View specific details about a job using auto-format

+ +

If you would like to see specific attributes (see above) for a job or +group of jobs, you can use the "auto-format" (-af) option to +condor_q which will print out only the attributes you name for a +single job or group of jobs.

+ +

For example, if I would like to see the amount of memory and disk I've +requested for all of my jobs, and how much memory is currently being +used, I can run:

+ +
[alice@submit]$ condor_q -af RequestMemory RequestDisk MemoryUsage
+1 325 undefined
+1 325 undefined
+2000 1000 245
+2000 1000 220
+2000 1000 245
+
+ +

+ +

10. Constraining the output of condor_q.

+ +

If you would like to find jobs that meet certain conditions, you can use +condor_q's "constraint" option. For example, suppose you want to +find all of the jobs associated with the DAGMan Job ID "234567". You +can search using:

+ +
[alice@submit]$ condor_q -constraint "DAGManJobId == 234567" 
+
+ +

To use a name (for example, a batch name) as a constraint, you'll need +to use multiple sets of quotation marks:

+ +
[alice@submit]$ condor_q -constraint 'JobBatchName == "MyJobs"'
+
+ +

One common use of constraints is to find all jobs that are running, +held, or idle. To do this, use a constraint with the JobStatus +attribute and the appropriate status number - the status codes can be +found in Appendix +A +of the HTCondor Manual.

+ +
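For example, to list only your idle jobs (JobStatus 1) or your running jobs (JobStatus 2), you could run:

[alice@submit]$ condor_q -constraint "JobStatus == 1"
[alice@submit]$ condor_q -constraint "JobStatus == 2"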

Remember condor_q -hold from before? In the background, the +-hold option is constraining the list of jobs to jobs that are on hold +(using the JobStatus attribute) and then printing out the HoldReason +attribute. Try running:

+ +
[alice@submit]$ condor_q -constraint "JobStatus == 5" -af ClusterId ProcId HoldReason
+
+ +

You should see something very similar to running condor_q -hold!

+ +

+ +

11. Remove a held job from the queue

+ +

To remove a job held in the queue, run:

+ +
[alice@submit]$ condor_rm <JobID>
+
+ +

This will remove the job from the queue. Once you have made changes to allow the job to run successfully, the job can be resubmitted using condor_submit.

+ +
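For example, after fixing the problem (submit file name hypothetical):

[alice@submit]$ condor_submit my_job.sub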
+ +

This page takes some of its content and formatting from this HTCondor +reference +page.

+ +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/configure-ssh.html b/preview-fall2024-info/uw-research-computing/configure-ssh.html new file mode 100644 index 000000000..be97d4e67 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/configure-ssh.html @@ -0,0 +1,611 @@ + + + + + + +Automate CHTC Log In + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Automate CHTC Log In +

+ +

This guide describes

+
    +
  • how to authenticate with Duo when logging into CHTC’s HTC and HPC systems
  • +
  • how to set your login (SSH) configuration to “reuse” a two-factor authenticated +connection over a certain period of time.
  • +
  • terminals and applications that are known to support persistent connections
  • +
+ +

Authentication with Duo

+ +

As of December 2022, accessing CHTC resources +requires two-factor authentication. The first “factor” uses your NetID password +(or SSH keys) and the second “factor” is authentication using Duo, via either a +Duo fob or the Duo app.

+ +

See the following video for a demonstration of two-factor authentication with Duo +when logging into CHTC:

+ + + +

Re-Using SSH Connections

+ +

To reduce the number of times it is necessary to enter your credentials, it’s possible +to customize your SSH configuration in a way that allows you to “reuse” a connection +for logging in again or moving files. This configuration is optional, and +most useful if you will connect to +the same server multiple times in a short window, for example, when uploading or +downloading files.

+ +

WARNING: This guide describes how to configure your local machine to not require +reentering your NetID password or Duo authentication each time you login. +This should ONLY be used on secure devices that you manage - it should +not be used on any shared laptop, desktop, or research group resource. Users +found violating this policy risk having their CHTC account permanently deactivated.

+ +

The instructions below are meant for users who can use a terminal (Mac, Linux, newer Windows operating systems):

+ +
    +
  1. +

    Open a terminal window.

    +
  2. +
  3. Create (or edit) your personal SSH configuration file at ~/.ssh/config to use +what’s called “ControlMaster”. +This is the text that should be added to a file called config in the .ssh directory in your computer’s home directory: +
     Host *.chtc.wisc.edu
    +   # Turn ControlMaster on
    +   ControlMaster auto
    +   # ControlMaster connection will persist
    +   # for 2 hours of idleness, after which
    +   # it will disconnect
    +   ControlPersist 2h
    +   # Where to store files that represent
    +   # the ControlMaster persistent connections
    +   ControlPath ~/.ssh/connections/%r@%h:%p
    +
    +

    If you’re not able to find or create the config file, executing the code below from a terminal on your computer + will add the right information to the config file:

    +
     # Let's create (or add to) our SSH client configuration file. 
    + echo "
    + Host *.chtc.wisc.edu
    +   # Turn ControlMaster on
    +   ControlMaster auto
    +   # ControlMaster connection will persist
    +   # for 2 hours of idleness, after which
    +   # it will disconnect
    +   ControlPersist 2h
    +   # Where to store files that represent
    +   # the ControlMaster persistent connections
    +   ControlPath ~/.ssh/connections/%r@%h:%p" >> ~/.ssh/config
    +
    +
  4. +
  5. You should also create a directory that will be used to track connections. In +the same .ssh directory, make a folder called connections by typing: +
     $ mkdir -p ~/.ssh/connections
    +
    + +

    Once you log in to a CHTC server, this is where the system will store information + about your previous connections so that you do not have to reenter your + password or Duo authenticate.

    +
  6. +
  7. Now, log into your CHTC submit server or login node as normal. The first time you log in, you will need to use +two-factor authentication, but subsequent logins to that machine will not require +authentication as long as they occur within the time value used in +the ControlPersist configuration option (so in this example, 2 hours).
  8. +
+ +

If you use PuTTY to log in from Windows, go to +the Connection -> SSH section in the “Category” menu on the left side, +and then check the “Share SSH Connection if possible” box. If you don’t +see this option, try downloading a newer version of PuTTY.

+ +

Ending “Stuck” Connections

+ +

Sometimes a connection goes stale and you can’t reconnect using it, even if +it is within the timeout window. In this case, you can avoid using the existing +connection by removing the relevant file in ~/.ssh/connections. This will probably +look something like:

+ +
$ ls ~/.ssh/connections/
+alice@submit.chtc.wisc.edu:22
+$ rm ~/.ssh/connections/alice@submit.chtc.wisc.edu:22
+
+ +

Connection settings

+ +

Note that all port forwarding, including X display forwarding, must be set up by +the initial connection and cannot be changed. If you forget to use -Y on the initial +connection, you will not be able to open X programs on subsequent connections.

+ +
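For example, to request X forwarding on the initial connection (hostname as listed in our connecting guide; replace with your own submit server):

$ ssh -Y username@ap2001.chtc.wisc.edu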

File Transfer Tools

+ +

There are a variety of tools that people use for transferring and editing files, +such as WinSCP and MobaXterm. Some of these tools can use your SSH configuration +or have options that avoid requiring Duo 2FA every time a file is uploaded, +downloaded, or edited, but some cannot.

+ +

Known to support persistent connections

+ +
    +
  • +

    Linux, Mac, and Windows Subsystem for Linux (WSL) terminals

    +
  • +
  • +

    WinSCP

    + +

    May need to adjust preferences. Within WinSCP:

    + +
      +
    1. Go to Options, then Preferences, and click on Background under the Transfer section.
    2. +
    3. Set ‘Maximal number of transfers at the same time:’ to 1.
    4. +
    5. Make sure ‘Use multiple connections for single transfer’ checkbox is checked.
    6. +
    7. Click ‘OK’ to save.
    8. +
    +
  • +
  • +

    Cyberduck (taken from these docs)

    + +

    Cyberduck does not use SSH configurations; therefore, the following setting + can be used to enable connection persistence. Within Cyberduck:

    + +
      +
    1. Select Preferences, then the Transfers button, and then the General section.
    2. +
    3. Under “Transfers”, use the “Transfer Files” drop-down to select “Use browser +connection”.
    4. +
    +
  • +
+ +

Known to NOT support ControlMaster or similar persistent connections

+ +
    +
  • +

    Windows PowerShell

    +
  • +
  • +

    File transfer tools from Panic, like Transmit and Nova

    +
  • +
+ +

Other Tools

+ +

For those on spotty wireless or who move around a lot with their connection +(and on *nix), the open-source shell Mosh (https://mosh.org/) can +keep sessions open as you change connections. Note that Mosh doesn’t support the +following SSH features:

+
    +
  • ControlMaster (connection multiplexing)
  • +
  • X11 forwarding
  • +
  • Port forwarding
  • +
+ + +
+
+ + + + +
+ + + + + + +
General Guides
+
+ + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/connecting.html b/preview-fall2024-info/uw-research-computing/connecting.html new file mode 100644 index 000000000..2475e0c3b --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/connecting.html @@ -0,0 +1,633 @@ + + + + + + +Log In to CHTC Resources + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Log In to CHTC Resources +

+ +

This guide assumes +that you have already gotten a CHTC account for either our high +throughput or high performance compute systems. If you haven't, see our +getting started page.

+ + + +

+ +

1. Accessing the Submit Servers

+ +

You will need the following information to log into our CHTC submit +servers or head nodes:

+ +

Username and Password

+ +
    +
    • UW-Madison NetID and password
  • +
+ +

Hostname

+ + + + + + + + + + + + + + + +
HTC System
ap2001.chtc.wisc.edu (formerly submit1.chtc.wisc.edu) - typically for accounts created before June 2019, between March 2021 - June 2022, or after Feb 1, 2023
ap2002.chtc.wisc.edu (formerly submit2.chtc.wisc.edu) - typically for accounts created between June 2019 - February 2021 or between July 1, 2022 - Jan 31, 2023
+ + + + + + + + + + + + + + + +
HPC Cluster
spark-login.chtc.wisc.edu
hpclogin3.chtc.wisc.edu - access the old HPC cluster, slated to be decommissioned
+ +

As of December 2022, we also require two-factor authentication with Duo to +access CHTC resources.

+ +
+

Are you off-campus?
+All of our CHTC submit servers and head nodes are firewalled to block +log-ins from off-campus. If you are off-campus and want to log in, you +can either:

+ +
    +
    • Activate the campus Virtual Private Network (VPN) (more details on how to set this up are on +DoIT’s VPN webpage). This will allow you to join the campus network when working off-campus.
  • +
  • Log into another computer that is on campus (typically by SSH-ing into that computer) and then SSH to our submit server.
  • +
+ +

In either case, it will appear like you are on-campus, and you should +then be able to log into CHTC as usual.

+
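For example, a sketch of the second option, where the first hostname is a hypothetical on-campus machine you already have access to:

$ ssh netid@some-campus-host.wisc.edu
$ ssh netid@ap2001.chtc.wisc.edu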
+ +

+ +

2. Logging In

+ +

Using the information described above, you can log in to servers in two +different ways -- from the command line or using an SSH program:

+ +

+ +

A. On the command line

+ +

On Mac, Linux, and modern Windows (10+) systems, you can use the "Terminal" application to +log in. Open a terminal window and use the following command to connect +to the appropriate server:

+ +
$ ssh username@hostname
+
+ +

You will be prompted for your password, and then for Duo +authentication.

+ +

+ +

B. Using an SSH program (Windows)

+ +

There are multiple programs to connect to remote servers for Windows. We +recommend "PuTTY", which can be downloaded +here. +To log in, click on the PuTTY executable (putty.exe). You should see a +screen like this:

+ +

+ +

Fill in the hostname as described in part 1. You should use Port 22 and +connect using "ssh" -- these are usually the defaults. After you +click "connect" you will be prompted to fill in your username and +password, and then to authenticate with Duo.

+ +

Note that once you have submitted jobs to the queue, you can end your +logged-in session (by typing exit). Your jobs will run and return +output without you needing to be connected.

+ + + +

C. Re-Using SSH Connections

+ +

To reduce the number of times it is necessary to enter your credentials, it’s +possible to customize your SSH configuration in a way that allows you to “reuse” +a connection for logging in again or moving files. More details are shown +in this guide: Automating CHTC Log In

+ +

+ +

3. Learning About the Command Line

+ +

Why learn about the command line? If you haven't used the command +line before, it might seem like a big challenge to get started, and +easier to use other tools, especially if you have a Windows computer. +However, we strongly recommend learning more about the command line for +multiple reasons:

+ +
    +
  • You can do most of what you need to do in CHTC by learning a few +basic commands.
  • +
  • With a little practice, typing on the command line is significantly +faster and much more powerful than using a point-and-click graphic +interface.
  • +
  • Command line skills are useful for more than just large-scale +computing.
  • +
+ +

For a good overview of command line tools, see the Software Carpentry +Unix Shell lesson. In +particular, we recommend the sections on:

+ + + + +
+
+ + + + +
+ + + + + + +
General Guides
+
+ + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/dagman-workflows.html b/preview-fall2024-info/uw-research-computing/dagman-workflows.html new file mode 100644 index 000000000..5f783d9c3 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/dagman-workflows.html @@ -0,0 +1,678 @@ + + + + + + +Workflows with HTCondor's DAGMan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Workflows with HTCondor's DAGMan +

+ +

Overview

+ +

If your work requires jobs that run in a particular sequence, you may benefit +from a workflow tool that submits and monitors jobs for you in the correct +order. A simple workflow manager that integrates with HTCondor is DAGMan, +or “DAG Manager,” where DAG stands for directed acyclic graph, the +typical picture of a workflow.

+ +
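As a minimal sketch (submit file names hypothetical), a DAG input file lists each job's submit file and the dependencies between jobs, and the whole workflow is submitted with condor_submit_dag:

# my.dag: run job A, then job B once A completes
JOB A step_a.sub
JOB B step_b.sub
PARENT A CHILD B

[alice@submit]$ condor_submit_dag my.dag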

Learning Resources

+ +

This talk (originally presented at HTCondor Week 2020) gives a good overview of +when to use DAGMan and its most useful features:

+ +

+DAGMan Talk +

+ +

For full details on various DAGMan features, see the HTCondor manual page:

+ + + +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/dask.html b/preview-fall2024-info/uw-research-computing/dask.html new file mode 100644 index 000000000..b81d4c519 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/dask.html @@ -0,0 +1,443 @@ + + + + + + +Using Dask at CHTC + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Using Dask at CHTC +

+ +

Dask +is a Python library for parallel computing. +Though it is not the +traditional HTCondor workflow, it is possible to use +Dask on the CHTC pool through a special adapter package provided by CHTC. +This guide describes the situations in which you should consider using +Dask instead of the traditional workflow, and will point you toward the +documentation for the adapter package (which will guide you through +actually using it).

+ +
+

This is a new how-to guide on the CHTC website. Recommendations and +feedback are welcome via email (chtc@cs.wisc.edu) or by creating an +issue on the CHTC website GitHub repository: Create an issue

+
+ +

What is Dask?

+ +

Dask +is a Python library that can “scale up” Python code in two ways:

+
    +
  • “Low-level” parallelism, through transparently-parallel calculations on familiar interfaces like numpy arrays.
  • +
  • “High-level” parallelism, through an explicit run-functions-in-parallel interface.
  • +
+ +

Both kinds of parallelism can be useful, depending on your work. +For example, Dask could be used to perform data analysis on a single multi-TB +dataframe stored in distributed memory, as if it was all stored locally. +It could also be used to run thousands of independent simulations across +a cluster, aggregating their results locally as they finish. +Dask can also smoothly handle cases between these extremes (perhaps each of your +independent simulations also needs a large amount of memory?).

+ +
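As a minimal illustration of the two styles using Dask's standard interfaces (this is generic Dask, not CHTC-specific; the simulate function is a hypothetical stand-in for your own work):

import dask
import dask.array as da

# "Low-level" parallelism: a large array split into chunks,
# with operations on the chunks computed in parallel
x = da.random.random((10_000, 10_000), chunks=(2_000, 2_000))
print(x.mean().compute())

# "High-level" parallelism: run independent function calls in parallel
@dask.delayed
def simulate(seed):
    return seed ** 2  # stand-in for an expensive, independent computation

results = dask.compute(*[simulate(s) for s in range(100)])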

Dask also “scales down”: it runs the same way on your laptop as it does on +a cluster, thereby providing a smooth transition between running on +local resources and running on something like the CHTC pool.

+ +

When should I use Dask at CHTC?

+ +

Several use cases are described below where you might consider using Dask for parallelism +in CHTC instead of the traditional HTCondor workflow +of creating jobs and DAGs:

+ +
    +
  • You are already using Dask for parallelism and want to smoothly scale +up your computing resources. Note that many foundational libraries in the +scientific Python ecosystem, like xarray, +now use Dask internally.
  • +
  • You are already using something like +multiprocessing or +joblib +for high-level parallelism. +Dask’s high-level parallelism interface is fairly similar to these libraries, +and switching from them to Dask should not involve too much work.
  • +
  • You can make your overall workflow more efficient by adjusting it based +on intermediate results. +For example, +adaptive hyperparameter optimization +can be significantly more efficient than something like a random grid search, +but requires a “controller” to guide the process at every step.
  • +
  • You want to operate on single arrays or dataframes that are larger +than can be stored in the memory of a single average CHTC worker +(more than a few GB). Dask can store this kind of data in “chunks” on workers +and seamlessly perform calculations on the chunks in parallel.
  • +
  • You want your workflow to “scale down” to local resources. Being able to run +your workflow locally may make developing and testing it easier.
  • +
  • You want a more interactive way of using the CHTC pool. +The adapter package provides tools for running Jupyter Notebooks on the +CHTC pool, connected to your Dask cluster. +This can be useful for debugging or inspecting the progress of your workflows.
  • +
+ +

You may also be interested in Dask’s own +“Why Dask?” page.

+ +

If you are unsure whether you should use Dask or the traditional workflow, +please get in touch with a research computing facilitator by emailing +chtc@cs.wisc.edu to set up a consultation.

+ +

How do I use Dask at CHTC?

+ +

Dask integration with the CHTC pool is provided by the +Dask-CHTC package. +See that package’s documentation +for details on how to get started.

+ +
+
+ + + + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/docker-build.html b/preview-fall2024-info/uw-research-computing/docker-build.html new file mode 100644 index 000000000..75cbda563 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/docker-build.html @@ -0,0 +1,1011 @@ + + + + + + +Build a Docker Container Image + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Build a Docker Container Image +

+ +

Linux containers are a way to build a self-contained environment that +includes software, libraries, and other tools. CHTC currently supports +running jobs inside Docker +containers. This guide describes how to build a Docker image +that you can use for running jobs in CHTC. For information on using +this image for jobs, see our Docker Jobs guide.

+ +

Overview

+ +

Note that all the steps below should be run on your own computer, not +in CHTC.

+ +

Docker images can be created using a special file format +called a “Dockerfile”. This file has commands that allow you to:

+ +
    +
  • use a pre-existing Docker image as a base
  • +
  • add files to the image
  • +
  • run installation commands
  • +
  • set environment variables
  • +
+ +

You can then “build” an image from this +file, test it locally, and push it to DockerHub, where +HTCondor can then use the image to build containers to run jobs in. +Different versions of the image can be labeled with different version +“tags”.

+ +

This guide has:

+ +
    +
  1. Step by Step Instructions
  2. +
  3. Examples
  4. +
+ +

A. Step by Step Instructions

+ +

1. Set Up Docker on Your Computer

+ +

If you haven’t already, create a DockerHub account and install +Docker on your computer. You’ll want to look for the Docker Community +Edition +for your operating system. It sometimes takes some time for Docker to +start, especially the first time. Once Docker starts, it won’t open a +window; you’ll just see a little whale and container icon in one of your +computer’s toolbars. In order to actually use Docker, you’ll need to +open a command line program (like Terminal, or Command Prompt) and run +commands there.

+ +

2. Explore Docker Containers (optional)

+ +

If you have never used Docker before, we recommend exploring a pre-existing container +and testing out installation steps interactively before creating a Dockerfile. See the +first half of this guide: Exploring and Testing a Docker Container

+ +

3. Create a Dockerfile

+ +

A Dockerfile is a plain text file with keywords that add elements to a +Docker image. There are many keywords that can be used in a Dockerfile (documented on +the Docker website here: Dockerfile +keywords), but we will use a +subset of these keywords following this basic outline:

+ +
    +
  • Starting point: Which Docker image do you want to start with?
  • +
  • Additions: What needs to be added? Folders? Data? Other software?
  • +
  • Environment: What variables (if any) are set as part of the software installation?
  • +
+ +

Create the file

+ +

Create a blank text file named Dockerfile. If you are planning on making +multiple images for different parts of your workflow, +you should create a separate folder for each +new image, with a Dockerfile inside each of them.

+ +

Choose a base image with FROM

+ +

Usually you don’t want to start building your image from scratch. +Instead you’ll want to choose a “base” image to add things to.

+ +

You can find a base image by searching DockerHub. If you’re +using a scripting language like Python, R, or Perl, you could start with +the “official” image for that language. If you’re not sure what to +start with, using a basic Linux image (Debian, Ubuntu, and CentOS are common +examples) is often a good place to start.

+ +

Images often have tagged versions. Besides choosing the image +you want, make sure to choose a version by clicking on the “Tags” tab of +the image.

+ +

Once you’ve decided on a base image and version, add it as the first +line of your Dockerfile, like this:

+ +
FROM repository/image:tag
+
+ +

Some images are maintained by DockerHub itself +(these are the “official” images mentioned above), +and do not have a repository. +For example, to start with CentOS 7, +you could use

+ +
FROM centos:7
+
+ +

while starting from one of +HTCondor’s HTC Jupyter notebook images +might look like

+ +
FROM htcondor/htc-minimal-notebook:2019-12-02
+
+ +

When possible, you should use a specific tag +(not the automatic latest tag) +in FROM statements.

+ +

Here are some base images you might find useful to build off of:

+ + + +

Install packaged software with RUN

+ +

The next step is the most challenging. We need to add commands to the +Dockerfile to install the desired software. There are a few standard ways to +do this:

+ +
    +
  • Use a Linux package manager. This is usually apt-get for Debian-based +containers (e.g, Ubuntu) or yum for RedHat Linux containers (e.g., CentOS).
  • +
  • Use a software-specific package manager (like pip or conda for Python).
  • +
  • Use installation instructions (usually a progression of configure, +make, make install).
  • +
+ +

Each of these options will be prefixed by the RUN keyword. You can +join together linked commands with the && symbol; to break lines, put +a backslash \ at the end of the line. RUN can execute any command inside the +image during construction, but keep in mind that the only thing kept in the final +image is changes to the filesystem (new and modified files, directories, etc.).

+ +

For example, suppose that your job’s executable ends up running Python and +needs access to the packages numpy and scipy, as well as the Unix tool wget. +Below is an example of a Dockerfile that uses RUN to install these packages +using the system package manager and Python’s built-in package manager.

+ +
# Build the image based on the official Python version 3.8 image
+FROM python:3.8
+
+# Our base image happens to be Debian-based, so it uses apt-get as its system package manager
+# Use apt-get to install wget 
+RUN apt-get update \
+ && apt-get install -y wget
+
+# Use RUN to install Python packages (numpy and scipy) via pip, Python's package manager
+RUN pip3 install numpy scipy
+
+ +

If you need to copy specific files (like source code) from your computer into the +image, place the files in the same folder as the +Dockerfile and use the COPY keyword. You could also download files +within the image by using the RUN keyword and commands like wget +or git clone.

+ +

For example, suppose that you need to use +JAGS +and the +rjags package for R. +If you have the +JAGS source code +downloaded next to the Dockerfile, you could compile and +install it inside the image like so:

+ +
FROM rocker/r-ver:3.4.0
+
+# COPY the JAGS source code into the image under /tmp
+COPY JAGS-4.3.0.tar.gz /tmp
+
+# RUN a series of commands to unpack the JAGS source, compile it, and install it
+RUN cd /tmp \
+ && tar -xzf JAGS-4.3.0.tar.gz \
+ && cd JAGS-4.3.0 \
+ && ./configure \
+ && make \
+ && make install
+
+# install the R package rjags
+RUN install2.r --error rjags
+
+ +

Set up the environment with ENV

+ +

Your software might rely on certain environment variables being set correctly.

+ +

One common situation is that if you’re installing a program to a custom location +(like a home directory), you may need to add that directory to the image’s system +PATH. For example, if you installed some scripts to /home/software/bin, you +could use

+ +
ENV PATH="/home/software/bin:${PATH}"
+
+ +

to add them to your PATH.

+ +

You can set multiple environment variables at once:

+ +
ENV DEBIAN_FRONTEND=noninteractive \
+    LC_ALL=en_US.UTF-8 \
+    LANG=en_US.UTF-8 \
+    LANGUAGE=en_US.UTF-8
+
+ +

4. Build, Name, and Tag the Image

+ +

So far we haven’t actually created the image – we’ve just been +listing instructions for how to build the image in the Dockerfile. +Now we are ready to build the image!

+ +

First, decide on a name for the image, as well as a tag. Tags are +important for tracking which version of the image you’ve created (and +are using). A simple tag scheme would be to use numbers (e.g. v0, v1, +etc.), but you can use any system that makes sense to you.

+ +

Because HTCondor caches Docker images by tag, we strongly recommend that you +never use the latest tag, and always build images with a new, unique tag that +you then explicitly specify in new jobs.

+ +

To build and tag your image, open a Terminal (Mac/Linux) or Command +Prompt (Windows) and navigate to the folder that contains your +Dockerfile:

+ +
$ cd directory
+
+ +

(Replace directory with the path to the appropriate folder.)

+ +

Then make sure Docker is running (there should be an icon on +your status bar, and running docker info shouldn’t indicate any errors) and run:

+ +
$ docker build -t username/imagename:tag .
+
+ +

Replace username with your Docker Hub username and replace +imagename and tag with the values of your choice. Note the . at the end +of the command (to indicate “the current directory”).

+ +

If you get errors, try to determine what you may need to add or change +to your Dockerfile and then run the build command again. Debugging a Docker +build is largely the same as debugging any software installation process.

+ +

5. Test Locally

+ +

This page describes how to interact with your new Docker image on your +own computer, before trying to run a job with it in CHTC:

+ + + +

6. Push to DockerHub

+ +

Once your image has been successfully built and tested, you +can push it to DockerHub so that it will be available to run jobs in +CHTC. To do this, run the following command:

+ +
$ docker push username/imagename:tag
+
+ +

(Where you once again replace username/imagename:tag with what you used in +previous steps.)

+ +

The first time you push an image to DockerHub, you may need to run this +command beforehand:

+ +
$ docker login
+
+ +

It should ask for your DockerHub username and password.

+ +
+

Reproducibility

+ +

If you have a free account on Docker Hub, any container image that you +have pushed there will be scheduled for removal if it is not used (pulled) at least once +every 6 months (See the Docker Terms of Service).

+ +

For this reason, and just because it’s a good idea in general, we recommend +creating a file archive of your container image and placing it in whatever space +you use for long-term, backed-up storage of research data and code.

+ +

To create a file archive of a container image, use this command, +changing the name of the archive file and container to reflect the +names you want to use:

+
docker save --output archive-name.tar username/imagename:tag
+
+ +

It’s also a good idea to archive a copy of the Dockerfile used to generate a +container image along with the file archive of the container image itself.

+
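To restore the image from such an archive later (same hypothetical names as above), the docker load command reverses docker save:

docker load --input archive-name.tar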
+ +

7. Running Jobs

+ +

Once your Docker image is on Docker Hub, you can use it to run +jobs on CHTC’s HTC system. See this guide for more details:

+ + + +

B. Examples

+ +

This section holds various example Dockerfile that cover more advanced use cases.

+ +

Installing a Custom Python Package from GitHub

+ +

Suppose you have a custom Python package hosted on GitHub, but not available +on PyPI. +Since pip can install packages directly from git repositories, you could +install your package like this:

+ +
FROM python:3.8
+
+RUN pip3 install git+https://github.com/<RepositoryOwner>/<RepositoryName>
+
+

where you would replace <RepositoryOwner> and <RepositoryName> with your +desired targets.

+ +

QIIME

+ +

This Dockerfile installs QIIME2 based on +these instructions. +It assumes that the Linux 64-bit miniconda +installer has been downloaded into the +directory with the Dockerfile.

+ +
FROM python:3.6-stretch
+
+COPY Miniconda3-latest-Linux-x86_64.sh /tmp
+
+RUN mkdir /home/qiimeuser
+ENV HOME=/home/qiimeuser
+
+RUN cd /tmp \
+ && ./Miniconda3-latest-Linux-x86_64.sh -b -p /home/qiimeuser/minconda3 \
+ && export PATH=/home/qiimeuser/minconda3/bin:$PATH \
+ && conda update conda \
+ && conda create -n qiime2-2017.10 --file https://data.qiime2.org/distro/core/qiime2-2017.10-conda-linux-64.txt
+
+ +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/docker-jobs.html b/preview-fall2024-info/uw-research-computing/docker-jobs.html new file mode 100644 index 000000000..1d2fc78ee --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/docker-jobs.html @@ -0,0 +1,774 @@ + + + + + + +Running HTC Jobs Using Docker Containers + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Running HTC Jobs Using Docker Containers +

+ +

Linux containers are a way to build a self-contained environment that +includes software, libraries, and other tools. This guide shows how to +submit jobs that use Docker containers.

+ +

Overview

+ +

Typically, software in CHTC jobs is installed or compiled locally by +individual users and then brought along to each job, either using the +default file transfer or our SQUID web server. However, another option +is to use a container system, where the software is installed in a +container image. Using a container to handle software can be +advantageous if the software installation 1) has many dependencies, 2) +requires installation to a specific location, or 3) “hard-codes” paths +into the installation.

+ +

CHTC has capabilities to access and start containers and +run jobs inside them. This guide shows how to do this for +Docker containers.

+ +

1. Use a Docker Container in a Job

+ +

Jobs that run inside a Docker container will be almost exactly the same +as “vanilla” HTCondor jobs. The main change is indicating which Docker +container to use and an optional “container universe” option:

+ +
# HTC Submit File
+
+# Provide HTCondor with the name of the Docker container
+container_image = docker://user/repo:tag
+universe = container
+
+executable = myExecutable.sh
+transfer_input_files = other_job_files
+
+log = job.log
+error = job.err
+output = job.out
+
+request_cpus = 1
+request_memory = 4GB
+request_disk = 2GB
+
+queue
+
+ +

In the above, change the address of the Docker container image as +needed based on the container you are using. More information on finding +and making containers is below.

+ +

Integration with HTCondor

+ +

When your job starts, HTCondor will pull the indicated image from +DockerHub, and use it to run your job. You do not need to run any +Docker commands yourself.

+ +

Other pieces of the job (your executable and input files) should be just +like a non-Docker job submission.

+ +

The only additional change may be that your +executable no longer needs to install or unpack your software, since it +will already be present in the Docker container.

+ +

2. Choose or Create a Docker Container Image

+ +

To run a Docker job, you will first need access to a Docker container +image that has been built and placed onto the +DockerHub website. There are two primary ways +to do this.

+ +

A. Pre-existing Images

+ +

The easiest way to get a Docker container image for running a job is to +use a public or pre-existing image on DockerHub. You can find images by +getting an account on DockerHub and searching for the software you want +to use.

+ +

Sample images:

+ + + +

An image supported by a group will be continuously updated and the +versions will be indicated by “tags”. We recommend choosing a specific +tag (or tags) of the container to use in CHTC.

+ +

B. Build Your Own Image

+ +

You can also build your own Docker container image and upload it to +DockerHub. See this link to our guide on building containers or the Docker +documentation for more +information.

+ +

Similarly, we recommend using container tags. Importantly, whenever you make a significant change +to your container, you will want to use a new tag name to ensure that your jobs are getting an +updated version of the container, and not an ‘old’ version that has been cached by DockerHub +or CHTC.

+ +
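For example, after rebuilding a changed container image, reference the new tag in your submit file instead of reusing the old one (names hypothetical):

container_image = docker://username/my-image:v2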

3. Testing

+ +

If you want to test your jobs, you have two options:

+ +
    +
  • We have a guide on exploring and testing Docker containers on your own computer here: + +
  • +
  • You can test a container interactively in CHTC by using a normal Docker job submit file and using the +interactive flag with condor_submit: +
      [alice@submit]$ condor_submit -i docker.sub
    +
    +

    This should start a session inside the indicated Docker container and connect you to it using ssh. Type exit to end the interactive job. Note: Files generated during your interactive job with Docker will not be transferred back to the submit node. If you have a directory on staging, you can transfer the files there instead; if you have questions about this, please contact a facilitator.

    +
  • +
+ + +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/docker-test.html b/preview-fall2024-info/uw-research-computing/docker-test.html new file mode 100644 index 000000000..72a37d975 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/docker-test.html @@ -0,0 +1,926 @@ + + + + + + +Explore and Test Docker Containers + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Explore and Test Docker Containers +

+ +

Linux containers are a way to build a self-contained environment that +includes software, libraries, and other tools. This guide shows how to +explore and test a Docker container on your own computer.

+ +

Overview

+ +

Note that all the steps below should be run on your own computer, not +in CHTC.

+ +

This guide has two sections:

+ + + +

If you’ve never used Docker before, and/or are getting ready to build your own +container image, we recommend starting with the first part of the +guide.

+ +

If you’ve explored Docker already or built your own image and you want to test if it +will work successfully in CHTC’s HTC system, +you can follow the directions in the second section.

+ +

A. Set Up Docker on Your Computer

+ +

If you haven’t already, create a DockerHub account and install +Docker on your computer. You’ll want to look for the Docker Community +Edition +for your operating system. It sometimes takes some time for Docker to +start, especially the first time. Once Docker starts, it won’t open a +window; you’ll just see a little whale and container icon in one of your +computer’s toolbars. In order to actually use Docker, you’ll need to +open a command line program (like Terminal, or Command Prompt) and run +commands there.

+ +

B. Explore Docker Containers

+ +

1. Get a Docker Container Image

+ +

We need to have a local copy of the Docker container image in order to +test it. You can see what container images you already have on your +computer by running:

+ +
$ docker image ls
+
+ +

If you just installed Docker on your computer +and are using it for the first time, this list is probably empty. +If you want to use a pre-made container from Docker Hub, +you will need to “pull” it down to your computer. +If you created a container on your computer, it should already +be in the list of container images.

+ +

If using a container from Docker Hub, find the container and its name, which +will be of the format: username/imagename:tag. Then pull a copy of the container +image to your computer by running the following from either a Terminal +(Mac/Linux) or Command Prompt (Windows):

+ +
$ docker pull username/image:tag
+
+ +

If you run docker image ls again, you should see the container you downloaded +listed.

+ +

2. Explore the Container Interactively

+ +

To actually explore a container, run this command:

+ +
$ docker run -it --rm=true username/image:tag /bin/bash
+
+ +

This will start a running copy of the container and start a command line shell +inside. You should see your command line prompt change to something like:

+ +
root@2191c1169757:/#
+
+ +
+

What Do All the Options Mean?

+ +
    +
  • -it: interactive flag
  • +
    • --rm=true: after we exit, this will clean up the running container so Docker uses less disk space.
  • +
  • username/image:tag: which container to start
  • +
  • /bin/bash: tells Docker that when the container starts, we want a command line (bash) inside to run commands
  • +
+
+ +

If you explore the container using cd and ls, you’ll see that this is a whole, +self-contained file system, separate from your computer. Try running commands with their + --help or --version options to see what’s installed. If you’re planning to create + your own container, try following a few of the installation instructions for the software + you want to use and see what happens.

+ +
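For example, assuming the container provides these tools (adjust for your own software):

root@2191c1169757:/# python3 --version
root@2191c1169757:/# which python3
root@2191c1169757:/# ls /usr/local/bin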

3. Exit the Container

+ +

Once you’re done exploring, type exit to leave the container.

+ +
root@2191c1169757:/# exit
+
+ +

Note that any changes or +commands you ran in the container won’t be saved! Once you exit, the +running container is shut down and removed (although the container image will still be +on your computer, which you can see if you type docker image ls again).

+ +

C. Simulate a CHTC Docker Job

+ +

The directions above were about simply exploring a container. If you want to +simulate what happens in a CHTC job more specifically, we’ll want to do a few things:

+ +
    +
  • create a test working directory, with needed files
  • +
  • have a list of commands to run or a script you want to use as the executable.
  • +
  • use some extra options when running the container.
  • +
+ +

1. Create Working Directory

+ +

For testing, we need a folder on your computer to stand in for the +working directory that HTCondor creates for running your job. Create a folder +for this purpose on your Desktop. The folder’s name shouldn’t include +any spaces. Inside this folder, put all of the files that are normally +inside the working directory for a single job – data, scripts, etc. If +you’re using your own executable script, this should be in the folder.

+ +

Open a Windows Command Prompt or Mac/Linux Terminal to access that +folder, replacing “folder” with the name of the folder you created.

+ +
    +
  • Mac/Linux: +
    $ cd ~/Desktop/folder
    +
    +
  • +
+
    +
  • Windows: +
    $ cd %HOMEPATH%\Desktop\folder
    +
    +
  • +
+ +

2. Plan What to Run

+ +

Once the container starts, you have a few options for testing your job:

+ +
    +
  • Run Commands Directly +
      +
    • When you start the container, you’ll be able to run each command you + want to use, step-by-step. If you have multiple commands, these will eventually + need to be put into a shell script as your executable.
    • +
    • Example: Running multiple steps of a bioinformatics pipeline
    • +
    +
  • +
  • Run an Executable +
      +
    • If you’ve already written a script with all your commands or code, you can + test this in the container.
    • +
    • Examples: Running a shell script with multiple steps, running a machine learning Python script
    • +
    +
  • +
  • Run a Single Command +
      +
    • If you only want to run one command, using a program installed in the Docker + container, you can run this in the container.
    • +
    • Example: Running GROMACS from a container
    • +
    +
  • +
+ +

3. Start the Docker Container

+ +

We’ll use a similar docker run command to start the Docker container, +with a few extra options to better emulate how containers are run in +the HTC system with HTCondor.

+ +

This command can be run verbatim except for the +username, imagename, and tag; these should be whatever you used to +pull or tag the container image.

+ +
    +
  • Mac/Linux: +
    $ docker run --user $(id -u):$(id -g) --rm=true -it \
    +  -v $(pwd):/scratch -w /scratch \
    +  username/imagename:tag /bin/bash
    +
    +
  • +
+
    +
  • Windows: +
    $ docker run --rm=true -it -v ${pwd}:/scratch -w /scratch username/imagename:tag /bin/bash
    +
    +
  • +
+ +

For Windows users, a window may pop up, asking for permission to share +your main drive with Docker. This is necessary for the files to be +placed inside the container. As in the previous section, the docker run command +will start a running copy of the container and start a command line shell +inside.

+ +
+

What Do All the Options Mean? Part 2

+ +

The options that we have added for this example are used in CHTC to make jobs run +successfully and securely.

+ +
    +
  • --user $(id -u):$(id -g): runs the container with more restrictive permissions
  • +
  • -v $(pwd):/scratch: Put the current working directory (pwd) into the container but call it /scratch. +In CHTC, this working directory will be the job’s usual working directory.
  • +
  • -w /scratch: when the container starts, make /scratch the working directory
  • +
+
+ +

4. Test the job

+ +

Your command line prompt should have changed to look like this:

+ +
I have no name!@5a93cb:/scratch$
+
+ +

We can now see if the job would complete successfully!

+ +

If you have a single command or list of commands to run, start running them one by one. +If you have an executable script, you can run it like so:

+ +
I have no name!@5a93cb:/scratch$ ./exec.sh
+
+ +

If your “executable” is software already in the container, run the +appropriate command to use it.

+ +
+

Permission Errors

+ +

The following commands may not be necessary, but if you see messages +about “Permission denied” or a bash error about bad formatting, you +may want to try one (or both) of the following (replacing exec.sh +with the name of your own executable.)

+ +

You may need to add executable permissions to the script for it to run +correctly:

+ +
I have no name!@5a93cb:/scratch$ chmod +x exec.sh
+
+ +

Windows users who are using a bash script may also need to run the +following two commands:

+ +
I have no name!@5a93cb:/scratch$ cat exec.sh | tr -d \\r > temp.sh
+I have no name!@5a93cb:/scratch$ mv temp.sh exec.sh 
+
+
+ +

When your test is done, type exit to leave the container.

+ +

If the program didn’t work, try searching for the cause of the error +messages, or email CHTC’s Research Computing Facilitators.

+ +

If your local test did run successfully, you are now ready to set up +your Docker job to run on CHTC.

+ + + + +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/dos-unix.html b/preview-fall2024-info/uw-research-computing/dos-unix.html new file mode 100644 index 000000000..b725ae607 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/dos-unix.html @@ -0,0 +1,701 @@ + + + + + + +Windows / Linux Incompatibility + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Windows / Linux Incompatibility +

+ +

If your job is running a bash or shell script (includes the header +#!/bin/bash), and it goes on hold, you might be experiencing a +Windows/Linux incompatibility error. Files written in Windows (based on +the DOS operating system) and files written in Mac/Linux (based on the +UNIX operating system) use different invisible characters to mean "end +of a line" in a file. Normally this isn't a problem, except when +writing bash scripts; bash will not be able to run scripts if they have +the Windows/DOS line endings.

+ +

To find why the job went on hold, look for the hold reason, either by +running

+ +
[alice@submit]$ condor_q -af HoldReason
+
+ +

or by looking in the log file.

+ +

If a Windows/Linux incompatibility is the problem, the hold reason will +look something like this:

+ +
Error from slot1_11@e189.chtc.wisc.edu: Failed to execute 
+'/var/lib/condor/execute/slot1/dir_4086540/condor_exec.exe' with 
+arguments 2: (errno=2: 'No such file or directory')
+
+ +

To check if this is the problem, you can open the script in the vi text +editor, using its "binary" mode:

+ +
[alice@submit]$ vi -b hello-chtc.sh
+
+ +

(Replace hello-chtc.sh with the name of your script.) If you see ^M +characters at the end of each line, those are the DOS line endings and +that's the problem.
+(Type :q to quit vi)

+ +

Luckily, there is an easy fix! To convert the script to unix line +endings so that it will run correctly, you can run:

+ +
[alice@submit]$ dos2unix hello-chtc.sh
+
+ +

on the submit node and it will change the format for you. If you release +your held jobs (using condor_release) or re-submit the jobs, you +should no longer get the same error.
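For example, to release a held job after converting the script (use condor_q to find the JobId):

[alice@submit]$ condor_release JobId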

+ +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/file-avail-largedata.html b/preview-fall2024-info/uw-research-computing/file-avail-largedata.html new file mode 100644 index 000000000..45624696f --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/file-avail-largedata.html @@ -0,0 +1,1050 @@ + + + + + + +Managing Large Data in HTC Jobs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Managing Large Data in HTC Jobs +

+ +

Which Option is the Best for Your Files?

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Input Sizes | Output Sizes | Link to Guide | File Location | How to Transfer | Availability, Security
0 - 100 MB per file, up to 500 MB per job | 0 - 5 GB per job | Small Input/Output File Transfer via HTCondor | /home | submit file; filename in transfer_input_files | CHTC, UW Grid, and OSG; works for your jobs
100 MB - 1 GB per repeatedly-used file | Not available for output | Large Input File Availability Via Squid | /squid | submit file; http link in transfer_input_files | CHTC, UW Grid, and OSG; files are made *publicly-readable* via an HTTP address
100 MB - TBs per job-specific file; repeatedly-used files > 1GB | 4 GB - TBs per job | Large Input and Output File Availability Via Staging | /staging | job executable; copy or move within the job | a portion of CHTC; accessible only to your jobs
+ + + +

When submitting jobs to CHTC’s High Throughput Computing (HTC) system, +there is a distinct location for staging data that is too large to be +handled at scale via the default HTCondor file transfer mechanism. This +location should be used for jobs that require input files larger than 100MB +and/or that generate output files larger than 3-4GB.

+ +

To best understand the below information, users should already be +familiar with:

+ +
    +
  1. Using the command-line to: navigate directories, +create/edit/copy/move/delete files and directories, and run intended +programs (aka “executables”).
  2. +
  3. CHTC’s Intro to Running HTCondor Jobs
  4. +
  5. CHTC’s guide for Typical File Transfer
  6. +
+ + + +

1. Policies and Intended Use

+ +
+

USERS VIOLATING ANY OF THE POLICIES IN THIS GUIDE WILL +HAVE THEIR DATA STAGING ACCESS AND/OR CHTC ACCOUNT REVOKED UNTIL CORRECTIVE +MEASURES ARE TAKEN. CHTC STAFF RESERVE THE RIGHT TO REMOVE ANY +PROBLEMATIC USER DATA AT ANY TIME IN ORDER TO PRESERVE PERFORMANCE.

+
+ +

A. Intended Use

+ +

Our large data staging location is only for input and output files that +are individually too large to be managed by our other data movement +methods, HTCondor file transfer or SQUID. This includes individual input files +greater than 100MB and individual output files greater than 3-4GB.

+ +

Users are expected to abide by this intended use expectation and follow the +instructions for using /staging written in this guide (e.g. files placed +in /staging should NEVER be listed in the submit file, but rather accessed +via the job’s executable (aka .sh) script).

+ +

B. Access to Large Data Staging

+ +

Anyone with a CHTC account whose data meets the intended use above can request +space in our large data staging area. A Research Computing Facilitator will +review the request and follow up. If appropriate, access will be granted via +a directory in the system and a quota. Quotas are based on individual user needs; +if a larger quota is needed, see our Request a Quota Change guide.

+ +

We can also create group or shared spaces by request.

+ +

C. User Data Management Responsibilities

+ +

As with all CHTC file spaces:

+ +
    +
  • Keep copies: Our large data staging area is not backed up and has the +possibility of data loss; keep copies of ANY and ALL data in /staging in another, non-CHTC +location.
  • +
  • Remove data: We expect that users remove data from /staging AS +SOON AS IT IS NO LONGER NEEDED FOR ACTIVELY-RUNNING JOBS.
  • +
  • Monitor usage and quota: Each /staging folder has both a size and “items” quota. Quota changes +can be requested as described in our Request a Quota Change guide.
  • +
+ +

CHTC staff reserve the right to remove data from our large data staging +location (or any CHTC file system) at any time.

+ +

D. Data Access Within Jobs

+ +

Staged large data will +be available only within the CHTC pool, on a subset of our total +capacity.

+ +

Staged data are owned by the user, and only the user’s own +jobs can access these files (unless the user specifically modifies unix +file permissions to make certain files available for other users).

+ +

2. Staging Large Data

+ +

In order to stage large data for use on CHTC’s HTC system:

+ +
    +
  • Get a directory: Large data staging is available by request.
  • +
  • Reduce file counts: Combine and compress files that are used together.
  • +
  • Use the transfer server: Upload your data via our dedicated file transfer server.
  • +
  • Remove files after jobs complete: our data staging space is quota controlled and not backed up.
  • +
+ +

A. Get a Directory

+ +

Space in our large data staging area is granted by request. If you think you need +a directory, email CHTC’s Research Computing Facilitators (chtc@cs.wisc.edu).

+ +

The created directory will exist at this path: /staging/username

+ +

B. Reduce File Counts

+ +

Data placed in our large data /staging location +should be stored in as few files as possible (ideally, +one file per job), and will be used by a job only after being copied +from /staging into the job working directory (see below). +Similarly, large output should first be written to the +job working directory then compressed into a single file before being +copied to /staging at the end of the job.

+ +

To prepare job-specific data that is large enough to require staging +and exists as multiple files or directories (or a directory of multiple +files), first create a compressed tar package before placing the file in +/staging (either before submitting jobs, or within jobs before +moving output to /staging). For example:

+ +
$ tar -czvf job_package.tar.gz file_or_dir 
+
+ +

C. Use the Transfer Server

+ +

Movement of data into/out of /staging before and after jobs should +only be performed via CHTC’s transfer server, as below, and not via a +CHTC submit server. After obtaining a user directory within +/staging and an account on the transfer server, copy relevant +files directly into this user directory from your own computer:

+ +
    +
  • Example scp command on your own Linux or Mac computer: +
    $ scp large.file username@transfer.chtc.wisc.edu:/staging/username/ 
    +
    +


    +
  • +
  • If using a Windows computer: +
      +
    • Using a file transfer application, like WinSCP, directly drag the large +file from its location on your computer to a location within +/staging/username/ on transfer.chtc.wisc.edu.
    • +
    +
  • +
+ +

D. Remove Files After Jobs Complete

+ +

As with all CHTC file spaces, data should be removed from /staging AS +SOON AS IT IS NO LONGER NEEDED FOR ACTIVELY-RUNNING JOBS. Even if it +will be used in the future, it should be deleted from /staging and copied +back at a later date. Files can be taken off of /staging using +mechanisms similar to those used to upload files (as above); see the +example below.
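For example, to copy a result file back to your own Linux or Mac computer once your jobs are done (a minimal sketch; large_output.tar.gz stands in for your actual file name):

$ scp username@transfer.chtc.wisc.edu:/staging/username/large_output.tar.gz ./
+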

+ +

3. Using Staged Files in a Job

+ +

As shown above, the staging directory for large data is /staging/username. +All interaction with files in this location should occur within your job’s +main executable.

+ +

A. Accessing Large Input Files

+ +

To use large data placed in the /staging location, add commands to your +job executable that copy input +from /staging into the working directory of the job. Your program should then use +files from the working directory, and your executable should remove the copied +files from the working +directory before the completion of the job (so that they’re not copied +back to the submit server as perceived output).

+ +

Example, if the executable is a shell script:

+ +
#!/bin/bash
+#
+# First, copy the compressed tar file from /staging into the working directory,
+#  and un-tar it to reveal your large input file(s) or directories:
+cp /staging/username/large_input.tar.gz ./
+tar -xzvf large_input.tar.gz
+#
+# Command for myprogram, which will use files from the working directory
+./myprogram large_input.txt myoutput.txt
+#
+# Before the script exits, make sure to remove the file(s) from the working directory
+rm large_input.tar.gz large_input.txt
+#
+# END
+
+ +

B. Moving Large Output Files

+ +

If jobs produce large (more than 3-4GB) output files, have +your executable write the output file(s) to a location within +the working directory, and then make sure to move these large files to +the /staging folder, so that they’re not transferred back to the home directory, as +all other “new” files in the working directory will be.

+ +

Example, if the executable is a shell script:

+ +
#!/bin/bash
+# 
+# Command to save output to the working directory:
+./myprogram myinput.txt output_dir/
+#
+# Tar and mv output to staging, then delete from the job working directory:
+tar -czvf large_output.tar.gz output_dir/ other_large_files.txt
+mv large_output.tar.gz /staging/username/
+rm other_large_files.txt
+#
+# END
+
+ +

C. Handling Standard Output (if needed)

+ +

In some instances, your software may produce very large standard output +(what would typically be printed to the terminal, if you ran the +command yourself, instead of having HTCondor do it). Because HTCondor +captures this standard output in the file named by the submit file’s +“output” line, that “output” file WILL still be +transferred by HTCondor back to your home directory on the submit +server, which can cause problems for you and other users if the captured +standard output is very large.

+ +

In these cases, it is useful to redirect the standard output of commands +in your executable to a file in the working directory, and then move it +into /staging at the end of the job.

+ +

Example, if “myprogram” produces very large standard output and is +run from a script (bash) executable:

+ +
#!/bin/bash
+#
+# script to run myprogram,
+# 
+# redirecting large standard output to a file in the working directory:
+./myprogram myinput.txt myoutput.txt > large_std.out
+# 
+# tar and move large files to staging so they're not copied to the submit server:
+tar -czvf large_stdout.tar.gz large_std.out
+cp large_stdout.tar.gz /staging/username/subdirectory
+rm large_std.out large_stdout.tar.gz
+# END
+
+ +

4. Submit Jobs Using Staged Data

+ +

In order to properly submit jobs using staged large data, always do the following:

+ +
    +
  • Submit from /home: ONLY submit jobs from within your home directory + (/home/username), and NEVER from within /staging.
  • +
+ +

In your submit file:

+ +
    +
  • No large data in the submit file: Do NOT list any /staging files in any of the submit file + lines, including: executable, log, output, error, transfer_input_files. Rather, your + job’s ENTIRE interaction with files in /staging needs to occur + WITHIN each job’s executable, when it runs within the job (as shown above).
  • +
  • Request sufficient disk space: Using request_disk, request an amount of disk +space that reflects the total of a) input data that each job will copy into + the job working directory from /staging, and b) any output that + will be created in the job working directory.
  • +
  • Require access to /staging: Include the CHTC-specific attribute that requires +servers with access to /staging (see the example submit file below).
  • +
+ +

See the below submit file, as an example, which would be submitted from +within the user’s /home directory:

+ +
### Example submit file for a single job that stages large data
+# Files for the below lines MUST all be somewhere within /home/username,
+# and not within /staging/username
+
+executable = run_myprogram.sh
+log = myprogram.log
+output = $(Cluster).out
+error = $(Cluster).err
+
+## Do NOT list the large data files here
+transfer_input_files = myprogram
+
+# IMPORTANT! Require execute servers that can access /staging
+Requirements = (Target.HasCHTCStaging == true)
+
+# Make sure to still include lines like "request_memory", "request_disk", "request_cpus", etc. 
+
+queue
+
+ +
+

Note: in no way should files on /staging be specified in the submit file, +directly or indirectly! For example, do not use the initialdir option +(Submitting Multiple Jobs in Individual Directories) +to specify a directory on /staging.

+
+ +

5. Checking your Quota, Data Use, and File Counts

+ +

You can use the command get_quotas to see what disk +and items quotas are currently set for a given directory path. +This command will also let you see how much disk is in use and how many +items are present in a directory:

+ +
[username@transfer ~]$ get_quotas /staging/username
+
+ +

Alternatively, the ncdu command can also be used to see how many +files and directories are contained in a given path:

+ +
[username@transfer ~]$ ncdu /staging/username
+
+ +

When ncdu has finished running, the output will give you a total file +count and allow you to navigate between subdirectories for even more +details. Type q when you're ready to exit the output viewer. More +info here: https://lintut.com/ncdu-check-disk-usage/
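If you just want a quick, non-interactive file count, standard tools can do the same job (a sketch, mirroring the find-based count used in our S3 guide):

[username@transfer ~]$ find /staging/username -type f | wc -l
+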

+ + + + +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/file-avail-s3.html b/preview-fall2024-info/uw-research-computing/file-avail-s3.html new file mode 100644 index 000000000..ee1a92563 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/file-avail-s3.html @@ -0,0 +1,675 @@ + + + + + + +Managing Large Data in HTC Jobs with S3 Buckets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Managing Large Data in HTC Jobs with S3 Buckets +

+ +

Which Option is the Best for Your Files?

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Input SizesOutput SizesLink to GuideFile LocationHow to TransferAvailability, Security
0 - 100 MB per file, up to 500 MB per job0 - 5 GB per jobSmall Input/Output File Transfer via HTCondor/homesubmit file; filename in transfer_input_filesCHTC, UW Grid, and OSG; works for your jobs
100 MB - 1 GB per repeatedly-used fileNot available for outputLarge Input File Availability Via Squid/squidsubmit file; http link in transfer_input_filesCHTC, UW Grid, and OSG; files are made *publicly-readable* via an HTTP address
100 MB - TBs per job-specific file; repeatedly-used files > 1GB4 GB - TBs per jobLarge Input and Output File Availability Via Staging/stagingjob executable; copy or move within the joba portion of CHTC; accessible only to your jobs
+ + + +

When submitting jobs to CHTC’s High Throughput Computing (HTC) system, +there is a distinct location for staging data that is too large to be +handled at scale via the default HTCondor file transfer mechanism +but needs to be accessed outside of CHTC +(for example, data for jobs that run on the OS Pool).

+ +

To best understand the below information, users should already be +familiar with:

+ +
    +
  1. Using the command-line to: navigate directories, +create/edit/copy/move/delete files and directories, and run intended +programs (aka “executables”).
  2. +
  3. CHTC’s Intro to Running HTCondor Jobs
  4. +
  5. CHTC’s guide for Typical File Transfer
  6. +
+ +

Contents

+ +
    +
  1. Policies and Intended Use
  2. +
  3. Staging Large Data in S3 Buckets
  4. +
  5. Using Staged Files in a Job + +
  6. +
  7. Checking your Data Use and File Counts
  8. +
+ +

1. Policies and Intended Use

+ +
+

USERS VIOLATING ANY OF THE POLICIES IN THIS GUIDE WILL +HAVE THEIR DATA STAGING ACCESS AND/OR CHTC ACCOUNT REVOKED UNTIL CORRECTIVE +MEASURES ARE TAKEN. CHTC STAFF RESERVE THE RIGHT TO REMOVE ANY +PROBLEMATIC USER DATA AT ANY TIME IN ORDER TO PRESERVE PERFORMANCE.

+
+ +

A. Intended Use

+ +

Our S3 data storage is only for input and output files that +are individually too large to be managed by our other data movement +methods, HTCondor file transfer or SQUID, and when these files are +expected to be accessed outside of CHTC. This includes individual input files +greater than 100MB and individual output files greater than 3-4GB.

+ +

Files in our S3 data storage are organized in storage units called +“buckets.” You can think of an S3 bucket like a folder containing a +set of data. Each bucket has a unique name of your choosing and can +contain folders, executable files, data files, and most other types of +files. S3 buckets are protected with a key that is unique to you +(similar to a password) and, when provided with the key, buckets +can be accessed from any machine with an internet connection. CHTC +automatically creates and manages keys for users, so you do not have +to provide your key when managing files in your S3 buckets on CHTC +transfer servers or when submitting jobs on CHTC submit servers that +transfer data from S3 buckets.

+ +

Users are expected to abide by this intended use expectation and follow the +instructions for using S3 buckets written in this guide (e.g. files placed +in S3 buckets should ALWAYS be listed in the submit file).

+ +

B. Getting Access to Create S3 Buckets

+ +

Anyone with a CHTC account whose data meets the intended use above +can request access to create S3 buckets inside CHTC’s S3 data +storage. A Research Computing Facilitator will review the request and +follow up. If appropriate, S3 bucket creation will be enabled for and +a quota will be set on your account. Quotas are based on individual +user needs; if a larger quota is needed, email chtc@cs.wisc.edu with +your request.

+ +

C. User Data Management Responsibilities

+ +

As with all CHTC file spaces:

+ +
    +
  • Keep copies: Our S3 buckets are not backed up and have the +possibility of data loss; keep copies of ANY and ALL data in S3 +buckets in another, non-CHTC location.
  • +
  • Remove data: We expect that users remove data from S3 buckets AS +SOON AS IT IS NO LONGER NEEDED FOR ACTIVELY-RUNNING JOBS.
  • +
  • Monitor usage and quota: Your account has both a size and +number of files quota that applies across all buckets owned by your +account. Quota changes can be requested by emailing chtc@cs.wisc.edu.
  • +
+ +

CHTC staff reserve the right to remove S3 buckets or revoke bucket +creation permission at any time.

+ +

D. Data Access Within Jobs

+ +

Data in a CHTC S3 bucket can be accessed from jobs running almost +anywhere (including most of the OS Pool). HTCondor automatically matches and +runs jobs that use S3 buckets only on machines that support S3 data +transfers.

+ +

Data in CHTC S3 buckets are owned by the user (or a set of users), and +only the user’s (or users’) own jobs can access these files.

+ +

2. Staging Large Data in S3 Buckets

+ +

In order to stage data in an S3 bucket for use on CHTC’s HTC system:

+ +
    +
  • Get S3 bucket creation access: Bucket creation access is granted by request.
  • +
  • Create an S3 bucket: Create a bucket that will contain the data for your project.
  • +
  • Reduce file counts: Combine and compress files that are used together.
  • +
  • Use the transfer server: Upload your data to your bucket via our dedicated file transfer server.
  • +
  • Remove files after jobs complete: Data in S3 buckets are quota controlled and not backed up.
  • +
+ +

A. Get S3 Bucket Creation Access

+ +

CHTC S3 bucket creation access is granted by request. If you think you need +to create S3 buckets, email CHTC’s Research Computing Facilitators (chtc@cs.wisc.edu).

+ +

B. Create an S3 Bucket

+ +

Buckets can be created on a CHTC submit server or the CHTC transfer server +using the mc command:

+ +
[alice@transfer]$ mc mb chtc/my-bucket-name
+
+ +

Each bucket in CHTC must have a unique name, so be descriptive! We +recommend creating a bucket per dataset or per batch of jobs.

+ +

C. Reduce File Counts

+ +

Data placed in S3 buckets should be stored in as few files as possible +(ideally, one file per job). Similarly, large output should first be +written to the job working directory then compressed into a single +file before being transferred back to an S3 bucket at the end of the job.

+ +

To prepare job-specific data that is large enough to require staging +and exists as multiple files or directories (or a directory of multiple +files), first create a compressed tar package before placing the file in +an S3 bucket (either before submitting jobs, or within jobs before +transferring output to a bucket). For example:

+ +
$ tar -czvf job_package.tar.gz file_or_dir
+
+ +

D. Use the Transfer Server

+ +

Movement of large data into/out of S3 buckets before and after jobs +should be performed via CHTC’s transfer server, as below, and +not via a CHTC submit server. After obtaining an account on the +transfer server and creating an S3 bucket, copy relevant files directly into your +home directory from your own computer:

+ +
    +
  • Example scp command on your own Linux or Mac computer: +
    $ scp large-input.file username@transfer.chtc.wisc.edu:/home/username/
    +
    +
  • +
  • If using a Windows computer: +
      +
    • Using a file transfer application, like WinSCP, directly drag the large +file from its location on your computer to a location within +/home/username/ on transfer.chtc.wisc.edu.
    • +
    +
  • +
+ +

Then in an SSH session on the transfer server, copy files in to your +S3 bucket:

+ +
[alice@transfer]$ mc cp large-input.file chtc/my-bucket
+
+ +

E. Remove Files After Jobs Complete

+ +

As with all CHTC file spaces, data should be removed from S3 buckets AS +SOON AS IT IS NO LONGER NEEDED FOR ACTIVELY-RUNNING JOBS. Even if it +will be used again in the future, it should be deleted from the bucket and copied +back at a later date. Files can be taken out of S3 buckets using +mechanisms similar to those used to upload files. In an SSH session on the transfer +server, copy files from your bucket to your home directory:

+ +
[alice@transfer]$ mc cp chtc/my-bucket/large-output.file .
+
+ +

Then copy files from the transfer server to your own computer:

+ +
    +
  • Example scp command on your own Linux or Mac computer: +
    $ scp username@transfer.chtc.wisc.edu:/home/username/large-output.file .
    +
    +
  • +
  • If using a Windows computer: +
      +
    • Using a file transfer application, like WinSCP, directly drag the large +file from its location within /home/username/ on +transfer.chtc.wisc.edu to your computer.
    • +
    +
  • +
+ +

To remove a file inside your S3 bucket, in an SSH session on the +transfer server:

+ +
[alice@transfer]$ mc rm chtc/my-bucket/large-input.file
+[alice@transfer]$ mc rm chtc/my-bucket/large-output.file
+
+ +

To remove an entire bucket (only do this if you are certain the +bucket is no longer needed):

+ +
[alice@transfer]$ mc rb chtc/my-bucket
+
+ +

3. Using Staged Files in a Job

+ +

A. Transferring Large Input Files

+ +

To use data placed in a CHTC S3 bucket, add entries to your submit +file’s transfer_input_files that point to the filename +(e.g. large-input.file) inside your bucket (e.g. my-bucket) on +CHTC’s S3 storage (s3dev.chtc.wisc.edu):

+ +
...
+executable = my_script.sh
+transfer_input_files = s3://s3dev.chtc.wisc.edu/my-bucket/large-input.file
+arguments = large-input.file
+...
+
+ +

B. Moving Large Output Files

+ +

To have your job automatically copy data back to your CHTC S3 bucket, +add file mappings to a transfer_output_remaps command inside your +submit file:

+ +
transfer_output_remaps = "large-output.file = s3://s3dev.chtc.wisc.edu/my-bucket/large-output.file"
+
+ +
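Putting the two pieces together, a submit file that reads input from and writes output to an S3 bucket would contain both lines (a sketch using the same bucket and file names as the examples above):

...
+transfer_input_files = s3://s3dev.chtc.wisc.edu/my-bucket/large-input.file
+transfer_output_remaps = "large-output.file = s3://s3dev.chtc.wisc.edu/my-bucket/large-output.file"
+...
+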

4. Checking Your Data Use and File Counts

+ +

To check what files are in your bucket and the size of the files:

+
[alice@submit]$ mc ls chtc/my-bucket
+
+ +

To check your bucket’s total data usage:

+
[alice@submit]$ mc du chtc/my-bucket
+
+ +

To check your bucket’s file count:

+
[alice@submit]$ mc find chtc/my-bucket | wc -l
+
+ + + + +
+
+ + + + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/file-avail-squid.html b/preview-fall2024-info/uw-research-computing/file-avail-squid.html new file mode 100644 index 000000000..9567366d0 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/file-avail-squid.html @@ -0,0 +1,868 @@ + + + + + + +Transfer Large Input Files Via Squid + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Transfer Large Input Files Via Squid +

+ +

Which Option is the Best for Your Files?

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Input SizesOutput SizesLink to GuideFile LocationHow to TransferAvailability, Security
0 - 100 MB per file, up to 500 MB per job0 - 5 GB per jobSmall Input/Output File Transfer via HTCondor/homesubmit file; filename in transfer_input_filesCHTC, UW Grid, and OSG; works for your jobs
100 MB - 1 GB per repeatedly-used fileNot available for outputLarge Input File Availability Via Squid/squidsubmit file; http link in transfer_input_filesCHTC, UW Grid, and OSG; files are made *publicly-readable* via an HTTP address
100 MB - TBs per job-specific file; repeatedly-used files > 1GB4 GB - TBs per jobLarge Input and Output File Availability Via Staging/stagingjob executable; copy or move within the joba portion of CHTC; accessible only to your jobs
+ + + + + +

SQUID Web Proxy

+ +

CHTC maintains a SQUID web proxy from which pre-staged input files and +executables can be downloaded into jobs using CHTC's proxy HTTP +address.

+ + + +

+ +

1. Applicability

+ +
    +
  • +

    Intended Use:
    + The SQUID web proxy is best for cases where many jobs will use the + same large file (or a few files), including large software. It is not + good for cases when each of many jobs needs a different large + input file, in which case our large data staging + location should be used. Remember that + you're always better off pre-splitting a large input file into + smaller job-specific files if each job only needs some of the large + file's data. If each job needs a large set of many files, you + should create a .tar.gz file containing all the files, and this + file will still need to be less than 1 GB.

    +
  • +
  • +

    Access to SQUID:
    + is granted upon request to chtc@cs.wisc.edu. A user on CHTC submit + servers will be granted a user directory within /squid, which + users should transfer data into via the CHTC transfer server + (transfer.chtc.wisc.edu). As for all CHTC file space, users should + minimize the amount of data on the SQUID web proxy, and should clean + files from the /squid location regularly. CHTC staff reserve the + right to remove any file from /squid when needed to preserve + availability and performance for all users.

    +
  • +
  • +

    Advantages:
    + Files placed on the SQUID web proxy can be downloaded by jobs + running anywhere, because the files are world-readable.

    +
  • +
  • Limitations and Policies: +
      +
    • SQUID cannot be used for job output, as there is no way to +change files in SQUID from within a job.
    • +
    • SQUID is also only capable of delivering individual files up to +1 GB in size.
    • +
    • A change you make to a file within your /squid directory may +not take effect immediately on the SQUID web proxy if you use +the same filename. Therefore, it is important to use a new +filename when replacing a file in your /squid directory.
    • +
    • Jobs should still ALWAYS and ONLY be submitted from within the +user's /home location.
    • +
    • Only the "http" address should be listed in the +"transfer_input_files" line of the submit file. File +locations starting with "/squid" should NEVER be listed in +the submit file.
    • +
    • Users should only have data in /squid that is being used for +currently-queued jobs; CHTC provides no backups of any data in +CHTC systems, and our staff reserve the right to remove any data +causing issues. It is the responsibility of users to keep copies +of all essential data in preparation for potential data loss or +file system corruption.
      +
    • +
    +
  • +
  • Data Security:
    + Files placed in SQUID can only be edited by the owner of the user + directory within /squid, but will end up being world-readable on + the SQUID web proxy in order to be readily downloadable by jobs + (with the proper HTTP address); thus, large files that should be + "private" should not be placed in your user directory in /squid, + and should instead use CHTC's large data staging + space for large-file staging.
  • +
+ +

+ +

2. Using SQUID to Deliver Input Files

+

+
    +
  1. +

    Request a directory in SQUID. Write to chtc@cs.wisc.edu describing the data you'd like to place in SQUID, and indicating your username and submit server hostname (e.g. submit-5.chtc.wisc.edu).

    +
  2. +
  3. +

    Place files within your /squid/username directory via a CHTC +transfer server (if from your laptop/desktop) or on the submit +server.

    + +

    From your laptop/desktop:

    +
    [username@computer]$ scp large_file.tar.gz username@transfer.chtc.wisc.edu:/squid/username/
    +
    + +

    If the file already exists within your /home directory on a submit +server:

    + +
    [username@submit]$ cp large_file.tar.gz /squid/username/
    +
    + +

    Check the file from the submit server:

    + +
    [username@submit]$ ls /squid/username/
    +
    +
  4. +
  5. +

    Have HTCondor download the file into the job's working directory using the +http://proxy.chtc.wisc.edu/SQUID address in the +transfer_input_files line of your submit file:

    + +
    transfer_input_files = other_file1,other_file2,http://proxy.chtc.wisc.edu/SQUID/username/large_file.txt
    +
    + +

    Important: Make sure to replace "username" with your username +in the above address. All other files should be staged before job +submission.
    +
    +If your large file is a .tar.gz file that untars to include other +files, remember to remove such files before the end of the job; +otherwise, HTCondor will think that such files are new output that +needs to be transferred back to the submit server. (HTCondor will +not automatically transfer back directories.) See the sketch after +this list.

    +
  6. +
+ + + + +
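As referenced above, the cleanup inside a job might look like this minimal sketch of a bash executable (the file, directory, and program names are placeholders):

#!/bin/bash
+# untar the input that HTCondor downloaded from the SQUID proxy
+tar -xzf large_file.tar.gz
+# run the program against the extracted data
+./myprogram extracted_dir/input.txt output.txt
+# remove the tarball and extracted files so HTCondor does not
+# transfer them back as perceived new output
+rm -r large_file.tar.gz extracted_dir/
+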
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/file-availability.html b/preview-fall2024-info/uw-research-computing/file-availability.html new file mode 100644 index 000000000..3979af2b4 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/file-availability.html @@ -0,0 +1,966 @@ + + + + + + +Small Input and Output File Availability Via HTCondor + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Small Input and Output File Availability Via HTCondor +

+ +

Which Option is the Best for Your Files?

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Input SizesOutput SizesLink to GuideFile LocationHow to TransferAvailability, Security
0 - 100 MB per file, up to 500 MB per job0 - 5 GB per jobSmall Input/Output File Transfer via HTCondor/homesubmit file; filename in transfer_input_filesCHTC, UW Grid, and OSG; works for your jobs
100 MB - 1 GB per repeatedly-used fileNot available for outputLarge Input File Availability Via Squid/squidsubmit file; http link in transfer_input_filesCHTC, UW Grid, and OSG; files are made *publicly-readable* via an HTTP address
100 MB - TBs per job-specific file; repeatedly-used files > 1GB4 GB - TBs per jobLarge Input and Output File Availability Via Staging/stagingjob executable; copy or move within the joba portion of CHTC; accessible only to your jobs
+ + + + + + +

+ +

HTCondor File Transfer

+ +

Due to the distributed configuration of the CHTC HTC pool, more often than not, +your jobs will need to bring along a copy (i.e. transfer a copy) of +data, code, packages, software, etc. from the submit server where the job +is submitted to the execute node where the job will run. This requirement +applies to any and all files that are needed to successfully execute and +complete your job.

+ +

Any output that gets generated by your jobs is specifically written to +the execute node on which the job ran. In order to get access to +your output files, a copy of the output must be transferred back +to a user-accessible location like the submit server.

+ +

The mechanism that you use for file transfers will depend on the size +of the individual input and output files of your jobs. This guide +specifically describes input and output file transfer for input files +<100MB in size (and <500MB of total input file transfer) and output +files <4GB in size using the standard solution built into HTCondor +job scheduling. More information about file transfer on a system +without a shared filesystem is available in the +HTCondor manual.

+ +

+ +

Applicability

+ +
    +
  • +

    Intended use:
    +Good for delivering any type of data to jobs, but with file-size +limitations (see below). Remember that you can/should split up a large +input file into many smaller files for cases where each job only needs a +portion of the data. By default, the submit file executable, +output, error, and log files are ALWAYS transferred.

    +
  • +
  • +

    Advantages:
    +HTCondor file transfer is robust and is available on ANY of CHTC's +accessible HTC resources including the UW Grid of campus pools, and the +OS Pool.

    +
  • +
  • +

    Data Security:
    +Files transferred with HTCondor transfer are owned by the job and +protected by user permissions in the CHTC pool. When signaling your jobs +to run on the UW Grid (Flocking) or the OS Pool (Glidein), +your files will exist on someone else's computer only for the duration +of each job. Please feel free to email us if you have data security +concerns regarding HTCondor file transfer, as encryption options are +available.

    +
  • +
+ +

+ +

Transferring Input Files

+ +

To have HTCondor transfer small (<100MB) input files needed by +your job, include the following attributes in your CHTC HTCondor submit files:

+ +
# my job submit file
+
+should_transfer_files = YES
+when_to_transfer_output = ON_EXIT
+transfer_input_files = file1, ../file2, /home/username/file3, dir1, dir2/
+
+... other submit file details ...
+
+
+ +

By default, the submit file executable, output, and +error files are ALWAYS transferred.

+ +

Important Considerations

+ +
    +
  • +

    DO NOT use transfer_input_files for files within /staging; +for files in /squid only http links (e.g. http://proxy.chtc.wisc.edu/SQUID/username/file) should be +used instead of direct file paths. These policies are in place to prevent severe performance issues for your +jobs and those of other users. Jobs should never be submitted +from within /squid or /staging.

    +
  • +
  • +

    HTCondor's file transfer can cause issues for submit server performance +when too many jobs are transferring too much data at the same time. +Therefore, HTCondor file transfer is only good for input files up to +~20 MB per file IF the number of concurrently-queued jobs will be 100 +or greater. Even when individual files are small, there are issues when +the total amount of input data per-job approaches 500 MB. For cases +beyond these limitations, one of our other CHTC file delivery methods +should be used. Remember that creating a tar.gz file of directories +and files can give your input and output data a useful amount of +compression.

    +
  • +
  • +

    Comma-separated files and directories to-be-transferred should be +listed with a path relative to the submit directory, or can be +listed with the absolute path(s), as shown above for file3. The +submit file executable is automatically transferred and does not +need to be listed in transfer_input_files.

    +
  • +
  • +

    All files that are transferred to a job will appear within the top +of the working directory of the job, regardless of how they are +arranged within directories on the submit server.

    +
  • +
  • +

    A whole directory and its contents will be transferred when listed +without the trailing forward slash ("/") after the directory name. When a directory is +listed with the trailing forward slash ("/") after the directory name, only the directory +contents will be transferred (see the sketch after this list). Care should be taken when transferring whole directories +so that only the files needed by your jobs will be transferred. +Generally, we recommend creating a tar.gz file of directories +and files to be used as job inputs - this will help streamline the process of input +file transfer and help speed up transfer times by reducing the overall size of +files that will be transferred.

    +
  • +
  • +

    Jobs will be placed on hold by HTCondor if any of the files or +directories do not exist or if you have a typo.

    +
  • +
  • +

    Learn more about HTCondor input files transfer.

    +
  • +
+ +
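To illustrate the trailing-slash behavior described above, consider this minimal submit file sketch (dir1 and dir2 are placeholder directory names):

# dir1 (no trailing slash): the directory itself is transferred and
+#   appears as dir1/ in the job's working directory;
+# dir2/ (trailing slash): only the contents of dir2 are transferred,
+#   landing at the top level of the job's working directory
+transfer_input_files = dir1, dir2/
+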

+ +

Transferring Output Files

+ +

All of your HTCondor submit files should have the following attributes:

+ +
# my job submit file
+
+should_transfer_files = YES
+when_to_transfer_output = ON_EXIT
+
+
+ +

when_to_transfer_output = ON_EXIT will instruct HTCondor to automatically transfer +ALL new or modified files in the top level directory of the job (where it ran on the execute +server), back to the job’s initial directory on the submit server. Please note: this behavior +only applies to files in the job’s top-level working directory, meaning HTCondor will ignore +any files created in subdirectories of the job’s main working directory. Several options exist for modifying +this default output file transfer behavior - see below for some examples.

+ +

Only individual output files <4GB should be transferred back to your home directory +using HTCondor’s default behavior described here. Large output files >4GB should instead +use CHTC’s large data filesystem called staging, more information is available at +Managing Large Data in HTC Jobs. To help reduce output file +sizes, and help speed up file transfer times, we recommend creating a tar.gz file of all +desired output before job completion (and to also delete the “un-tar'd” +files so they are not also transferred back); see our example below.

+ +

+ +

Group Multiple Output Files For Convenience

+ +

If your jobs will generate multiple output files, we recommend combining all output into a compressed +tar archive for convenience, particularly when transferring your results to your local computer from +the submit server. To create a compressed tar archive, include commands in your bash executable script +to create a new subdirectory, move all of the output to this new subdirectory, and create a tar archive. +For example:

+ +
#! /bin/bash
+
+# various commands needed to run your job
+
+# create output tar archive
+mkdir my_output
+mv my_job_output.csv my_job_output.svg my_output/
+tar -czf my_job.output.tar.gz my_output/
+
+ +

The example above will create a file called my_job.output.tar.gz that contains all the output that +was moved to my_output. Be sure to create my_job.output.tar.gz in the top-level directory where +your job executes, and HTCondor will automatically transfer this tar archive back to your /home +directory.

+ +

+ +

Select Specific Output Files to Transfer to /home

+ +

As described above, HTCondor will transfer ALL new or modified files in the top level +directory of the job (where it ran on the execute server), back to the job’s initial directory +on the submit server. If your jobs will produce multiple output +files but you only need to retain a subset of these output files, we recommend deleting the unneeded +output files or moving them to a subdirectory as a step in the bash +executable script of your job - only the output files that remain in the top-level +directory will be transferred back to your /home directory (see the sketch below). This will help keep ample +space free and available in your /home directory on the submit server and help prevent +you from exceeding the disk quota.
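For example, the end of a job's bash executable might include a cleanup step like this minimal sketch (the file names are hypothetical):

# keep only the needed results in the job's top-level directory;
+# stash intermediate files in a subdirectory so HTCondor ignores them
+mkdir scratch
+mv intermediate_*.dat debug.log scratch/
+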

+ +

For jobs that use large input files from /staging, you must include steps in your bash script +to either remove these files or move them to a subdirectory before the job terminates. Else, +these large files will be transferred back to your /home directory. For more details, please +see Managing Large Data in HTC Jobs.

+ +

In cases where a bash script is not used as the executable of your job and you wish to have only specific +output files transferred back, please contact us.

+ +

+ +

Get Additional Options For Managing Job Output

+ +

Several options exist for managing output file transfers back to your /home directory and we +encourage you to get in touch with us at chtc@cs.wisc.edu to +help identify the best solution for your needs.

+ +

Request a Quota Change

+ +

If you find that you are in need of more space in your /home directory to handle the number +of jobs that you want to run, please see our Request a Quota Change guide.

+ + + + +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/files/template.sub b/preview-fall2024-info/uw-research-computing/files/template.sub new file mode 100644 index 000000000..9b9fec863 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/files/template.sub @@ -0,0 +1,20 @@ +# template.sub +# starter submit file for CHTC jobs + +universe = vanilla +log = job_$(Cluster).log +error = job_$(Cluster)_$(Process).err +output = job_$(Cluster)_$(Process).out + +executable = +arguments = + +should_transfer_files = YES +when_to_transfer_output = ON_EXIT +transfer_input_files = + +request_cpus = 1 +request_memory = 1GB +request_disk = 1GB + +queue 1 diff --git a/preview-fall2024-info/uw-research-computing/form.html b/preview-fall2024-info/uw-research-computing/form.html new file mode 100644 index 000000000..df579e7d9 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/form.html @@ -0,0 +1,369 @@ + + + + + + +Account Request Form + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+

+ Account Request Form +

+ +

Please fill out the below form in order to provide us with +information about the computing work you would like +to do (to the best of your knowledge). If you are unsure of an answer to a +below question, leave the question blank or indicate that you don't know.

+ +

After filling out this form, CHTC Facilitation staff will follow up to +create your account and offer times for an initial consultation. The consultation +is only mandatory for groups new to CHTC, but we strongly encourage anyone who +is getting started to take advantage of this valuable opportunity to discuss your +work one-on-one with a CHTC Research Computing Facilitator.

+ +

For more information, see How to Request a CHTC Account.

+ +

Account Request Form

+ +

The following link leads to a Qualtrics form that we use for the account request process.

+ + + +

If you do not receive an automated email from chtc@cs.wisc.edu within a few hours of completing the form, + OR if you do not receive a response from a human within two business days (M-F), please email chtc@cs.wisc.edu.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/get-help.html b/preview-fall2024-info/uw-research-computing/get-help.html new file mode 100644 index 000000000..6524bef5e --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/get-help.html @@ -0,0 +1,474 @@ + + + + + + +Get Help + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+

+ Get Help +

+ +

There are multiple ways to get help from CHTC staff. See below:

+ +

Research Computing Facilitators

+ +
+ + +
+
+ Andrew Owen's Headshot +
Andrew Owen
Research Computing Facilitator
+
+
+ +
+
+ Christina Koch's Headshot +
Christina Koch
Research Facilitator Manager
+
+
+ +
+
+ Rachel Lombardi's Headshot +
Rachel Lombardi
Research Computing Facilitator
+
+
+ +
+ +

To help researchers effectively utilize computing resources, our Research +Computing Facilitators (RCFs) not only assist you in +implementing your computational work on CHTC compute +resources, but can also point you to other on- and off-campus services +related to research computing and data needs. For example, RCFs can:

+ +
    +
  • Assist with planning your computational approach for a research +problem
  • +
  • Teach you to submit jobs to CHTC compute systems
  • +
  • Help you with troubleshooting on CHTC compute systems
  • +
  • Connect you with other researchers using similar software or methods
  • +
  • Point to learning materials for programming and software development
  • +
  • Help you identify applicable non-CHTC data storage options provided by DoIT
  • +
  • Find the person who knows the answer to your question, even if the RCF doesn’t
  • +
  • … and other helpful activities to facilitate your use of cyberinfrastructure
  • +
+ +

Get An Account

+ +

If you don’t have an account yet, please fill out our Request +Form, and we’ll follow up quickly to set up a meeting time +and create accounts. If you don’t have an account but just have general +questions, feel free to send an email to chtc@cs.wisc.edu (see below).

+ +

Request a Quota Change

+ +

If you’d like to request a change in your quotas for one of our data +storage locations, please see our Request a Quota Change guide.

+ +

Help Via Email

+ +

We provide support via email at the address +chtc@cs.wisc.edu, and it’s never a bad idea +to start by sending questions or issues via email. You can typically +expect a first response within a few business hours.

+ +

When emailing us for assistance in troubleshooting an issue, please tell us which system you are using, +explain what you expected to happen versus what actually happened, and +include relevant files (or provide their locations on the system), such as:

+ +
    +
  • The job submit file (.sub)
  • +
  • The job executable (.sh) or list of commands used in an interactive job
  • +
  • Standard error and standard output files (usually .out or .err)
  • +
  • If on the HTC system, the HTCondor log file (.log)
  • +
+ +

We will use this information to give you more effective responses and solutions.

+ +

Office Hours

+ + + + + +

For users who already have accounts, we have drop-in office hours, online, during the following times:

+ +
    +
  • Tuesday morning: 10:30 am - 12:00 pm. CANCELED OVER THE SUMMER (May 28 through August 27)
  • +
  • Thursday afternoon: 3:00 - 4:30 pm.
  • +
+ +

To drop in, find the videoconference link in either your email or in the +login message when you log into a CHTC server.

+ +

As always, if the times above don’t work for you, please email us +at our usual support address to schedule a separate meeting.

+ +

Click to sign-in for office hours

+ + + +

Make an Appointment

+ +

We are happy to arrange meetings outside of designated Office Hours, per +your preference. Simply email us at the address above, and we will set +up a time to meet!

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/get-started.html b/preview-fall2024-info/uw-research-computing/get-started.html new file mode 100644 index 000000000..04d528a0a --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/get-started.html @@ -0,0 +1,357 @@ + + + + + + +Getting Started With CHTC + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+ + + +
+
+
+

+ Getting Started With CHTC +

+ + +

Anyone on the UW-Madison campus, and even off-campus collaborators, +may use the CHTC. In order to get started, we need some information +to understand how best to help you. +Please fill out the form here, +being as thorough as you can.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/get-submit-node.html b/preview-fall2024-info/uw-research-computing/get-submit-node.html new file mode 100644 index 000000000..078f1d2d3 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/get-submit-node.html @@ -0,0 +1,411 @@ + + + + + + +Getting a Submit Node + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Getting a Submit Node +

+ +

In order to submit jobs to our campus-wide collection of resources, you +will need access to a submit node. There are several options for getting +access to a submit node:

+ +
    +
  1. Use ours. We operate a submit node that +is shared by many researchers. This is a great way to get started +quickly, and it is sufficient if you do not need to run tens of +thousands of jobs with heavy data transfer requirements.
  2. +
  3. Use your department's. Perhaps your department already has its +own submit node, in which case you can contact your local +administrator for an account. You will still need to provide all the +info requested on the getting started form, so +we can set up things on our end. The benefits of using a +departmental or group submit node are: access to data on local file +systems; limited impact from other, potentially new users; and, +greater scalability in the number of simultaneous jobs you can run, +as well as the amount of data you can transfer.
  4. +
  5. +

    Set up a new submit node on a server. If you do not already have +one and need access to data on local file systems, or if you believe +that you will have a significant job and/or data volume, getting +your own submit node is probably the best way to go. Here's an +example system configuration that we've found works well for a +variety of submit workloads. You can expect to spend around +$4,000 - $5,000 for such a system.

    + +

    Typical submit node configuration

    + +
      +
    • A 1U rack-mount enclosure, like a Dell PowerEdge 410.
    • +
    • Two processors with 12 cores total, for example Intel Xeon +E5645, 2.4GHz 6-core processors
    • +
    • 24GB of 1.3 GHz RAM
    • +
    • Two drives for the operating system. 500GB each is enough. You +can use mirroring or a RAID configuration like RAID-6 for +reliability.
    • +
    • Two or more 2-3TB drives for data, depending on your needs.
    • +
    +
  6. +
  7. Use your desktop. Depending on your department's level of +system administration support, you may be able to have HTCondor +installed on your desktop and configured to submit into our campus +resources. Another option that is under development is +Bosco, a +user-installable software package that lets you submit jobs into +resources managed by HTCondor, PBS or SGE.
  8. +
+ +

Still not sure what option is right for you? No worries. This is one of +the topics we discuss in our initial consultation. To schedule an +initial consultation, fill out our getting started +form.

+ +
+
+ + + + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/github-remote-access.html b/preview-fall2024-info/uw-research-computing/github-remote-access.html new file mode 100644 index 000000000..0d58a7264 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/github-remote-access.html @@ -0,0 +1,580 @@ + + + + + + +Access a Private GitHub Repository Remotely + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Access a Private GitHub Repository Remotely +

+ +

This guide describes how to remotely access a private GitHub repository from the HTC and HPC clusters, specifically

+
    +
  • +

    how to generate an SSH key pair

    +
  • +
  • +

    how to add a public SSH key to your GitHub account

    +
  • +
  • +

    how to remotely access your private GitHub repository

    +
  • +
+ +

You will need to have access to a CHTC cluster. You will also need to have a GitHub account with access to the private repository of interest.

+ +

A. Generate the SSH Key Pair

+ +

We will be following the instructions provided by GitHub to generate the SSH key pair (Generating a new SSH key…).

+ +
    +
  1. +

    Log in to the submit node as usual (Connecting to CHTC).

    +
  2. +
  3. Generate the SSH key by running the following command, where the example email is replaced with the email that you use for your GitHub account. +
    ssh-keygen -t ed25519 -C "your_email@example.com"
    +
    + +

    A message will appear stating that the key pair is being generated.

    +
  4. +
  5. A second message will appear prompting you to enter the location where the SSH keys should be stored: +
    Enter a file in which to save the key (/home/your_NetID/.ssh/ed25519):
    +
    + +

    Simply hit the enter key to accept the specified file path.

    +
    +

    Note: If an SSH key already exists at the displayed path, it will be overwritten by this action. +This can be avoided by typing in an alternate path before pressing the enter key.

    +
    +
  6. +
  7. You will be prompted to create a passphrase. Type your desired passphrase and then hit enter. Repeat a second time when asked to confirm your passphrase. +
    +

    Warning: If you leave the passphrase empty (hit enter without typing anything), a passphrase will not be created nor required for using the SSH connection. In principle, this means anyone with access to the private key can access and modify your GitHub account remotely.

    +
    +
  8. +
  9. A message will appear confirming the creation of the SSH key pair, as well as the paths and names of the private and public keys that were generated. Make note of these paths for use in the following steps.
  10. +
+ +

B. Add the SSH Key to Your GitHub Account

+ +

Now we will be adding the SSH public key to your GitHub account, following the instructions provided by GitHub (Adding a new SSH key to your GitHub account).

+ +
    +
  1. +

    Copy the contents of the public SSH key file (id_ed25519.pub) created in Part A. There are several ways of doing this.

    + +
    +

    If you provided an alternate file name in Step 3. of Part A., then the public SSH key will be the name of that file plus the .pub extension.

    +
    + +
      +
    • Print the contents of the file to the screen by entering the following command, replacing your_NetID with your actual NetID. +
       cat /home/your_NetID/.ssh/id_ed25519.pub
      +
      +
    • +
    • Use a terminal editor (nano, vi, etc.) to open and view the file
    • +
    • Use a file transfer method to transfer the file to your local computer (Transferring Files).
    • +
    +
  2. +
  3. Next, log in to github.com using the same email that you used in Step 2. of Part A.
  4. +
  5. Go to your account settings by clicking on your profile icon in the top right corner of the webpage, then click on Settings within the drop-down menu. If your browser window is small, the Settings button can be found by clicking the menu button at the top left of the webpage.
  6. +
  7. Go to the SSH and GPG keys section. Under the SSH keys section, click New SSH key.
  8. +
  9. Paste the contents of the SSH public key from Step 1. into the Key textbox.
  10. +
  11. Name the SSH key using the Title textbox. We recommend “CHTC” plus the name of the login node. For example: “CHTC ap2001”.
  12. +
  13. Click Add SSH key. The SSH key will now appear in the SSH keys section in your GitHub account settings.
  14. +
+ +
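Optionally, you can confirm that GitHub accepts the new key before moving on. From the submit node, run the following; GitHub should reply with a short greeting instead of opening a shell (a quick check, assuming the key was added in Part B):

ssh -T git@github.com
+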

C. Accessing Your Private GitHub Repository from the Cluster

+

Once the SSH key has been added to your GitHub account, you can access your private repository using the repository’s SSH address.

+ +
    +
  1. In your web browser and while logged in to your GitHub account, go to webpage for the private repository.
  2. +
  3. Click the <>Code button, then select the Local tab and then the SSH tab.
  4. +
  5. Copy the SSH address that is shown.
  6. +
  7. +

    On the CHTC submit node, you can now access the repository using git commands by using the SSH address in place of the HTTPS address. For example,

    + +
    git clone git@github.com:username/user-private-repository.git
    +
    +
  8. +
  9. If prompted for a passphrase when running commands with the SSH address, provide the passphrase you created in Step 4. of Part A.
  10. +
+ +

From an interactive job

+ +

Because the interactive job takes place on a different node than the submit node, it will not know about the SSH key that you set up above. Use the following instructions to transfer and use the private identity key in the interactive job (see Compiling or Testing Code with an Interactive Job for more information on interactive jobs).

+ +
    +
  1. +

    When creating the submit file for your interactive job, include the path to the private SSH key identity file as a value for the transfer_input_files keyword. This will ensure that the identity file is copied to the interactive job directory. For example,

    + +
    transfer_input_files = /home/your_NetID/.ssh/id_ed25519, /path/to/include/other/files
    +
    + +
    +

    Note: Make sure that you are transferring the private SSH key file, not the public. The public SSH key should have the .pub extension, while the private SSH key does not.

    +
    +
  2. +
  3. Once your submit file is set up, start the interactive job using condor_submit -i and then the name of your submit file. When the interactive job has started, you will see that the private SSH key file is included in the initial directory. The SSH program, however, still needs to be told to use it.
  4. +
  5. +

    Initialize an SSH agent using the command

    + +
    eval "$(ssh-agent -s)"
    +
    +
  6. +
  7. +

    Add the private SSH key to the SSH agent by using the ssh-add command followed by the name of the private SSH key file that you transferred. You will be prompted to enter the passphrase that you created when you created the SSH key pair. For example,

    + +
    ssh-add id_ed25519
    +
    + +

    You will now be able to access the repository during the interactive job.

    +
  8. +
+ +

Additional Notes

+ +
    +
  • If you forget the passphrase you created in Step 4. of Part A., you will need to repeat this guide to create a new SSH key pair to replace the previous one.
  • +
  • +

    When using the SSH address to your repository with non-git commands, you may need to replace the colon (:) in the address with a forward slash (/). For example,

    + +

    Original SSH address

    +
     git@github.com:username/user-private-repository.git
    +
    + +

    Modified SSH address

    +
     git@github.com/username/user-private-repository.git
    +
    +
  • +
diff --git a/preview-fall2024-info/uw-research-computing/globus.html b/preview-fall2024-info/uw-research-computing/globus.html new file mode 100644 index 000000000..51eb4e845 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/globus.html

+ Use Globus to Transfer Files to and from CHTC +

+ +

Globus is a data management service that lets you move files between endpoints, computers that are connected to the Globus file transfer network. Globus is primarily useful when you need to move large amounts of data to or from somewhere that is already providing a Globus endpoint. For example, a collaborator might provide shared data through Globus, or might expect you to send data to them through Globus.

+ +

This guide will show you how to execute such transfers to and from CHTC using the CHTC +Globus endpoint, which may be simpler than trying to move the files to your +own computer first.

+ +

Prerequisites

+ +

All file transfer via Globus at CHTC requires:

+ +
    +
  • access to a directory in the /staging or /projects folders
  • +
  • login access to the transfer.chtc.wisc.edu server.
  • +
+ +

Contact us at chtc@cs.wisc.edu if you need either of the above.

+ +

You will also need to be able to log in to the Globus web interface; you can use your UW-Madison NetID (if you have one, or a similar institutional login) by selecting University of Wisconsin-Madison from the drop-down and pressing “Continue”.

+ +

Using the CHTC Globus Endpoints

+ +

You can use the Globus web interface to transfer files to and from CHTC. +In the web interface, you can select two endpoints and then initiate a transfer +between them.

+ +

The first step is to find the CHTC Globus endpoints. They can be found in the Globus web interface +by searching endpoints for “CHTC Staging” or “CHTC Projects”.

+ +

CHTC Globus endpoints

+ +

They can also be found at these links:

+ + + +

If you need the actual endpoint UUID, it is listed on the above pages near the bottom +of the “Overview”.

+ +

To use an endpoint, you must first activate it. Activations are usually time-limited, and transfers can only proceed while both the source and destination endpoints are activated. Activating an endpoint generally requires logging in. You should log in using your UW-Madison NetID. You can see how long your activation will last on the endpoint information page in the Globus web interface.

+ +

To begin a file transfer, go to the +File Manager. +In the top-right corner of the page, make sure you are in the “two panel” view. +Select the two endpoints you want to transfer between +(they are called “Collections” on this page). +You should see a directory listing appear in the middle of each of the panes; +select a directory or file and click “Start” at the bottom of the page to +move that directory or file to the other endpoint. +The item will be moved to the currently-selected directory on the other endpoint.

+ +

Globus transfers are asynchronous, and you do not need to leave the web interface open while they run. You will receive email updates on the progress of the transfer, and you can view the status of in-progress and historical transfers on the Activity page.

+ +

You may find some of the “transfer settings”, available by clicking the +“Transfer & Sync Options” dropdown, useful. +In particular, sync will help reduce the amount of time it takes to transfer +when some data has already been transferred.

+ +

Running a Personal Globus Endpoint

+ +

The CHTC Globus endpoint is a “Globus Connect Server”, designed for shared use +on a dedicated machine. +It is also possible to run +Globus Connect Personal, +a lighter-weight package that adds a Globus endpoint to your own computer, +like a laptop or lab computer. +Installers are available at that link for Mac, Linux, and Windows.

+ +

We only recommend using Globus Connect Personal if you are also working with +some other Globus endpoint (not just CHTC and your computer). +If you are just moving files between CHTC and your own computer, traditional +file transfer tools like rsync will likely be more efficient.
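For example, a transfer from your own computer to CHTC with rsync might look like the following sketch; the NetID and directory names are placeholders, and this assumes you have the /staging and transfer server access described in the prerequisites above:

    # Copy a local directory to your CHTC staging space via the transfer server
    rsync -av ./my_data/ your_NetID@transfer.chtc.wisc.edu:/staging/your_NetID/my_data/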

diff --git a/preview-fall2024-info/uw-research-computing/gpu-jobs.html b/preview-fall2024-info/uw-research-computing/gpu-jobs.html new file mode 100644 index 000000000..43ed96c69 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/gpu-jobs.html

+ Use GPUs +

+ +

Overview

+ +

GPUs (Graphics Processing Units) are a special kind of computer processor optimized for running very large numbers of simple calculations in parallel, which often can be applied to problems related to image processing or machine learning. Well-crafted GPU programs for suitable applications can outperform implementations running on CPUs by a factor of ten or more, but only when the program is written and designed explicitly to run on GPUs using special libraries like CUDA. For researchers who have problems that are well-suited to GPU processing, it is possible to run jobs that use GPUs in CHTC. Read on to determine:

+ + + +

A. Available CHTC GPUs

+ +

1. GPU Lab

+ +

CHTC has a set of GPUs that are available for use by any CHTC user with an +account on our high throughput computing (HTC) system +via the CHTC GPU Lab, which includes templates and a campus GPU community.

+ +

Our expectation is that most, if not all, CHTC users running GPU jobs should utilize the capacity of the GPU Lab to run their work.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Number of Servers | Names                   | GPUs / Server | GPU Type (DeviceName) | Hardware Generation (Capability) | GPU Memory (GlobalMemoryMB)
2                 | gpu2000, gpu2001        | 2             | Tesla P100-PCIE-16GB  | 6.0                              | 16GB
4                 | gpulab2000 - gpulab2003 | 8             | GeForce RTX 2080 Ti   | 7.5                              | 10GB
2                 | gpulab2004, gpulab2005  | 4             | A100-SXM4-40GB        | 8.0                              | 40GB
10                | gpu2002 - gpu2011       | 4             | A100-SXM4-80GB        | 8.0                              | 80GB
3                 | gpu4000 - gpu4002       | 10            | L40                   | 8.9                              | 45GB
+ +

Special GPU Lab Policies

+ +

Jobs running on GPU Lab servers have time limits and job number limits +(differing from CHTC defaults across the rest of the HTC System).

+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Job type | Maximum runtime | Per-user limitation
Short    | 12 hrs          | 2/3 of CHTC GPU Lab GPUs
Medium   | 24 hrs          | 1/3 of CHTC GPU Lab GPUs
Long     | 7 days          | up to 4 GPUs in use
+ +

There are a certain number of slots in the GPU Lab reserved for interactive use. Interactive +jobs that use GPU Lab servers are restricted to using a single GPU and a 4 hour runtime.

+ +

2. Other Capacity

+ +

There is additional dedicated and backfill GPU capacity available in CHTC and beyond; +see GPU capacity beyond the GPU Lab for details.

+ +

B. Submit Jobs Using GPUs in CHTC

+ + + +

The following options are needed in your HTCondor submit file in order +to access the GPUs in the CHTC GPU Lab and beyond:

+ +
    +
  • Request GPUs (required): All jobs that use GPUs must request GPUs in their submit file (along +with the usual requests for CPUs, memory, and disk). +
    request_gpus = 1
    +
    +
  • +
  • Request the CHTC GPU Lab: To use CHTC’s shared use GPUs, you need to opt-in to the GPU Lab. To +do so, add the +following line to your submit file: +
    +WantGPULab = true
    +
    +
  • +
  • Indicate Job Type: We have categorized three “types” +of GPU jobs, characterized in the table above. Indicate which job type you would +like to submit by using the submit file option below. +
    +GPUJobLength = "short" 
    # Can also request "medium" or "long"
    +
    +

    If you do not specify a job type, the medium job type will be used as the default. If + your jobs will run in less than 12 hours, it is advantageous to indicate that they are + “short” jobs because you will be able to have more jobs running at once.

    +
  • +
  • Request Specific GPUs or CUDA Functionality Using require_gpus (optional): If your software or code requires a certain +type of GPU, or has some other special requirement, there is a special submit file line +to request these capabilities, require_gpus. For example, if you want a certain +class of GPU, represented by +the attribute Capability, your require_gpus statement would look like this: +
    require_gpus = (Capability > 7.5)
    +
    + +

    You can see a table of the different attributes that HTCondor tracks + about the GPU nodes, and how to explore their values, in the section + on Using condor_status to explore GPUs.

    + +

    It may be tempting to add requirements for specific GPU servers or + types of GPU cards. However, when possible, it is best to write your + code so that it can run across GPU types and without needing the + latest version of CUDA.

    +
  • +
  • Specify Multiple GPU Requirements (optional): Multiple requirements can be specified by using && statements: +
    require_gpus = (Capability >= 7.5) && (GlobalMemoryMb >= 11000)
    +
    +

    Ensure all specified requirements match the attributes of the GPU/server of interest: HTCondor only matches jobs to GPUs that satisfy every specified requirement, so jobs with impossible requirements will sit idle indefinitely.

    + +
    +

    We are testing a new set of submit commands for specifying the requirements of the GPU:

    + +
    gpus_minimum_capability = <version>
    gpus_maximum_capability = <version>
    gpus_minimum_memory = <quantity in MB>
    + +

    More information on these commands can be found in the HTCondor manual.

    +
    +
  • +
  • +

    Indicate Software or Data Requirements Using requirements: If your data is large enough to + use our /staging data system (see more information here), + or you are using modules or other software in our shared /software system, include + the needed requirements.

    +
  • +
  • Indicate Shorter/Resumable Jobs: if your jobs are shorter than 4-6 hours, or have + the ability to checkpoint at least that frequently, we highly recommend taking + advantage of the additional GPU servers in CHTC that can run these kind of jobs + as backfill! Simply add the following option to your submit file: +
    +is_resumable = true
    +
    + +

    For more information about the servers that you can run on with this option, + and what it means to run your jobs as “backfill” see + the section below on Accessing Research Group GPUs.

    +
  • +
+ +

2. Sample Submit File

+ +

A sample submit file is shown below. There are also example submit files and +job scripts in this GPU Job Templates repository +in CHTC’s Github organization.

+ +
# gpu-lab.sub
# sample submit file for GPU Lab jobs

universe = vanilla
log = job_$(Cluster)_$(Process).log
error = job_$(Cluster)_$(Process).err
output = job_$(Cluster)_$(Process).out

# Fill in with whatever executable you're using
executable = run_gpu_job.sh
#arguments =

should_transfer_files = YES
when_to_transfer_output = ON_EXIT
# Uncomment and add input files that are in /home
# transfer_input_files =

# Uncomment and add custom requirements
# requirements =

+WantGPULab = true
+GPUJobLength = "short"

request_gpus = 1
request_cpus = 1
request_memory = 1GB
request_disk = 1GB

queue 1
+ +

3. Notes

+ +

It is important to still request at least one CPU per job to do the +processing that is not well-suited to the GPU.

+ +

Note that HTCondor will make sure your job has access to the GPU; it will +set the environment variable CUDA_VISIBLE_DEVICES to indicate which GPU(s) +your code should run on. The environment variable will be read by CUDA to select the appropriate +GPU(s). Your code should not modify this environment variable or manually +select which GPU to run on, as this could result in two jobs sharing a GPU.
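As a minimal sketch, a job's executable script can log the assigned device(s) for debugging; the variable is set by HTCondor and should only be read, never modified:

    #!/bin/bash
    # CUDA_VISIBLE_DEVICES is set by HTCondor for this job's slot.
    # Print it to the job's output for debugging; do not overwrite or unset it.
    echo "Assigned GPU(s): $CUDA_VISIBLE_DEVICES"
    # ...then launch your GPU program as usual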

+ +

It is possible to request multiple GPUs. Before doing so, make sure you’re +using code that can utilize multiple GPUs and then submit a test job to confirm +success before submitting a bigger job. Also keep track of how long jobs +are running versus waiting; the time you save by using multiple GPUs may be +not worth the extra time that the job will likely wait in the queue.

+ +

C. GPU Capacity Beyond the CHTC GPU Lab

+ +

The following resources are additional CHTC-accessible servers with GPUs. They do not have the special time limit policies or job limits of the GPU Lab. However, some of them are owned or prioritized by specific groups. The implications of this on job runtimes are noted in each section.

+ +

Note that all GPU jobs need to include the request_gpus option in their submit file, +even if they are not using the GPU Lab.

+ +

1. Access Research Group GPUs

+ +

Certain GPU servers in CHTC are prioritized for the +research groups that own them, but are available to run other jobs when +not being used by their owners. When running on these servers, jobs +forfeit our otherwise guaranteed runtime of 72 hours, and have the potential to be interrupted. However, for +shorter jobs or jobs that have implemented self-checkpointing, this is not a drawback and allowing jobs to run on these +additional servers opens up more capacity.

+ +

Therefore, these servers are a good fit for GPU jobs that run in a few hours +or less, or have implemented self-checkpointing (the capability to save progress +to a file and restart from that progress). Use the is_resumable option shown +above in the list of submit file options.

+ +

2. Use the gzk Servers

+ +

These are servers that are similar to the GPU Lab servers, with two important differences for running GPU jobs:

+
    +
  • they do not have access to CHTC’s large data /staging file system
  • +
  • they do not have Docker capability
  • +
+ +

You do not need to do anything specific to allow jobs to run on these servers.

+ +

3. Using GPUs in CHTC’s OSG Pool and the UW Grid

+ +

CHTC, as a member of the OSG Consortium, can access GPUs that are available on the OS Pool. CHTC is also a member of a campus computing network called the UW Grid, where groups on campus share computing capacity, including access to idle GPUs.

+ +

See this guide to know +whether your jobs are good candidates for the UW Grid or OS Pool and then get in touch +with CHTC’s Research Computing Facilitators to discuss details.

+ +

D. Using condor_status to explore CHTC GPUs

+ +

You can find out information about GPUs in CHTC through the +condor_status command. All of our servers with GPUs have a TotalGPUs +attribute that is greater than zero; thus we can query the pool to find +GPU-enabled servers by running:

+ +
[alice@submit]$ condor_status -compact -constraint 'TotalGpus > 0'
+
+ +

To print out specific information about a GPU server and its GPUs, you +can use the “auto-format” option for condor_status and the names of +specific server attributes. In general, when querying attributes using +condor_status, a “GPUs_” prefix needs to be added to the attribute name. +For example, the tables at the top of the guide can be mostly +recreated using the attributes Machine, TotalGpus, +GPUs_DeviceName and GPUs_Capability:

+ +
[alice@submit]$ condor_status -constraint 'Gpus > 0' \
				-af Machine TotalGpus GPUs_DeviceName GPUs_Capability
+ +

In addition, HTCondor tracks other GPU-related attributes for each +server, including:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Attribute             | Explanation
Gpus                  | Number of GPUs in an individual job slot on a server (one server can be divided into slots to run multiple jobs).
TotalGPUs             | The total number of GPUs on a server.
(GPUs_)DeviceName     | The type of GPU card.
(GPUs_)Capability     | Represents various capabilities of the GPU. Can be used as a proxy for the GPU card type when requiring a specific type of GPU. Wikipedia has a table showing the compute capability for specific GPU architectures and cards. More details on what the capability numbers mean can be found on the NVIDIA website.
(GPUs_)DriverVersion  | Not the version of CUDA on the server or the NVIDIA driver version, but the maximum CUDA runtime version supported by the NVIDIA driver on the server.
(GPUs_)GlobalMemoryMb | Amount of memory available on the GPU card.
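As a sketch, several of these attributes can be combined in a single query (the field list here is illustrative):

    [alice@submit]$ condor_status -constraint 'Gpus > 0' \
                    -af Machine GPUs_DeviceName GPUs_DriverVersion GPUs_GlobalMemoryMb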
+ +

E. Prepare Software Using GPUs

+ +

Before using GPUs in CHTC you should ensure that the use of GPUs will +actually help your program run faster. This means that the code or +software you are using has the special programming required to use GPUs +and that your particular task will use this capability.

+ +

If this is the case, there are several ways to run GPU-enabled software +in CHTC:

+ +
+

Machine Learning
+ For those using machine learning code specifically, we have a guide +with more specific recommendations here: Run Machine Learning Jobs on +HTC

+
+ +

1. Compiled Code

+ +

You can use our conventional methods of creating a portable installation of a software package (as in our R/Python guides) to run on GPUs. Most of our build servers or GPU servers have copies of the CUDA Runtime that can be used to compile code. To access these servers, submit an interactive job, following the instructions in our Build Job Guide or by submitting a GPU job submit file with the interactive flag for condor_submit. Once on a build or GPU server, see what CUDA versions are available by looking at the path /usr/local/cuda-*.
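For example, once your interactive job starts on a build or GPU server:

    # List the CUDA runtime installations available on this server
    ls -d /usr/local/cuda-*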

+ +

Note that we strongly recommend software installation strategies that +incorporate the CUDA runtime into the final installed code, so that jobs +are able to run on servers even if a different version of the CUDA +runtime is installed (or there’s no runtime at all!). For compiled code, +look for flags that enable static linking or use one of the solutions +listed below.
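As one hedged example for CUDA C/C++ code compiled with nvcc (the file names are placeholders), the runtime can be linked statically so the resulting binary does not depend on the execute server's CUDA installation:

    # Compile with the CUDA runtime statically linked into the executable
    nvcc --cudart=static -o my_gpu_program my_gpu_program.cu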

+ +

2. Docker

+ +

CHTC’s GPU servers have “nvidia-docker” installed, a specific version of +Docker that integrates Docker containers with GPUs. If you can find or +create a Docker image with your software that is based on the +nvidia-docker container, you can use this to run your jobs in CHTC. See +our Docker guide for how to use Docker in CHTC.

+ +

Currently we recommend using +“nvidia/cuda” containers with a tag beginning with “12.1.1-devel” +for best integration with our system.
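A minimal sketch of the relevant submit file lines for such a job is below; the exact image tag suffix is an assumption, so pick a 12.1.1-devel tag that matches your needs:

    universe = docker
    docker_image = nvidia/cuda:12.1.1-devel-ubuntu22.04
    request_gpus = 1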

diff --git a/preview-fall2024-info/uw-research-computing/gpu-lab.html b/preview-fall2024-info/uw-research-computing/gpu-lab.html new file mode 100644 index 000000000..3e7a25f43 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/gpu-lab.html

+ Welcome to the CHTC GPU Lab +

+ +
+
+
+ + +
+
+
+ +

The CHTC GPU Lab was created by a UW2020-funded project to expand shared GPU +computing infrastructure at UW-Madison. It includes:

+ +
+
+
+ +
+
+
+
+
+ Hardware Icon +
+

Hardware

+

A pool of shared GPU servers managed by CHTC.

+
+
+
+
+
+
+
+ People Icon +
+

Expertise

+

A community of technical experts.

+
+
+
+
+
+
+
+ Book Icon +
+

Documentation

+

A library of sharable software and documentation.

+
+
+
+
+
+
+ +
+
+
+ +

Get Involved

+ +

If you want to use GPU resources in CHTC for your research:

+ + + +

The CHTC GPU Lab mailing list is used to announce new GPU hardware availability and +GPU-related events, solicit feedback from GPU users, and share best practices for +GPU computing in CHTC. Any CHTC user can subscribe to the list by +emailing chtc-gpu-lab+managers@g-groups.wisc.edu +and asking to join. +Their subscription request will be reviewed by the list administrators.

+ +
+

The CHTC GPU Lab is led by Anthony Gitter, Christina Koch, Brian Bockelman, and Miron Livny.

+
+ +
+

The original UW2020 project was led by Anthony Gitter, Lauren Michael, Brian Bockelman, and Miron Livny and +funded by the Office of the Vice Chancellor for Research and Graduate +Education and the Wisconsin Alumni Research Foundation.

+
+ +

For more information about the CHTC GPU Lab project contact Anthony Gitter.

diff --git a/preview-fall2024-info/uw-research-computing/guide-icons/checkmark.png b/preview-fall2024-info/uw-research-computing/guide-icons/checkmark.png new file mode 100644 index 000000000..455a5e46e Binary files /dev/null and b/preview-fall2024-info/uw-research-computing/guide-icons/checkmark.png differ
diff --git a/preview-fall2024-info/uw-research-computing/guide-icons/conda-icon.png b/preview-fall2024-info/uw-research-computing/guide-icons/conda-icon.png new file mode 100644 index 000000000..09300e897 Binary files /dev/null and b/preview-fall2024-info/uw-research-computing/guide-icons/conda-icon.png differ
diff --git a/preview-fall2024-info/uw-research-computing/guide-icons/data.png b/preview-fall2024-info/uw-research-computing/guide-icons/data.png new file mode 100644 index 000000000..e1788bc18 Binary files /dev/null and b/preview-fall2024-info/uw-research-computing/guide-icons/data.png differ
diff --git a/preview-fall2024-info/uw-research-computing/guide-icons/gear.png b/preview-fall2024-info/uw-research-computing/guide-icons/gear.png new file mode 100644 index 000000000..b3275eeb5 Binary files /dev/null and b/preview-fall2024-info/uw-research-computing/guide-icons/gear.png differ
diff --git a/preview-fall2024-info/uw-research-computing/guide-icons/java-icon.png b/preview-fall2024-info/uw-research-computing/guide-icons/java-icon.png new file mode 100644 index 000000000..f9dd2ba5a Binary files /dev/null and b/preview-fall2024-info/uw-research-computing/guide-icons/java-icon.png differ
diff --git a/preview-fall2024-info/uw-research-computing/guide-icons/julia-icon.png b/preview-fall2024-info/uw-research-computing/guide-icons/julia-icon.png new file mode 100644 index 000000000..0a97ce548 Binary files /dev/null and b/preview-fall2024-info/uw-research-computing/guide-icons/julia-icon.png differ
diff --git a/preview-fall2024-info/uw-research-computing/guide-icons/laptop_arrow.png b/preview-fall2024-info/uw-research-computing/guide-icons/laptop_arrow.png new file mode 100644 index 000000000..7cd84ebeb Binary files /dev/null and b/preview-fall2024-info/uw-research-computing/guide-icons/laptop_arrow.png differ
diff --git a/preview-fall2024-info/uw-research-computing/guide-icons/matlab-icon.png b/preview-fall2024-info/uw-research-computing/guide-icons/matlab-icon.png new file mode 100644 index 000000000..15ede2640 Binary files /dev/null and b/preview-fall2024-info/uw-research-computing/guide-icons/matlab-icon.png differ
diff --git a/preview-fall2024-info/uw-research-computing/guide-icons/noun_gpu_2528527.png b/preview-fall2024-info/uw-research-computing/guide-icons/noun_gpu_2528527.png new file mode 100644 index 000000000..613d14caa Binary files /dev/null and b/preview-fall2024-info/uw-research-computing/guide-icons/noun_gpu_2528527.png differ
diff --git a/preview-fall2024-info/uw-research-computing/guide-icons/noun_open book_1179297.png b/preview-fall2024-info/uw-research-computing/guide-icons/noun_open book_1179297.png new file mode 100644 index 000000000..4da3340e7 Binary files /dev/null and b/preview-fall2024-info/uw-research-computing/guide-icons/noun_open book_1179297.png differ
diff --git a/preview-fall2024-info/uw-research-computing/guide-icons/noun_people_1188645.png b/preview-fall2024-info/uw-research-computing/guide-icons/noun_people_1188645.png new file mode 100644 index 000000000..af772b071 Binary files /dev/null and b/preview-fall2024-info/uw-research-computing/guide-icons/noun_people_1188645.png differ
diff --git a/preview-fall2024-info/uw-research-computing/guide-icons/processor.png b/preview-fall2024-info/uw-research-computing/guide-icons/processor.png new file mode 100644 index 000000000..a02d1a67b Binary files /dev/null and b/preview-fall2024-info/uw-research-computing/guide-icons/processor.png differ
diff --git a/preview-fall2024-info/uw-research-computing/guide-icons/python-icon.png b/preview-fall2024-info/uw-research-computing/guide-icons/python-icon.png new file mode 100644 index 000000000..2de41b35f Binary files /dev/null and b/preview-fall2024-info/uw-research-computing/guide-icons/python-icon.png differ
diff --git a/preview-fall2024-info/uw-research-computing/guide-icons/r-icon.png b/preview-fall2024-info/uw-research-computing/guide-icons/r-icon.png new file mode 100644 index 000000000..a9ea75258 Binary files /dev/null and b/preview-fall2024-info/uw-research-computing/guide-icons/r-icon.png differ
diff --git a/preview-fall2024-info/uw-research-computing/guide-icons/servers.png b/preview-fall2024-info/uw-research-computing/guide-icons/servers.png new file mode 100644 index 000000000..cc666200a Binary files /dev/null and b/preview-fall2024-info/uw-research-computing/guide-icons/servers.png differ
diff --git a/preview-fall2024-info/uw-research-computing/guide-icons/trouble.png b/preview-fall2024-info/uw-research-computing/guide-icons/trouble.png new file mode 100644 index 000000000..62f51c563 Binary files /dev/null and b/preview-fall2024-info/uw-research-computing/guide-icons/trouble.png differ
diff --git a/preview-fall2024-info/uw-research-computing/guides.html b/preview-fall2024-info/uw-research-computing/guides.html new file mode 100644 index 000000000..9e658dca8 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/guides.html

+ Computing Guides +

+ +

+Below is a list of guides for some of the most common tasks our users need to +carry out as they begin and continue to use the resources at the CHTC. +Some of these are general computing solutions; others are specific to HTCondor +or to the configuration of CHTC computing resources. +

+

+Guides will be added to the list as we can provide them. Please contact us +(email at bottom of page) if you find any of the information to be incorrect. +

+ + + +

User Expectations

+ +Read through these user expectations and policies before using CHTC services. + + + + + + +

HTC Documentation

+ +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + +

Handling Data in Jobs

+
+ + + Transfer Small Input and Output + + + + Transfer Large Input Files Via Squid + + + + Use Large Input and Output Files Via Staging + + +
+
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + +

Troubleshooting

+
+ + + Windows / Linux Incompatibility + + + + Explore and Test Docker Containers + + + + Known Issues on the HTC + + +
+
+ + +
+ +

HPC Documentation

+ + + +

External Documentation

+ + + + +Icon Credits + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/helloworld.html b/preview-fall2024-info/uw-research-computing/helloworld.html new file mode 100644 index 000000000..d3f4f27c8 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/helloworld.html @@ -0,0 +1,14 @@ + + +
+ +
diff --git a/preview-fall2024-info/uw-research-computing/high-memory-jobs.html b/preview-fall2024-info/uw-research-computing/high-memory-jobs.html new file mode 100644 index 000000000..3dc658d50 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/high-memory-jobs.html @@ -0,0 +1,942 @@ + + + + + + +Submit High Memory Jobs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Submit High Memory Jobs +

+ +

The examples and information in the below guide are useful ONLY if:

+
    +
  • you already have an account on a CHTC-administered submit server
  • +
  • your jobs will use more than ~120 GB of memory
  • +
+ +

To best understand the below information, users should already be familiar with:

+
    +
  1. Using the command-line to: navigate directories, +create/edit/copy/move/delete files and directories, and run intended +programs (aka "executables").
  2. +
  3. CHTC's Intro to Running HTCondor Jobs
  4. +
  5. CHTC's guides for handling large data (Guide +here) and software installation.
  6. +
+ +

Overview

+ +

A high-memory job is one that requires a significantly larger amount of memory (also known as RAM) than a typical high throughput job, usually over 200 GB and up to 1-4 TB. In the following guide, we cover resources and recommendations for running high-memory work in CHTC. However, please make sure to email us if you believe you will need to run "high-memory" work for the first time, or are planning the execution of new "high-memory" work that is different from what you've run before. We'll happily help you with some personalized tips and considerations for getting your work done most efficiently.

+ +
    +
  1. High Memory Resources in CHTC
  2. +
  3. Getting Started
  4. +
  5. Running High Memory Jobs
  6. +
+ +

+ +

1. High Memory Resources in CHTC

+ +

Our high memory servers have the following specs:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
Number of servers | Memory per server | CPUs per server | Local disk space on server | Names
16                | 512 GB            | 40              | 1.2 TB                     | e2003-e2018
2                 | 2 TB              | 80              | 3.5+ TB                    | mem2001, mem2002
+ +

+ +

2. Getting Started

+ +

+ +

A. Identifying High Memory Jobs

+ +

Jobs that request over 200GB of memory in their submit file +can run on our dedicated high memory machines. However, if your job +doesn't need quite that much memory, it's good to request less, as +doing so will allow your job(s) to run on more servers, since CHTC has +hundreds of servers with up to 100 GB of memory and dozens of servers +with up to 250 GB of memory.

+ +

+ +

B. Testing

+ +

Before running a full-size high-memory job, make sure to use a small +subset of data in a test job. Not only will this give you a chance to +try out the submit file syntax and make sure your job runs, but it can +help you estimate how much memory and/or disk you will need for a job +using your full data.

+ +

You can also use interactive jobs to test commands that will end up in your "executable" script. To run an interactive job, prepare your submit file as usual. Note that for an interactive job, you should use a smaller memory request (and possibly lower CPU and disk as well) than for the final job (so that the interactive job starts) and plan to simply test commands, not run the entire program. To submit an interactive job, use the -i flag with condor_submit:

+ +
[alice@submit]$ condor_submit -i submit.file
+
+ +

After waiting for the interactive job to start, this should open a bash +session on an execute machine, which will allow you to test your +commands interactively. Once your testing is done, make the appropriate +changes to your executable, adjust your resource requests, and submit +the job normally.

+ +

+ +

C. Consult with Facilitators

+ +

If you are unsure how to run high-memory jobs on CHTC, or if you're not +sure if everything in this guide applies to you, get in touch with a +research computing facilitator by emailing chtc@cs.wisc.edu.

+ +

+ +

3. Running High Memory Jobs

+ +

+ +

A. Submit File

+ +

The submit file shown in our Hello World example is +a good starting point for building your high memory job submit file. The +following are places where it's important to customize:

+ +
    +
  • +

    request_memory: It is crucial to make this request as accurate +as you can by testing at a small scale if possible (see +above). Online documentation/help pages or your colleagues' +experience is another source of information about required memory.

    +
  • +
  • Long running jobs: If your high memory job is likely to run +longer than our 3-day time limit, please email us for options on how +to run for longer. In the past, high memory jobs received an extra +time allowance automatically but this is no longer the case.
  • +
  • +

    request_cpus: Sometimes, programs that use a large amount of +memory can also take advantage of multiple CPUs. If this is the case +for your program, you can request multiple CPUs. However, it is +always easier to start jobs that request fewer number of cores, +rather than more. We recommend:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Requesting ___ of memory? | Request fewer than ___ CPUs
up to 100 GB              | 4
100-500 GB                | 8
500 GB - 1 TB             | 16
1-1.5 TB                  | 20
1.5-2 TB                  | 20
2 TB or greater           | 32
    + +

    If you think a higher CPU request would significantly improve your +job's performance, contact a facilitator.

    +
  • +
  • +

    request_disk: Request the maximum amount of data your job will +ever have within the job working directory on the execute node, +including all output and input (which will take up space before some +of it is removed from the job working directory at the end of the +job).

    +
  • +
  • Other requirements: if your job uses files from our large data +space, or Docker for +software, add the necessary requirements for +these resources to your submit file.
  • +
+ +

Altogether, a sample submit file may look something like this:

+ +
### Example submit file for a single staging-dependent job

universe = vanilla

# Files for the below lines will all be somewhere within /home/username,
# and not within /staging/username
log = run_myprogram.log
executable = run_Trinity.sh
output = $(Cluster).out
error = $(Cluster).err
transfer_input_files = trinityrnaseq-2.0.1.tar.gz
should_transfer_files = YES

# Require execute servers that have large data staging
Requirements = (Target.HasCHTCStaging == true)

# Memory, disk and CPU requests
request_memory = 200GB
request_disk = 100GB
request_cpus = 4

# Submit 1 job
queue 1
### END
+ +

+ +

B. Software

+ +

Like any other job, the best option for high memory work is to create a +portable installation of your software. We have guides for scripting +languages and using +Docker, and can otherwise provide individual +support for program installation during office hours or over +email.

+ +

+ +

C. "Executable" script

+ +

As described in many of our guides (for +software or for using large +data), you will need to write a script +that will run your software commands for you and that will serve as the +submit file "executable". Things to note are:

+ +
    +
  • If using files from our large data staging space, follow the +recommendations in our guide.
  • +
  • If using multiple cores, make sure that you request the same number +of "threads" or "processes" in your command as you requested in +your submit file.
  • +
+ +

Altogether, a sample script may look something like this (perhaps called +run_Trinity.sh):

+ +
#!/bin/bash
# Copy input data from /staging to the present directory of the job
# and un-tar/un-zip them.
cp /staging/username/reads.tar.gz ./
tar -xzvf reads.tar.gz
rm reads.tar.gz

# Set up the software installation in the job working directory, and
# add it to the job's PATH
tar -xzvf trinityrnaseq-2.0.6-installed.tar.gz
rm trinityrnaseq-2.0.6-installed.tar.gz
export PATH=$(pwd)/trinityrnaseq-2.0.6:$PATH

# Run software command, referencing input files in the working directory and
# redirecting "stdout" to a file.  Backslashes are line continuation.
Trinity --seqType fq --left reads_1.fq \
--right reads_2.fq --CPU 4 --max_memory \
20G > trinity_stdout.txt

# Trinity will write output to the working directory by default,
# so when the job finishes, it needs to be moved back to /staging
tar -czvf trinity_out_dir.tar.gz trinity_out_dir
cp trinity_out_dir.tar.gz trinity_stdout.txt /staging/username/
rm reads_*.fq trinity_out_dir.tar.gz trinity_stdout.txt

### END
diff --git a/preview-fall2024-info/uw-research-computing/how-tos.html b/preview-fall2024-info/uw-research-computing/how-tos.html new file mode 100644 index 000000000..70b123aaa --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/how-tos.html

+ How To's +

diff --git a/preview-fall2024-info/uw-research-computing/howto_overview.html b/preview-fall2024-info/uw-research-computing/howto_overview.html new file mode 100644 index 000000000..6e2322ec4 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/howto_overview.html

+ CHTC Tools for Matlab, R, and Python Portability +

+ +

This page is deprecated. Please see our guide Overview: How to Use +Software instead.

diff --git a/preview-fall2024-info/uw-research-computing/hpc-el8-to-el9.html b/preview-fall2024-info/uw-research-computing/hpc-el8-to-el9.html new file mode 100644 index 000000000..f9fb6a6d8 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/hpc-el8-to-el9.html

+ HPC System Transition to a New Linux Version (CentOS Stream 9) +

+ +

Starting in May 2024, CHTC’s high performance computing (HPC) cluster began upgrading +the Linux distribution and version we use on our servers to CentOS Stream 9 (EL9). This transition is expected to complete in June 2024.

+ +

Note that this page only applies to a transition on the HPC cluster. For information +on the HTC system transition, see HTC System Transition to a New Linux Version (CentOS Stream 9)

+ +

All updates to the HPC Cluster will be reflected on this page; significant changes may +also include a notification to the chtc-users mailing list.

+ +

Important Dates

+ +
    +
  • May 31: Login to the upgraded cluster login node becomes available. Worker nodes start transitioning from the existing cluster to upgraded cluster partitions.
  • +
  • May 31 - June 17: Users should rebuild their code and test jobs on the upgraded cluster. Users should be running primarily on the upgraded cluster.
  • +
  • June 17: Most nodes will have been upgraded and transitioned.
  • +
  • June 24: The old cluster partitions are closed.
  • +
+ +

What is Changing

+ +

As part of this transition, there will be a new login node for +the HPC cluster: spark-login.chtc.wisc.edu.

+ +

If you log into spark-login, you will have access to a new +module stack, compiled on CentOS Stream 9, and the partitions available will +have worker nodes that are running CentOS Stream 9.

+ +

The files in your /home and /scratch directories will be unchanged.

+ +

What You Need to Do

+ +

As soon as possible, do the following:

+ +
    +
  1. +

    Log into the new login node spark-login.chtc.wisc.edu.

    +
  2. +
  3. +

    Run a typical job as a test. It is highly likely that certain codes will +fail on the new worker nodes, as the underlying dependencies of your code, including +the operating system, and any modules used, have changed.

    +
  4. +
  5. +

    If your jobs no longer run, archive your previous software installation(s) and +rebuild your software. The Software Changes section below has +more specific information about how to do this.

    +
  6. +
  7. +

    If you recompiled your code, run a few small test jobs to confirm that the +code is working correctly.

    +
  8. +
+ +

If you are having trouble getting your jobs to run successfully on the new operating system, please contact the facilitation team at chtc@cs.wisc.edu or come to office hours.

+ +

Software Changes

+ +

Almost all packages and libraries have been upgraded as part of the operating system transition. +Unless your code is fairly simple, you will likely need to recompile it.

+ +

Remember to always compile your code/programs in an (interactive) Slurm job! How To

+ +
+

Not only does this help avoid stressing the resources of the login server, but the upgraded login server uses a newer CPU architecture than the worker nodes in the cluster. +Most compilers auto-detect the CPU architecture and adapt the compilation to use that architecture. +Attempting to use such compiled code on a different/older CPU architecture can lead to “Illegal instruction” errors, among others.

+
+ +

Modules

+ +

Most of the modules on the upgraded cluster have been kept, but with upgraded versions. +The following table is a comparison of the modules on the old operating system (EL8) versus the new operating system (EL9). +(Adapted from the output of module avail on the respective servers.)

+ +

You will likely need to recompile your code to use the new module versions. +Remember to also update any module load commands that specify a particular version of the module, +otherwise you may encounter “module(s) are unknown” errors.
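For example, using the gcc versions from the table below:

    # Old module load line (EL8):
    #   module load gcc/11.3.0
    # Updated for the upgraded system (EL9):
    module load gcc/13.2.0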

+ +

Module comparison

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Module name             | Old version (EL8) | New version (EL9)
abaqus                  | 2018-hotfix-1904  | TBD
ansys                   | 2022r2            | 2024r1
aocc                    | 3.2.0             | 4.2.0
cmake                   | 3.27.7            | 3.27.9
comsol                  | 6.0, 6.1, 6.2     | 6.2
gcc                     | 11.3.0            | 13.2.0
hdf5 (intel-oneapi-mpi) | 1.12.2            | dropped
hdf5 (openmpi)          | 1.12.2            | 1.14.3
intel-oneapi-compilers  | 2023.2.1          | 2024.1.0
intel-oneapi-mkl        | 2023.2.0          | 2024.0.0
intel-oneapi-mpi        | 2021.10.0         | 2021.12.1
intel-tbb               | 2021.9.0          | deprecated
lmstat.comsol           | 6.0               | TBD
lumerical-fdtd          | 2022-r2.4         | 2024-R1.2
matlab                  | R2021b, R2022b    | R2024a
mvapich2                | 2.3.7-1           | deprecated
mvapich                 | n/a               | 3.0
netcdf-c                | 4.8.1             | 4.9.2
netcdf-cxx4             | 4.3.1             | 4.3.1
netcdf-fortran          | 4.5.4             | 4.6.1
openmpi (aocc)          | 4.1.3             | dropped
openmpi (gcc)           | 4.1.3             | 5.0.3
patchelf (gcc)          | 0.17.2            | 0.17.2
patchelf (intel)        | 0.18.0            | dropped
patchelf (oneapi)       | 0.18.0            | 0.17.2
petsc                   | 3.18.1            | 3.21.1
pmix                    | n/a               | 5.0.1
+ +
+

Different versions of module packages, or packages that are “dropped” or “deprecated” may be manually installed by the user using Spack.

+
+ +

Spack

+ +

Spack is a package manager platform that allows users to install software without admin privileges. +CHTC also uses Spack to install the software underlying the system-wide modules discussed above.

+ +
+

If you have not used Spack before, you can skip this section and go directly to the Set Up Spack on HPC guide.

+
+ +

Here is the general process for setting up your software on the upgraded EL9 system; detailed instructions are provided after the general process:

+ +
    +
  1. +

    Identify the environments you currently have and which you want to reproduce on the upgraded system.

    +
  2. +
  3. +

    Remove your existing Spack folders.

    +
  4. +
  5. +

    Do a clean installation of Spack.

    +
  6. +
  7. +

    In an interactive job, create your Spack environment(s) and install the packages as you did previously.

    +
  8. +
  9. +

    Update your job submission scripts and/or recompile programs as needed to use the new Spack environment(s).

    +
  10. +
+ +

The following instructions assume that you previously installed Spack in your home (~/) directory for individual use.

+ +

1. Identify your environments

+ +

You can see your Spack environments with

+ +
spack env list
+
+ +

Activate an environment that you want to replicate with

+ +
spack env activate environment_name
+
+ +

Then list your package “specs” with the command

+ +
spack find
+
+ +

There is a section "==> Root specs" that lists the package specs you explicitly added when you created your environment. Save a copy of these specs somewhere safe, so that you can use them to replicate the environment later on, as shown below. You can ignore the "installed packages" section, as that will certainly change on the new system.
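One simple way to save them (the file name is just a suggestion):

    spack find > ~/specs_environment_name.txt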

+ +

Repeat the above steps for each environment you want to replicate on the upgraded system.

+ +

2. Remove your existing Spack folders

+ +

The easiest way to update Spack for the upgraded system is to remove the current Spack installation and reinstall from scratch.

+ +
+

Before proceeding, you may want to make a backup of each folder using

+ +
tar -czf folder_name.tar.gz ~/folder_name
+
+
+ +

For most users, the following commands should work:

+ +
cd ~/
rm -rf spack spack_programs spack_modules .spack
+ +

The command may take a while to run.

+ +

3. Fresh install of Spack

+ +

Next, follow the instructions in our guide Set Up Spack on HPC to do a fresh installation of Spack. +The commands in the guide have been updated for setting up Spack on the new operating system.

+ +

4. Recreate your environments

+ +

Follow the instructions in our guide Install Software Using Spack to create your desired environments +using the “root specs” that you saved earlier.
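A hedged sketch of that workflow, using a placeholder environment name and root spec:

    # Inside an interactive Slurm job (see the note below):
    spack env create my_env        # create the new environment
    spack env activate my_env      # activate it
    spack add openmpi              # repeat for each saved root spec
    spack install                  # build the packages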

+ +

NOTE: We've made a small but important change to this guide: you should always start an interactive Slurm job before creating or modifying a Spack environment. The login server uses different hardware than the execute servers, and the mismatch leads to Spack using the wrong settings for installing packages. Of course, as before, you should only install packages while in an interactive Slurm job.

+ +

Behind the scenes, we’ve made a few changes to the configuration that will hopefully make the package installation much smoother.

+ +

5. Update your workflow

+ +

Finally, remember to update your workflow to use the new Spack environments and the packages installed therein.

+ +
    +
  • +

    If you explicitly provide paths to packages installed using Spack, be sure to update those paths in your compiler configuration or in your job submission script.

    +
  • +
  • +

    If you used Spack to provide dependencies for manually compiling a program, remember to recompile the program.

    +
  • +
  • +

    If you changed the name of your environment, be sure to update the name in your job submission script.

    +
  • +
diff --git a/preview-fall2024-info/uw-research-computing/hpc-job-submission.html b/preview-fall2024-info/uw-research-computing/hpc-job-submission.html new file mode 100644 index 000000000..a40e63227 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/hpc-job-submission.html

+ Submitting and Managing Jobs Using SLURM +

+ +

The HPC Cluster uses SLURM to manage jobs. This page describes how to submit and manage jobs using SLURM.

+ +

Contents

+ +
    +
  1. Submitting Jobs Using SLURM
  2. +
  3. Viewing Jobs in the Queue
  4. +
  5. Viewing Additional Job Information
  6. +
  7. Removing or Holding Jobs
  8. +
+ +

The following assumes that you have been granted access to the HPC cluster +and can log into the head node spark-login.chtc.wisc.edu. If this is not +the case, please see the CHTC account application page or email +the facilitation team at chtc@cs.wisc.edu.

+ +

1. Submitting Jobs Using SLURM

+ +

A. Submitting a Job

+ +

Jobs can be submitted to the cluster using a submit file, sometimes also +called a “batch” file. The top half of the file consists of #SBATCH +options which communicate needs or parameters of the job – these lines +are not comments, but essential options for the job. The values for +#SBATCH options should reflect the size of nodes and run time limits +described here.

+ +

After the #SBATCH options, the submit file should contain the commands +needed to run your job, including loading any needed software modules.

+ +

An example submit file is given below. It requests 1 node with 64 cores and 4 GB of memory per core (so 64 cores and 256 GB of memory total), on the shared partition. It also specifies a run time limit of 4.5 hours.

+ +
#!/bin/sh
#This file is called submit-script.sh
#SBATCH --partition=shared      # default "shared", if not specified
#SBATCH --time=0-04:30:00       # run time in days-hh:mm:ss
#SBATCH --nodes=1               # require 1 node
#SBATCH --ntasks-per-node=64    # cpus per node (by default, "ntasks"="cpus")
#SBATCH --mem-per-cpu=4000      # RAM per CPU core in megabytes
#SBATCH --error=job.%J.err
#SBATCH --output=job.%J.out
# Make sure to change the above two lines to reflect your appropriate
# file locations for standard error and output

# Now list your executable command (or a string of them).
# Example for code compiled with a software module:
module load mpimodule
srun --mpi=pmix -n 64 /home/username/mpiprogram
+ +

Once the submit file is created, it can be submitted using the sbatch command:

+ +
[alice@login]$ sbatch submit-script.sh
+
+ +

B. Optimizing Your Submit File

+ +

The new cluster has different partition names and different sized nodes. We always recommend requesting cores per node (instead of total cores), using a multiple of 32 cores as your request per node. Requesting multiple nodes is not advantageous if your jobs are smaller than 128 cores. We also now recommend requesting memory per core instead of memory per node, for similar reasons, using the --mem-per-cpu flag with units of MB. Here are our recommendations for different sized jobs:

+ + + + + + + + + + + + + + + + + + +
Job size                       | Recommended #SBATCH flags
32-128 cores                   | Example for 64 cores:
                               | #SBATCH --nodes=1
                               | #SBATCH --ntasks-per-node=64 # recommend multiples of 32
                               | #SBATCH --mem-per-cpu=4000
128 - 256 cores                | Split over a few nodes, for example for 128 cores:
                               | #SBATCH --nodes=2
                               | #SBATCH --ntasks-per-node=64 # designate cores per node
                               | #SBATCH --mem-per-cpu=4000
128 or 256 cores (whole nodes) | Example for 256 cores:
                               | #SBATCH --nodes=2
                               | #SBATCH --ntasks-per-node=128
                               | #SBATCH --mem-per-cpu=4000
+ +

C. Requesting an Interactive Job ("int" and "pre" partitions)

+ +

If you want to run your job commands yourself, as a test before submitting +a job as described above, you can request an interactive job on the cluster.

+ +

There is a dedicated partition for interactive work called int; you may request up to 16 CPUs and 64 GB of memory when requesting an interactive session in the "int" partition. By default, the session is limited to 60 minutes, though you can request up to 4 hours. Using another partition (like pre) will mean your interactive job is subject to the limits of that partition instead.

+ +

For simple testing or compiling

+ +

The command to request an interactive job is srun --mpi=pmix, and includes the partition +in which you’d like to run the interactive job.

+ +
[alice@login]$ srun --mpi=pmix -n4 -N1 -p int --pty bash
+
+ +
+

Note: You will not be able to run MPI code in this interactive session.

+
+ +

The above example indicates a request for 4 CPUs (-n4) on a single +node (-N1) in the "int" partition (-p int). Adding "-t 15" would +indicate a request for 15 minutes, if desired, rather than the 60-minute +default. After the interactive shell is created to a compute node with +the above command, you'll have access to files on the shared file +system and be able to execute code interactively as if you had directly +logged in to that node. It is important to exit the interactive shell +when you're done working by typing exit.

+ +

For running MPI code

+ +

To run an MPI program in an interactive session, you will need to (1) allocate the +resources using salloc, then (2) use srun to run the MPI code, and finally (3) +give up the allocated resources.

+ +
    +
  1. +

    Request resources

    + +
    [alice@login]$ salloc -n4 -N1 -p int
    +
    + +

    This command requests 4 CPUs (-n4) on a single node (-N1) in the "int" +partition (-p int), and assigns the resources to a new terminal session +on the login node. When the allocation has started, you will see a message +like this:

    +
    salloc: Granted job allocation 18701
    +            Guest on spark-a005.chtc.wisc.edu
    +
    + +

    To run code in this allocation, be sure to use srun as described in the next step!

    +
  2. +
  3. +

    Use resources

    + +

    At this point, your terminal is still running on the login node. To run +commands using the resources in the allocation, you will need to use srun.

    +
    [alice@login]$ srun --mpi=pmix /path/to/mpi/script
    +
    + +

    This will execute the specified script using the allocated resources. +When the srun calculation has finished, you will remain in the allocation +session, allowing you to run srun multiple times in quick succession.

    + +

    You can also use the allocated resources interactively with

    +
    [alice@login]$ srun --mpi=pmix --pty bash
    +
    + +

    which will start an interactive terminal session in your allocation (this +is evident by the change in the command prompt from [alice@login] to +[alice@spark-a###]). Keep in mind that you will not be able to use +MPI inside the interactive session. You can exit the interactive session +and return to the allocation by entering exit.

    +
  4. +
  5. +

    Give up resources

    + +

    To end your allocation, simply enter exit. You will see a message like +this:

    +
    exit
    +salloc: Relinquishing job allocation 18701
    +salloc: Job allocation 18701 has been revoked.
    +
    +
  6. +
+ +
+

It can be difficult to remember whether or not you are currently using an +allocation. A quick way of checking is to see if the SLURM_JOB_ID is set +by entering echo $SLURM_JOB_ID. If you are in an allocation, this command +will return the job ID number that corresponds to an entry in your SLURM queue +(see below).

+ +

A more convenient option is to update your .bashrc file so that the command +prompt changes when you are in an allocation. This can be done using the +following commands:

+
echo 'PS1="$SLURM_JOB_ID[\u@\h \W]\$ " ' >> ~/.bashrc
echo 'export PS1' >> ~/.bashrc
+ +

Now when you run salloc, your command prompt will start with the corresponding +SLURM job ID number. This will also be the case for the interactive srun +command. For example,

+
[alice@login]$ salloc -n4 -N1 -p int
salloc: Granted job allocation 18701
            Guest on spark-a005.chtc.wisc.edu

18701[alice@login]$ echo 'I am running an allocation.'
I am running an allocation.
18701[alice@login]$ srun --mpi=pmix --pty bash

18701[alice@spark-a006] echo 'I am using the resources interactively.'
I am using the resources interactively.
18701[alice@spark-a006] exit
exit
18701[alice@login]$ exit
exit
salloc: Relinquishing job allocation 18701
[alice@login]$
+ +
    +
  • This can be undone by removing the two added lines from the .bashrc file + in your home directory.
  • +
+ +
+

More advanced users can manipulate their bash prompt further.
+The SLURM_JOB_ID variable is created for the allocation, and +a SLURM_JOB_UID variable is created for the interactive srun.

+
+
+ +

2. Viewing Jobs in the Queue

+ +

To view your jobs in the SLURM queue, use the following command:

+ +
[alice@login]$ squeue -u username
+
+ +

Issuing squeue alone will show all user jobs in the queue. You can +view all jobs for a particular partition with squeue -p shared.

+ +

3. Viewing Additional Job Information

+ +

Accounting information for jobs that are invoked with SLURM is logged. The sacct command displays job accounting data in a variety of forms for your analysis.

+ +

If you are having trouble viewing output from sacct try running this command first

+ +
[alice@login]$ sacct --start=2018-01-01
+
+ +

How To Select Jobs

+ +
    +
  • +

    To display information about a specific job or list of jobs use -j or --jobs followed by a job number or comma separated list of job numbers.

    + +
      [alice@login]$ sacct --jobs job1,job2,job3
    +
    + +
  • +
  • +

    To select information about jobs in a certain date range use --start and --end. Without them, sacct will only return jobs from the current day.

    + +
      [alice@login]$ sacct --start=YYYY-MM-DD
    +
    +
  • +
  • To select information about jobs in a certain time range use --starttime and --endtime. The default start time is 00:00:00 of the current day, unless used with -j, in which case the default start time is Unix Epoch 0. The default end time is the time of running the command. Valid time formats are
      HH:MM[:SS] [AM|PM]
      MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]
      MM/DD[/YY]-HH:MM[:SS]
      YYYY-MM-DD[THH:MM[:SS]]
    + +
      [alice@login]$ sacct --starttime 08/23 --endtime 08/24
    +
    +
  • +
  • +

    To display another user’s jobs use --user

    + +
      [alice@login]$ sacct --user BuckyBadger
    +
    +

    +
  • +
  • +

    To only show statistics relevant to the job allocation itself, without taking individual job steps into consideration, use -X. This can be useful when trying to figure out which part of a job errored out.

    + +
      [alice@login]$ sacct -X
    +
    +

    +
  • +
+ +

Displaying Specific Fields

+ +

sacct can display different fields about your jobs. You can use the --helpformat flag to get a full list.

+ +
[alice@login]$ sacct --helpformat
+
+ + + +

When looking for information about your jobs, CHTC recommends using these fields:

+
elapsed
+end
+exitcode
+jobid
+ncpus
+nnodes
+nodelist
+ntasks
+partition
+start
+state
+submit
+user
+
+ +

For example, run

+ +
sacct --start=2020-01-01 --format=jobid
+
+ +

to see the job IDs of all jobs run since 1/1/2020.
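Since --format accepts a comma-separated list, several of the recommended fields can be combined in one query; for example:

[alice@login]$ sacct -X --start=2020-01-01 --format=jobid,partition,state,elapsed,exitcode
+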

+ +

4. Removing or Holding Jobs

+ +

You can kill and/or remove your job from the +queue with the following:

+ +
[alice@login]$ scancel job#
+
+ +

where job# is the number shown for your job in the squeue output.

+ +

If you want to leave a job in the queue, but prevent it from running immediately, +you can “hold” a submitted job by using:

+ +
[alice@login]$ scontrol hold job#
+
+ +

To release jobs that are held so that they can run, use this command:

+ +
[alice@login]$ scontrol release job#
+
+ +
+
+ + + + +
+ + + + + + +
HPC Guides
+
+ + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + + + + + + + + + + +
+ +
+ + + +
+
+
+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/hpc-overview.html b/preview-fall2024-info/uw-research-computing/hpc-overview.html new file mode 100644 index 000000000..fdf88cf80 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/hpc-overview.html @@ -0,0 +1,753 @@ + + + + + + +HPC Cluster Overview + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+

+ HPC Cluster Overview +

+ + + + +

High-Performance Computing at CHTC

+ +

The CHTC high-performance computing (HPC) cluster provides dedicated support for large, singular computations that use specialized software (e.g. MPI) to achieve internal parallelization of work across multiple servers of dozens to hundreds of cores.

+ +

Is high-performance computing right for me? Only computational work that fits the above description is appropriate for the HPC Cluster. Computational work that can complete on a single node in less than a few days will be best supported by our larger high-throughput computing (HTC) system (which also includes specialized hardware for extreme memory, GPUs, and other cases). For more information, please see Our Approach.

+ +

To get access to the HPC Cluster, please complete our +New User Consultation Form. After your request is received, +a Research Computing Facilitator will follow up to discuss the computational needs +of your research and connect you with computing +resources (including non-CHTC services) that best fit your needs.

+ +

HPC Cluster User Policies

+ +

See our User Policies and Expectations for details on general CHTC and HPC cluster policies.

+ +

HPC Hardware and Configuration

+ +

The HPC Cluster consists of two login nodes and many compute (aka execute) nodes. All users log in at a login node, and all user files on the shared file system are accessible on all nodes. Additionally, all nodes are tightly networked (200 Gbit/s Infiniband) so they can work together as a single "supercomputer", depending on the number of CPUs you specify.

+ +

Operating System and Software

+ +

All nodes in the HPC Cluster are running CentOS Stream 8 Linux.

+ +

The SLURM scheduler version is 22.05.6.

+ +

To see more details of other software on the cluster, see the HPC Software page.

+ +

Login Nodes

+ +

The login node for the cluster is: spark-login.chtc.wisc.edu

+ +

For more details on logging in, see the “Connecting to CHTC” guide linked above.

+ +

Execute Nodes and Partitions

+ +

Only execute nodes will be used for performing your computational work. The execute nodes are organized into several "partitions", including the shared, pre, and int partitions, which are available to all HPC users, as well as research group-specific partitions that consist of researcher-owned hardware and which all HPC users can access in a backfill capacity via the pre partition (more details below).

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Partition | p-name | # nodes (N) | t-default | t-max | max cores/job | cores/node (n) | RAM/node (GB) |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Shared | shared | 45 | 1 day | 7 days | 320 | 64 or 128 | 512 |
| Interactive | int | 2 | 1 hr | 4 hrs | 16 | 64 or 128 | 512 (max 64 per job) |
| Pre-emptable (backfill) | pre | 45 | 4 hrs | 24 hrs | 320 | 64 or 128 | 512 |
| Owners | unique | 19 | 24 hrs | 7 days | unique | 64 or 128 | 512 |
+ +
    +
  • +

    shared compute nodes each have 64 or 128 cores and 512 GB of RAM.
    +Jobs submitted to this partition +can request and use up to 7 days of running time.

    +
  • +
  • +

    int consists of two compute nodes and is intended for short and immediate interactive testing on a single node (up to 16 CPUs, 64 GB RAM). Jobs submitted to this partition can run for up to 4 hours.

    +
  • +
  • +

    pre (i.e. pre-emptable) is an underlying partition encompassing all HPC Cluster nodes and is intended for more immediate turn-around of shorter, smaller, and/or interactive sessions requiring more than the 4 hour time limit of the int partition. Jobs submitted to pre are run as back-fill on any idle nodes, including researcher-owned compute nodes, meaning these jobs may be pre-empted by higher priority jobs. By default, pre-empted jobs will be re-queued (to run again) if they were submitted with an sbatch script.

    +
  • +
+ +

Fair Share Allocation

+ +

To promote fair access to HPC computing resources, all users are limited to 10 concurrently +running jobs (if you need to queue more, please get in touch). Additionally, users are restricted to a total of 720 cores +across all running jobs (core limits do not apply on research group partitions of +more than 720 cores).

+ +

When determining the order in which to run jobs, the following policies are applied, in order of significance, to job priority determinations:

+ +

A. User priority decreases as the user accumulates hours of CPU time over the last 21 days, across +all queues. This “fair-share” policy means that users who have run many/larger jobs in the near-past +will have a lower priority, and users with little recent activity will see their waiting jobs start sooner. +(The cluster does not have a strict “first-in-first-out” queue policy.)

+ +

B. Job priority increases with job wait time. After the history-based user priority calculation in (A), +the next most important factor for each job’s priority is the amount of time that each job has already +waited in the queue. For all the jobs of a single user, these jobs will most closely follow a “first-in-first-out” policy.

+ +

C. Job priority increases with job size, in cores. This least important factor slightly favors larger jobs, so that the scheduler can take advantage when large numbers of nodes happen to become available at once (requiring less time wasted deliberately draining nodes for larger jobs). So, among a user's jobs submitted at roughly the same time, a larger job may run first, if the number of nodes necessary for the larger job is already available.

+ +

Data Storage and Management

+ +

Data space in the HPC Cluster filesystem is not backed up and should be treated as temporary by users. Only files necessary for actively-running jobs should be kept on the filesystem, and files should be removed from the cluster when jobs complete. A primary copy of any essential files (e.g. software, submit files, input) should be kept in an alternate, non-CHTC storage location.

+ +

Each user will receive two primary data storage locations:

+ +
    +
  1. +

    /home/username with an initial disk quota of 30GB +and 250,000 items. Your home directory is meant to be used for files +you use repeatedly, like submit file templates, source code, software +installations, and reference data files.

    +
  2. +
  3. +

    /scratch/username with an initial disk quota of 100GB and +250,000 items. Jobs should always be submitted and run out of +/scratch. It is the space for all working data, including individual +job inputs, job outputs, and job log/stderr/stdout files.

    +
  4. +
+ +
+

What about /software?

+ +

If you are installing software meant to be shared within a group, we can create a dedicated folder for you in the /software space; email us (chtc@cs.wisc.edu) if this applies to you!

+
+ +

To check how many files and directories you have in +your /home or /scratch directory see the +instructions below.

+ +

Changes to quotas for either of these locations are available upon request +per our Request a Quota Change guide. If you don't +know how many files your installation creates, because it's more than +the current items quota, simply indicate that in your request.

+ +

CHTC Staff reserve the right to remove any significant amounts of data +on the HPC Cluster in our efforts to maintain filesystem performance +for all users.

+ +

Local scratch space is available on each execute node in /local/$USER. +This space is NOT automatically cleaned out, so if you use this space, +be sure to remove the files before the end of your job script or +interactive session.
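For example, a job script could stage its work through the local space and clean up afterwards (a minimal sketch; my_program and the file names are hypothetical):

mkdir -p /local/$USER/work
+cp /scratch/$USER/input.dat /local/$USER/work/
+cd /local/$USER/work
+/home/$USER/mytool/bin/my_program input.dat > output.dat   # hypothetical program
+cp output.dat /scratch/$USER/
+cd /scratch/$USER
+rm -rf /local/$USER/work          # clean up the local space before the job ends
+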

+ +

Tools for managing home and software space

+ +

You can use the command get_quotas to see what disk +and items quotas are currently set for a given directory path. +This command will also let you see how much disk is in use and how many +items are present in a directory:

+ +
[username@hpclogin1 ~]$ get_quotas /home/username /scratch/username
+
+ +

Alternatively, the ncdu command can also be used to see how many +files and directories are contained in a given path:

+ +
[username@hpclogin1 ~]$ ncdu /home/username
+[username@hpclogin1 ~]$ ncdu /scratch/username
+
+ +

When ncdu has finished running, the output will give you a total file +count and allow you to navigate between subdirectories for even more +details. Type q when you're ready to exit the output viewer. More +info here: https://lintut.com/ncdu-check-disk-usage/

+ +
+
+ + + + +
+ + + + + + +
HPC Guides
+
+ + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + + + + + + + + + + +
+ +
+ + + +
+
+
+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/hpc-software.html b/preview-fall2024-info/uw-research-computing/hpc-software.html new file mode 100644 index 000000000..69d4dd021 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/hpc-software.html @@ -0,0 +1,650 @@ + + + + + + +Use Software on the HPC Cluster + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+

+ Use Software on the HPC Cluster +

+ +

This page describes how to install and run software on CHTC’s HPC Cluster.

+ +

Contents

+ +
    +
  1. General Software Policies
  2. +
  3. Using Pre-Installed Software in Modules
  4. +
  5. Installing Software on the Cluster
  6. +
  7. Using Software in Jobs
  8. +
+ +

1. General Software Policies

+ +

At CHTC, we install a minimal set of software for use on our systems. On the HPC Cluster, CHTC staff manage installations of the following types of programs:

+ +
    +
  • Compilation tools and common dependencies (e.g. MPI, different GCC versions)
  • +
  • Software that requires a shared license (e.g. COMSOL)
  • +
+ +

Information on how to access CHTC-managed installations is in the next +section of this guide. If you need to use a program not in that group, the instructions +for creating your own installation follow.

+ +

If you have questions or concerns about installing your own software or +the available dependencies, contact the facilitation team at chtc@cs.wisc.edu.

+ +

2. Using Pre-Installed Software in Modules

+ +

All software on the cluster that is installed by CHTC staff is available via +a tool called “modules”.

+ +

A. See Available Software Modules

+ +

There are two ways to search through the software modules on the HPC cluster:

+ +
    +
  1. View all modules + This command will show all software modules available: +
    [alice@login]$ module avail
    +
    +
  2. +
  3. Search for specific modules + If you are searching for a specific software module, you can use the + module spider command with part of the software name. For example, to + search for Open MPI modules, you would type: +
    [alice@login]$ module spider openmpi
    +
    +
  4. +
+ +

B. Access Software in Modules

+ +

Once you find a software module that you want to use, you need to “load” it +into your command line environment to make it active, filling in module_name with +the name you found through one of the above steps.

+ +
[alice@login]$ module load module_name
+
+ +
+

When to Load Modules

+ +

You can load modules to compile code (see below). If you do this, make sure to load +the same modules as part of your job script before running the main command.

+ +

You can also load modules to run specific software. If done for interactive +testing, this should be done in an interactive job; otherwise, the module +should be loaded in the job submit file.
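For example, the start of an sbatch submit file might look like the following, where module_name is a placeholder for the module you used at compile time:

#!/bin/sh
+#SBATCH --partition=shared
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=32
+#SBATCH --time=0-01:00:00
+
+module load module_name
+srun -n 32 ./my_program        # hypothetical program built with that module
+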

+
+ +

C. Unload Software in Modules

+ +

If you no longer want to use a specific software installation, you can “unload” +the software module with the following command:

+ +
[alice@login]$ module unload module_name
+
+ +

If you want to clear your command line environment and start over, run the following:

+ +
[alice@login]$ module purge
+
+ +

3. Installing Software on the Cluster

+ +

A. Overview

+ +

Unless you are using a licensed software program provided via modules, you +are able to compile and install the software you need on the HPC Cluster.

+ +

Compilation can be done via an interactive job as described in +our HPC Job Submission Guide. +Software should be installed to your /home/username +directory. If using CHTC’s provided compilation tools via modules, make +sure to load the needed modules before compiling and to load the same +modules in your job submission.

+ +

For groups that would like to share software installations among group +members, please contact us about getting a shared “group” directory.

+ +

If you are new to software installation, see the section below for +a more step-by-step description of the process.

+ +

B. Step by Step Process

+ +
    +
  1. Download Source Code - download the source code for your desired program. We + recommend downloading it to your /home/username directory on the login node. + You should only need the source code until the software is properly installed, but if desired, you may keep a zipped copy of + the source code in /home.
  2. +
  3. Read the Docs - try to find the installation instructions, either online or + in the downloaded source code. In particular, you’ll want to note if there are + any special requirements for dependencies like MPI or the compiler needed.
  4. +
  5. Load Modules - if you are using software modules to help you build your + code, load them now. Keep track of what you use so that you can load them + in your job submit file later. We also recommend doing a module purge before + loading your compiling modules to make sure you’re starting from a clean environment.
  6. +
  7. Install - most scientific software follows the three-step installation process of configure - make - make install (see the sketch after this list).
      +
    1. configure - this step checks for tools and requirements needed to compile the code. This is the step where you set the final installation location of a program. The option for setting this location is typically called the “prefix”; a common syntax is: $ ./configure --prefix=/home/user. This is where you will want to set the installation location to be your /home directory.
    2. +
    3. make - this step compiles and links the code, turning it from human-readable + source code to compiled binary code. This is usually the most time consuming + step of the installation process.
    4. +
    5. make install - this step copies compiled files to the final installation location + (usually specified in the configure step).
    6. +
    +
  8. +
  9. Clean Up - the final installation should place all needed files into a + subdirectory of your /home directory. The source code and location where + you ran the compilation commands can be removed at this point.
  10. +
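A minimal sketch of the Load Modules and Install steps above, for a hypothetical package mytool-1.0 (the module name is also hypothetical; check module avail for real names, and run the compilation in an interactive job as described above):

[alice@spark-a005]$ module purge
+[alice@spark-a005]$ module load gcc                      # hypothetical compiler module
+[alice@spark-a005]$ tar -xzf mytool-1.0.tar.gz && cd mytool-1.0
+[alice@spark-a005]$ ./configure --prefix=/home/alice/mytool
+[alice@spark-a005]$ make
+[alice@spark-a005]$ make install
+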
+ + + +

4. Using Software in Jobs

+ +

The commands to run your software will go in the job’s submit file, as described +in our HPC job submission guide.

+ +

If you used one of the software modules to compile your code, make sure you +load it in your job’s submit file before running your main command.

+ +

You can access your software by including the path to its location in your +/home directory, or by setting the PATH environment variable to include +the software location and then running the command.
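For example, assuming a hypothetical installation at /home/alice/mytool, either of the following would work:

/home/alice/mytool/bin/mytool input.dat         # call the program by its full path
+
+export PATH=/home/alice/mytool/bin:$PATH        # or add its bin directory to PATH
+mytool input.dat
+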

+ + +
+
+ + + + +
+ + + + + + +
HPC Guides
+
+ + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + + + + + + + + + + +
+ +
+ + + +
+
+
+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/hpc-spack-install.html b/preview-fall2024-info/uw-research-computing/hpc-spack-install.html new file mode 100644 index 000000000..1e095e6ae --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/hpc-spack-install.html @@ -0,0 +1,833 @@ + + + + + + +Install Software Using Spack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+

+ Install Software Using Spack +

+ +

CHTC uses Spack (https://github.com/spack/spack) for installing and managing software packages on the HPC cluster for all users to use, via the module command. Recently, Spack has developed a feature that allows users to integrate their local installation of Spack with the system-wide installation. This means that when a user installs software with their local installation of Spack, they can automatically incorporate the system-wide packages to satisfy their software’s dependencies (similar to Conda and Miniconda).

+ +

This guide describes how to install and manage software using Spack, including how to install and use a specific compiler.

+ +

This guide assumes you or your group has already set up your local installation of Spack. If you have not installed Spack, follow the instructions in Setting Up Spack on HPC.

+ +

Contents

+ +
    +
  1. Installing Software Using Spack
  2. +
  3. Using Software Installed in Spack
  4. +
  5. Installing and Using a Specific Compiler
  6. +
+ +

1. Installing Software Using Spack

+ +

Once your local installation of Spack has been properly configured, you are now ready to install software using Spack.

+ +

Check the documentation for the program you want to install to see if they have instructions for installation using Spack. Even if your program can’t be installed using Spack, you can still use it to install the dependencies that your program needs.

+ +
+

Note: For a group installation of Spack, you will not be able to modify or remove the packages installed by a different user. We recommend that you consult with the rest of your group for permission before proceeding.

+
+ +

A. Start an Interactive Job

+ +

Before creating a Spack environment or installing packages using Spack, first start an interactive Slurm job:

+ +
srun --mpi=pmix -n4 -N1 -t 240 -p int --pty bash
+
+ +

For more information on interactive Slurm jobs, see our guide Submitting and Managing Jobs Using SLURM.

+ +
+

When creating an environment, Spack automatically detects the hardware of the machine being used at the time and configures the packages as such. +Since the login server uses newer hardware than the execute servers, creating an environment on the login server (not in an interactive job) is a bad idea.

+
+ +

B. Creating and Using a Spack Environment

+ +

Software installations with Spack should be done inside of a Spack environment, to help manage the shell and the paths to access programs and libraries needed for a particular software installation.

+ +

To create a Spack environment, run the command

+ +
spack env create yourEnvironmentName
+
+ +

where you should replace yourEnvironmentName with your desired name for the environment. You can then activate the environment with

+ +
spack env activate yourEnvironmentName
+
+ +

You will need to activate the environment when you wish to use the software that was installed in that environment.

+ +
+

You can see a list of your available environments using

+ +
spack env list
+
+ +

and you can see which environment you are currently using with

+ +
spack env status
+
+ +

To deactivate the environment, run

+ +
spack env deactivate
+
+ +

or close the terminal session.

+
+ +

C. Finding Program Packages in Spack

+ +

Once inside an active Spack environment, you can run the following command to see what packages are installed in the current environment

+ +
spack find
+
+ +

For a new environment, this will show that there are no programs installed. The output of this command will update after you install program packages in the environment.

+ +

To search for packages to install using Spack, use the command

+ +
spack list nameOfProgram
+
+ +

where you should replace nameOfProgram with the program that you are interested in finding. Spack will search for the package and print out a list of all the packages that match that name. (The first time you run this command may take several minutes while Spack downloads a current list of packages that can be installed.)

+ +

To learn more about an available package, use the exact name of the program and run

+ +
spack info exactNameOfProgram
+
+ +

This will print out information about the program, including a short description of the program, a link to the developer’s website, and the available versions of the program and its dependencies.

+ +

D. Adding Package Specifications to the Environment

+ +

Once you find the packages that you want to install, add their specifications to the environment using

+ +
spack add exactNameOfProgram
+
+ +

Spack will automatically decide which version of the program to use at installation time based on the other packages that you’ve added.

+ +

If you want a specific version of a package, you can specify it by appending @= to the end of the package name, followed by the version number. For example,

+ +
spack add python@=3.10
+
+ +

will tell the environment that you want to install version 3.10 of Python. There are additional ways of defining specifications for package versions, the compiler to be used, and dependencies. The documentation for Spack provides the details on how this is done.

+ +

If you need to install a compiler, or need to use a specific compiler to install the desired packages, see section 3. Installing and Using a Specific Compiler.

+ +

E. Installing Packages in an Environment

+ +

Once you have identified the package(s) you would like to install and have added the specifications to your environment,

+ +

i. Create the local scratch directory

+ +

Using the default configuration from Setting Up Spack on HPC, Spack will try to use the machine’s local disk space for staging and compiling files before transferring the finished results to the final installation directory. Using this space will greatly improve the speed of the installation process. Create the local directory with the command

+ +
mkdir /local/yourNetID/spack_build
+
+ +

where you should replace yourNetID with your NetID. At the end of the session, remember to delete this directory so that other people can use the disk space in their jobs.

+ +
+

If the directory already exists, that means you forgot to remove it after one of your previous Spack installation sessions. Simply remove the directory and make it again.

+ +
rm -rf /local/yourNetID/spack_build
+mkdir /local/yourNetID/spack_build
+
+
+ +

ii. Check the programs/packages to be installed

+ +

If you’ve added the installation specifications to the environment, then you can check the installation plan using the command

+ +
spack spec -lI
+
+ +

(the first letter after the hyphen is a lowercase “L” and the second letter is an uppercase “i”).

+ +
+

This command identifies what dependencies Spack needs in order to install your desired packages along with how it will obtain them. Assuming there are no problems, it will print a list of the packages and their dependencies, where entries that begin with a green [+] have already been installed somewhere in your local Spack installation, while those that begin with a green [^] are referencing the system installation, and those beginning with a gray - will need to be downloaded and installed.

+
+ +

Most users should see many packages with a green [^] in the first column. If you do not, there are several possible explanations:

+ + + +

If you are satisfied with the results, then you can proceed to install the programs.

+ +

iii. Install the environment packages

+ +

Assuming that you are in an interactive Slurm session and have activated the desired environment containing the package specifications, you can run

+ +
spack install -j 4
+
+ +

to install the packages inside of the Spack environment, where the number that comes after -j needs to match the number that you noted when you started the interactive session (the one after -n when you ran the srun command for the interactive session). You can also add the -v option to have the installation be verbose, which will cause Spack to print the compile and make outputs in addition to the standard Spack output.

+ +

Depending on the number and complexity of the programs you are installing, and how much can be bootstrapped from the system installation, the installation step can take anywhere from several minutes to several hours.

+ +
+

If something goes wrong or your connection is interrupted, the installation process can be resumed at a later time without having to start over from the beginning. Make sure that you are in an interactive Slurm session and that you have activated the Spack environment, then simply rerun the spack install command again.

+
+ +

iv. Finishing the installation

+ +

After the installation has successfully finished, you should be able to see that the programs have been installed by running

+ +
spack find
+
+ +

which should list the programs under the compiler heading used for installing the programs.

+ +

You may need to deactivate and reactivate the environment in order to properly use the programs that have been installed.

+ +
spack env deactivate
+spack env activate yourEnvironmentName
+
+ +

Once you are satisfied that the programs have been installed properly, you can remove packages that are build-only (not used for running the packages you installed) using the command

+ +
spack gc
+
+ +

Finally, remove the local build directory that Spack used during the installation with

+ +
rm -rf /local/yourNetID/spack_build
+
+ +

and then enter exit to end the interactive session.

+ +

To use the packages that you installed, follow the instructions in the next section, 2. Using Software Installed in Spack. If you want to create custom modules using the installed packages, see our guide Creating Custom Modules Using Spack.

+ +

F. Removing an Environment and Uninstalling Unneeded Packages

+ +

You may find it necessary to remove a Spack environment, or packages installed using Spack. To uninstall a package, simply run

+ +
spack uninstall yourPackageName
+
+ +

where you should replace yourPackageName with the name of the package that you want to remove. This command will only work for packages that you ‘added’ to the Spack environment, as described above.

+ +

To remove an environment, first make sure that you have deactivated the environment with

+ +
spack env deactivate
+
+ +

and then run

+ +
spack env rm yourEnvironmentName
+
+ +

where you should replace yourEnvironmentName with the name of the environment that you want to remove. Note that this will not necessarily remove the packages that were installed in the environment! After the environment has been removed, you can uninstall the packages that are no longer needed using the command

+ +
spack gc
+
+ +

2. Using Software Installed in Spack

+ +

If your account is configured correctly for using Spack, and the software has been installed inside of a Spack environment, then to use the software all you need to do is activate the corresponding environment. Simply use the command

+ +
spack env activate yourEnvironmentName 
+
+ +

and Spack will update your shell accordingly. (Remember that you can see the available Spack environments by running the command spack env list). Once the environment has been activated, you should be able to use the packages just as normal. You can confirm you are using a command installed using Spack by running

+ +
which nameOfYourCommand
+
+ +

where you replace nameOfYourCommand with the name of the command. The command will output a path, and you should see something like spack/var/spack/environments/yourEnvironmentName/ in that path.

+ +

For submitting jobs using Slurm, you will need to make sure that you activate the Spack environment near the beginning of your sbatch file before the srun command. For example,

+ +
#!/bin/sh
+# This file is called submit-script.sh
+#SBATCH --partition=shared       # default "shared", if not specified
+#SBATCH --time=0-04:30:00        # run time in days-hh:mm:ss
+#SBATCH --nodes=1                # require 1 node
+#SBATCH --ntasks-per-node=64     # cpus per node (by default, "ntasks"="cpus")
+#SBATCH --mem-per-cpu=4000       # RAM per CPU in megabytes
+#SBATCH --error=job.%J.err
+#SBATCH --output=job.%J.out
+
+# v---Remember to activate your Spack environment!! 
+spack env activate yourEnvironmentName
+
+srun --mpi=pmix -n 64 /home/username/mpiprogram
+
+ +

When you submit this job to Slurm and it executes the commands in the sbatch file, it will first activate the Spack environment, and then your program will be run using the programs that are installed inside that environment.

+ +
+

Some programs include explicit module load commands in their execution, which may override the paths provided by the Spack environment. If your program appears to use the system versions of the packages instead of the versions installed in your Spack environment, you may need to remove or modify these explicit commands. Consult your program’s documentation for how to do so. You may want to create your own custom modules and modify your program to explicitly load your custom modules. See Creating Custom Modules Using Spack for more information on how to create your own modules using Spack.

+
+ +

3. Installing and Using a Specific Compiler

+ +

By default, Spack will attempt to compile packages it installs using one of the system compilers, most likely with GCC version 11.3.0. Some programs, however, may need to be compiled using a specific compiler, or require that their dependencies be built using a specific compiler. While this is possible using Spack, the process for installing and using a compiler is a bit more complicated than that for installing “regular” packages as was described above.

+ +

In brief, you will first create a separate environment for installing the compiler. Then you will add that compiler to the list of available compilers that Spack can use. Finally, you can install your desired packages in a new environment as before, but you will need to specify which compiler to use.

+ +

A. Install the Compiler in its Own Environment

+ +

i. Identify the compiler and version

+ +

The first step is to identify the compiler and version you need for your program. Consult your program’s documentation for the requirements that it has. Then follow the instructions in C. Finding Program Packages in Spack to find the package name and confirm the version is available.

+ +

ii. Create the compiler’s environment

+ +

Next, create and activate an environment for installing the desired compiler. For example,

+ +
spack env create compilerName_compilerVersion
+spack env activate compilerName_compilerVersion
+
+ +

where you should replace compilerName and compilerVersion with the name and version of the desired compiler.

+ +

iii. Add the compiler specification to its environment

+ +

Once you’ve activated the environment, add the exact specification for the compiler to the Spack environment with

+ +
spack add compilerName@=compilerVersion
+
+ +

where you need to replace compilerName and compilerVersion with the name and version of the compiler that you identified above.

+ +

iv. Install the compiler in its environment

+ +

Next, follow the instructions in E. Installing Packages in an Environment to install the desired compiler in this environment. Installing the compiler may take several hours, so consider increasing the number of threads to speed up the installation.
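For example, one could request more cores for the interactive job and pass the matching count to Spack (a sketch; adjust the time and core counts as needed):

srun --mpi=pmix -n8 -N1 -t 240 -p int --pty bash
+spack install -j 8
+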

+ +

B. Add the Compiler to Spack

+ +

i. Identify the compiler’s installation path

+ +

After installing the compiler, you need to find its location. First, activate the compiler’s environment with spack env activate compilerName_compilerVersion. Next, use the following command to save the path to the compiler as the shell variable compilerPath:

+ +
compilerPath=$(spack location -i compilerName@=compilerVersion)
+
+ +

where you need to replace compilerName and compilerVersion with the name and version of the compiler that you installed. You can print out the path using the command echo $compilerPath.

+ +

ii. Give the compiler’s path to Spack

+ +

Now that you know where the compiler is installed, deactivate the environment with spack env deactivate. Then run the following command to tell Spack to add the compiler to its list of available compilers:

+ +
spack compiler add $compilerPath
+
+ +

iii. Confirm compiler has been added to Spack

+ +

The command

+ +
spack compiler list
+
+ +

will print out the list of compilers that Spack can use, and should now show compilerName@compilerVersion in the results.

+ +

C. Install Packages Using the New Compiler

+ +

Once the compiler has been installed and recognized by Spack, you can now create and activate a new environment for installing your desired packages, following the instructions in Installing Software Using Spack.

+ +

To make sure the packages are installed using your desired compiler, you need to include the compiler when you add the package specification to the environment (D. Adding Package Specifications to the Environment). To include the compiler in the specification, you need to add the symbol % followed by the compiler name and version to the end of the spack add command. For example,

+ +
spack add python@=3.10 %gcc@=9.5.0
+
+ +

will use GCC version 9.5.0 to compile Python 3.10 when installing the package. As a general rule, you should use the same compiler for installing all of your packages within an environment, unless your program’s installation instructions say otherwise.

+ +
+
+ + + + +
+ + + + + + +
HPC Guides
+
+ + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + + + + + + + + + + +
+ +
+ + + +
+
+
+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/hpc-spack-modules.html b/preview-fall2024-info/uw-research-computing/hpc-spack-modules.html new file mode 100644 index 000000000..a2343d840 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/hpc-spack-modules.html @@ -0,0 +1,622 @@ + + + + + + +Create Custom Modules Using Spack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+

+ Create Custom Modules Using Spack +

+ +

CHTC uses Spack (https://github.com/spack/spack) for installing and managing software packages on the HPC cluster for all users to use, via the module command. Recently, Spack has developed a feature that allows users to integrate their local installation of Spack with the system-wide installation. This means that when a user installs software with their local installation of Spack, they can automatically incorporate the system-wide packages to satisfy their software’s dependencies (similar to Conda and Miniconda).

+ +

This guide describes how to create and use custom personal and shared modules for software packages installed using Spack. For instructions on how to install software using Spack for you and/or your research group, see our guide Installing Software Using Spack.

+ +

Contents

+ +
    +
  1. Location of the Module Files
  2. +
  3. Using Custom Modules
  4. +
  5. Creating Custom Modules Using Spack
  6. +
  7. Working with Multiple Environments
  8. +
  9. Using Hierarchy Based Modules
  10. +
+ +

1. Location of the Module Files

+ +

In order to load a software package using the module command, there must be a corresponding “module file” containing the information that the module command needs in order to load the software package. Spack will automatically generate the required content of the module files, but Spack will need to know where these module files should be saved. Similarly, the module command will need to know where the module files are stored.

+ +

If you followed the instructions in Setting Up Spack on HPC, then the default location of your module files is /home/yourNetID/spack_modules where yourNetID is your NetID.

+ +
+

If you are using a shared installation of Spack for a group, and if the person who set up the installation followed the instructions in Setting Up Spack on HPC, then the default location of the shared module files is likely /home/groups/yourGroupName/spack_modules. You can confirm this by running the command spack config get modules | grep -A 2 'roots' and examining the listed paths (make sure you do not have a Spack environment activated when you do so). If the paths are /home/$user/spack_modules, then you should follow the instructions in iii. Updating location of module files in Setting Up Spack on HPC before proceeding.

+
+ +

2. Using Custom Modules

+ +

Spack will automatically create module files for the packages that you explicitly install, in the location described above.

+ +

To update the module command with the default location of the new module files, run the command

+ +
module use ~/spack_modules
+
+ +
+

For a group installation of Spack, you’ll need to modify the module use command to specify the path to your group’s directory. The following should work if your group followed our instructions when setting up Spack:

+ +
module use /home/groups/yourGroupName/spack_modules
+
+
+ +

Now if you run module avail you should see your custom modules listed in the first section, with the system modules listed in the following section. You can then use the module load command as usual to load your custom module for use in the current terminal session.

+ +

Note: Spack will not automatically create module files for the “upstream” dependencies (packages already installed on the system). If your module load test does not work, follow the instructions in the next section to generate these additional module files.

+ +

To have your custom modules found automatically by the module command, add the above module use command to the end of your ~/.bash_profile file.
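For example, for an individual installation (adjust the path for a group installation):

echo 'module use ~/spack_modules' >> ~/.bash_profile
+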

+ +

3. Creating Custom Modules Using Spack

+ +

You may need to manually create the custom module files, especially after editing any of the modules configuration for Spack. To create the module files, first activate the desired environment with

+ +
spack env activate yourEnvironmentName
+
+ +

(where you should replace yourEnvironmentName with the your environment name) and then enter the following command:

+ +
spack module tcl refresh
+
+ +

Spack will print out a list of all the packages installed in the current environment, and you’ll be asked to confirm if you wish to create module files for all of these packages.

+ +

To remove old module files, or to update the directory structure, add the option --delete-tree, i.e.

+ +
spack module tcl refresh --delete-tree
+
+ +

If you tried to load a module but received the error(s) ‘Executing this command requires loading “{module file}” which failed while processing the following…‘, then you will need to generate the “upstream” module files in order to use your desired module. In this case, the following command should resolve the issue:

+ +
spack module tcl refresh --upstream-modules
+
+ +
+

Note: You should only run this command inside of an activated Spack environment, otherwise you will be prompted to create module files for ALL Spack packages, including those installed system-wide, regardless of whether they are required dependencies!

+
+ +

Lastly, note that Spack will not directly create module files for software installed independently of Spack (for example, using pip install).

+ +

4. Working with Multiple Environments

+ +

If you have more than one Spack environment that you wish to create modules for, we recommend that you modify the above procedure in order to better organize the list of modules.

+ +

For each environment that you wish to create module files for, activate the environment and then edit the configuration so that the module files are saved into a sub-directory named for that environment. For example,

+ +
spack env activate my-first-env
+spack config add modules:default:roots:tcl:/home/\$user/spack_modules/my-first-env
+
+ +

first activates my-first-env and then updates the configuration to save the module files to /home/yourNetID/spack_modules/my-first-env.

+ +
+

For a group installation of Spack, you should instead specify the path to your group’s directory. For example,

+ +
spack env activate my-first-env
+spack config add modules:default:roots:tcl:/home/groups/yourGroupName/spack_modules/my-first-env
+
+ +

You should similarly modify the following commands to account for the different paths.

+
+ +

Repeat the process for your other environments.

+ +

To use the modules for a particular environment, run the module use command but specify the path to the environment’s subdirectory. Continuing with our example,

+ +
module use ~/spack_modules/my-first-env
+
+ +

will update the module command with the location of the modules for using my-first-env.

+ +

If you want to switch environments, we recommend that you “unuse” the first environment and then “use” the second, i.e.

+ +
module unuse ~/spack_modules/my-first-env
+module use ~/spack_modules/my-second-env
+
+ +

While you can have more than one environment in “use” by the module command, this increases the chance of loading modules with conflicting dependencies that could result in unexpected behavior.

+ +

5. Using Hierarchy Based Modules

+ +

There are two “flavors” of the module system: tcl and lmod. We use tcl for managing the system modules, and have recommended using tcl throughout this guide. The main difference between the two “flavors” of modules is that tcl uses a “flat” directory structure (all the module files are located in the same central directory) whereas lmod uses a “hierarchy” directory structure (where the module files are grouped by their compiler or MPI version). The hierarchical structure of lmod can be very useful in organizing duplicate module files that differ only by how they were compiled.

+ +

To use the lmod style module files, you should first edit your modules configuration to enable lmod and disable tcl, then refresh your module files.

+ +
spack config add 'modules:default:enable:lmod'
+spack config remove 'modules:default:enable:tcl'
+spack module lmod refresh --delete-tree
+
+ +

More advanced options regarding the naming and structure of the lmod module files can be configured by editing the modules.yaml (described in iii. Updating location of module files in Setting Up Spack on HPC). See the Spack documentation for more information on how to configure module files: https://spack.readthedocs.io/en/latest/module_file_support.html#.

+ +
+
+ + + + +
+ + + + + + +
HPC Guides
+
+ + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + + + + + + + + + + +
+ +
+ + + +
+
+
+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/hpc-spack-setup.html b/preview-fall2024-info/uw-research-computing/hpc-spack-setup.html new file mode 100644 index 000000000..6fcd242fe --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/hpc-spack-setup.html @@ -0,0 +1,741 @@ + + + + + + +Set Up Spack on HPC + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+

+ Set Up Spack on HPC +

+ +

CHTC uses Spack (https://github.com/spack/spack) for installing and managing software packages on the HPC cluster for all users to use, via the module command. Recently, Spack has developed a feature that allows users to integrate their local installation of Spack with the system-wide installation. This means that when a user installs software with their local installation of Spack, they can automatically incorporate the system-wide packages to satisfy their software’s dependencies (similar to Conda and Miniconda).

+ +

This guide describes how to set up a local copy of Spack and integrate it with the system installation, either for an individual user or for a group of users. For instructions on how to install packages with Spack, see our other guide, Installing Software Using Spack.

+ +
+

If your group has already set up a shared group installation of Spack, you can skip to the end of this guide: 3. Using a Shared Group Installation.

+
+ +

Contents

+ +
    +
  1. Setting Up Spack for Individual Use
  2. +
  3. Setting Up Spack for Group Use
  4. +
  5. Using a Shared Group Installation
  6. +
+ +

1. Setting Up Spack for Individual Use

+ +

A. Downloading Spack (Individual)

+ +

First, log in to the HPC cluster.

+ +

You can then install Spack following its documentation. Download the Spack code from their GitHub repository:

+ +
git clone -c feature.manyFiles=true https://github.com/spack/spack.git
+
+ +

and then activate Spack by sourcing the setup script with the . command

+ +
. spack/share/spack/setup-env.sh
+
+ +

That’s it! You can test that Spack has been installed by entering spack and you should see the help text print out. But before trying to install packages using your Spack installation, you should configure it to recognize the system installation of Spack.

+ +
+

This guide assumes that you ran the git clone command in your home directory, i.e. /home/yourNetID. If you did not, then run the following command to print the full path to your Spack installation.

+ +
echo $SPACK_ROOT
+
+ +

We will refer to this path as the SpackRootPath and you will need to use this path where noted in the instructions below.

+
+ +

B. Using Spack in Future Sessions (Individual)

+ +

Although Spack is now installed, for each session in which you want to use it you will need to rerun the command

+ +
. /home/yourNetID/spack/share/spack/setup-env.sh
+
+ +

A more convenient option is simply to update your account to run this command whenever you log in. Add the command, with the full path to the setup script, to the end of the .bash_profile file in your home directory (e.g. by editing it with nano ~/.bash_profile). If you ran the git clone command in your home directory, then the line you add should be

+ +
. /home/yourNetID/spack/share/spack/setup-env.sh
+
+ +

where you need to replace yourNetID with your NetID.

+ +
+

If Spack was not installed to your home directory, use the following command instead, where you need to replace SpackRootPath with the path that you noted above.

+ +
. SpackRootPath/share/spack/setup-env.sh
+
+
+ +

C. Obtain the Provided Configuration Files (Individual)

+ +

To simplify the process of configuring your local installation of Spack, we have provided a folder with the necessary configuration files. All that you need to do is copy it to your home directory using the following command.

+ +
cp -R /software/chtc/el9/spack-user-config ~/.spack
+
+ +

Your local Spack installation will automatically find the configuration files and will now recognize the packages that are installed system-wide. You can confirm this with the command

+ +
spack find
+
+ +

This should show a list of packages, including those you see when you run the module avail command. A total of ~120 packages should be listed.

+ +

You are now ready to use Spack for installing the packages that you need! See the instructions in Installing Software Using Spack.

+ +

2. Setting Up Spack for Group Use

+ +

The following instructions for a group installation of Spack assume that a shared directory has already been created for your group, and that you have access to this shared folder. We also recommend communicating with your colleagues before proceeding.

+ +

A. Downloading Spack (Group)

+ +

First, log in to the HPC cluster, and navigate to your group’s shared directory in /home with

+ +
cd /home/groups/yourGroupName
+
+ +

where you should replace yourGroupName with your group’s name. Note this path for use throughout this guide, and communicate it to your group members for configuring their access to the installation.

+ +

You can then install Spack following its documentation. Download the Spack code from their GitHub repository:

+ +
git clone -c feature.manyFiles=true https://github.com/spack/spack.git
+
+ +

and then activate Spack by sourcing the setup script with the . command.

+ +
. spack/share/spack/setup-env.sh
+
+ +

That’s it! You can test that Spack has been installed by entering spack and you should see the help text print out. But before trying to install packages using your Spack installation, you should configure it to recognize the system installation of Spack.

+ +
+

This guide assumes that you ran the git clone command in your group’s home directory, i.e. /home/groups/yourGroupName. If you did not, then run the following command to obtain the full path to your Spack installation. We will refer to this path as the SpackRootPath and you will need to use this path where noted in the instructions below.

+ +
echo $SPACK_ROOT
+
+
+ +

B. Using Spack in Future Sessions (Group)

+ +

Although Spack is now installed, for each session in which you want to use it you will need to rerun the command

+ +
. /home/groups/yourGroupName/spack/share/spack/setup-env.sh
+
+ +

A more convenient option is simply to update your account to run this command whenever you log in. You and your group members should add the command, with the full path to the setup script, to the end of the .bash_profile file in your respective home directories (e.g. by editing it with nano ~/.bash_profile). For a group installation, the line should look like

+ +
. /home/groups/yourGroupName/spack/share/spack/setup-env.sh
+
+ +

where you need to replace yourGroupName with the name of your group.

+ +
+

If Spack was not installed in your group’s home directory, use the following command instead, where you will need to replace SpackRootPath with the path that you noted above.

+ +
. SpackRootPath/share/spack/setup-env.sh
+
+
+ +

C. Obtain the Provided Configuration Files (Group)

+ +

i. Copy the configuration files

+ +

To simplify the process of configuring your local installation of Spack, we have provided a folder with the necessary configuration files. All that you need to do is copy it to your home directory using the following command.

+ +
cp -R /software/chtc/el9/spack-user-config/ /home/groups/yourGroupName/.spack
+
+ +

where you need to replace yourGroupName with your group’s name.

+ +

ii. Updating location of configuration files

+ +

The group installation of Spack needs to be instructed on where to find these configuration files. You can do this by running the command

+ +
export SPACK_USER_CONFIG_PATH=/home/groups/yourGroupName/.spack
+
+ +

and Spack should now recognize the packages that are installed system-wide. You can confirm this with the command

+ +
spack find
+
+ +

This should show a list of packages similar to what you see when you run the module avail command.

+ +

To ensure that the configuration files are found in future terminal sessions, you and your group members need to edit your respective ~/.bash_profile files to include the above export command. That is, use a command-line text editor to open the file at ~/.bash_profile and add the following line to the end of the file:

+ +
export SPACK_USER_CONFIG_PATH=/home/groups/yourGroupName/.spack
+
+ +

iii. Updating location of module files

+ +

If you or someone in your group is interested in creating custom modules following the instructions in the guide Creating Custom Modules Using Spack, then you should update the location where the module files will be saved. You can update the location with the following commands

+ +
spack config add 'modules:default:roots:lmod:/home/groups/yourGroupName/spack_modules'
+spack config add 'modules:default:roots:tcl:/home/groups/yourGroupName/spack_modules'
+
+ +

where you replace yourGroupName with your group’s name.

+ +

You are now ready to use Spack for installing the packages that you need! See the instructions in Installing Software Using Spack.

+ +

3. Using a Shared Group Installation

+ +

Users who want to use a shared group installation of Spack, but who did not set up the installation, only need to modify their ~/.bash_profile file with instructions regarding the path to the shared group installation and its configuration files.

+ +
    +
  1. Log in to the HPC cluster (Connecting to CHTC). +
    ssh yourNetID@spark-login.chtc.wisc.edu
    +
    +
  2. +
  3. +

    Edit the .bash_profile file in your home directory (/home/yourNetID). +You should be able to simply add the following two lines to the end of the file

    + +
    . /home/groups/yourGroupName/spack/share/spack/setup-env.sh
    +export SPACK_USER_CONFIG_PATH=/home/groups/yourGroupName/.spack
    +
    + +

    where yourGroupName should be replaced with the name of your group. Confirm the exact commands with the user who installed Spack for your group.

    + +
    +

    You should be able to find the requisite paths if necessary. For the first line, the command

    +
    find /home/groups/yourGroupName -type d -name spack | grep "share/spack"
    +
    + +

    should give the path you need; simply add “setup-env.sh” to the end of the path. For the second line, the command

    +
    find /home/groups/yourGroupName -type d -name .spack | sort -n | head -n 1
    +
    + +

    should give the path you need. If it doesn’t, try again without | sort -n | head -n 1 to see the full list of matches, and choose the appropriate one.

    +
    +
  4. +
  5. Source the .bash_profile with +
    . ~/.bash_profile
    +
    + +

    or else close the terminal and log in again.

    +
  6. +
+ +

Once configured, you can follow the instructions in our guide Installing Software Using Spack to install or use already-installed packages in Spack.

+ +

A. Switching Between Spack Installations

+ +

You can easily switch between different Spack installations by creating scripts containing the commands listed in Step 2. above, and then sourcing the one that you want to use.

+ +

For example, let’s say you want to use a personal installation of Spack for an independent research project, but want to use a group installation of Spack as part of a collaboration. In that case, you would create two scripts, load-my-spack.sh and load-group-spack.sh, and save them to some central location like ~/bin. In each script, you provide the path to the setup-env.sh file and the .spack configuration directory for the respective Spack installations. The example contents of these scripts are provided below, where you should replace yourNetID with your NetID and yourGroupName with the group name of your collaboration.

+ +

load-my-spack.sh

+ +
. /home/yourNetID/spack/share/spack/setup-env.sh
+export SPACK_USER_CONFIG_PATH=/home/yourNetID/.spack
+
+ +

load-group-spack.sh

+ +
. /home/groups/yourGroupName/spack/share/spack/setup-env.sh
+export SPACK_USER_CONFIG_PATH=/home/groups/yourGroupName/.spack
+
+ +

To activate your personal Spack installation, simply run

+ +
. path/to/load-my-spack.sh
+
+ +

To activate the group Spack installation, run

+ +
. path/to/load-group-spack.sh
+
+ +

For submitting jobs, remember to load the correct Spack installation at the start of the submission script.

+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/hpc/guides.html b/preview-fall2024-info/uw-research-computing/hpc/guides.html new file mode 100644 index 000000000..d823c8a17 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/hpc/guides.html @@ -0,0 +1,506 @@ + + + + + + +HPC Computing Guides + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+

+ HPC Computing Guides +

+ +

+Below is a list of guides for some of the most common tasks our users need to +carry out as they begin and continue to use the HPC resources at the CHTC. +

+ +

User Expectations

Read through these user expectations and policies before using CHTC services.

HPC Documentation

+ +
+ + + + + + + + +
+
+
+ + + + + + + +

Job Submission

+
+ + + Submitting and Managing Jobs Using SLURM + + +
+
+ + + + + + + + + + + + + + + + +
+ +

External Documentation

+ + + + +Icon Credits + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/htc-el8-to-el9-transition.html b/preview-fall2024-info/uw-research-computing/htc-el8-to-el9-transition.html new file mode 100644 index 000000000..918e10510 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/htc-el8-to-el9-transition.html @@ -0,0 +1,750 @@ + + + + + + +HTC System Transition to a New Linux Version (CentOS Stream 9) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ HTC System Transition to a New Linux Version (CentOS Stream 9) +

+ +

Starting in March 2024, CHTC’s high throughput computing (HTC) system began upgrading +the Linux distribution and version we use on our servers to CentOS Stream 9. This transition is expected to complete in May 2024.

+ +

Note that this page only applies to a transition on the HTC system (submitting jobs +with HTCondor). The high performance computing (HPC) cluster will be upgraded in +the near future as well and have a separate transition +page.

+ +

All updates to the HTC system will be reflected on this page; significant changes may +also include a notification to the chtc-users mailing list.

+ +

Important Dates

+ +

Note: By default, CHTC-managed submit servers automatically add a requirement that jobs run on servers running our primary operating system, unless otherwise specified by the user.

+ +
    +
  • April 2024: HTC system will support CentOS 7, CentOS Stream 8, and CentOS Stream 9. By default, all jobs not using a software container will continue to match to servers running CentOS Stream 8; however, users should begin using software containers or testing jobs on servers running CentOS Stream 9.
  • +
  • May 2024: Default operating system requirements for jobs will change from CentOS Stream 8 to CentOS Stream 9.
  • +
+ +

What You Need to Do

+ +

If your jobs use containers (Docker, Singularity/Apptainer)

+ +

No action is needed for researchers already using a Docker or Singularity/Apptainer software container in their jobs. Because software containers have a small operating system installed inside of them, these jobs carry everything they need with them and do not rely significantly on the host operating system. By default, your jobs will match to any operating system in the HTC pool, including the new CentOS Stream 9 hosts.

+ +

All other jobs (not using containers)

+ +

Researchers not already using a Docker or Apptainer software container will need to either

+ +
    +
  • (a) test their software/code on a CentOS Stream 9 machine to see if their software needs to be reinstalled. See Transition to New Operating System.
  • +
  • or
  • +
  • (b) switch to using a software container (recommended). See below for additional information.
  • +
+ +

If you would like to access as much computing capacity as possible, consider using a Docker or Apptainer software container for your jobs so that your jobs can match to a variety of operating systems. See the information below for detailed instructions on creating and using software containers.

+ +

Options For Transitioning Your Jobs

+ +
    +
  1. Use a Container (recommended)
  2. +
  3. Transition to New Operating System
  4. +
+ + + +

Using a software container to provide a base version of Linux will allow you to run on any node in the HTC system regardless of the operating system it is running, rather than limiting you to a subset of nodes.

+ +

CHTC provides helpful information for learning about creating and using Docker and Apptainer software containers:

+ +

Apptainer

+ + +

Docker

+ + +

CHTC users are welcome to reach out to the Facilitation team via email or in office hours for help installing their software into a container.

+ +

Option 2: Transition to a New Operating System

+ +

At any time, you can require a specific operating system +version (or versions) for your jobs. Instructions for requesting a specific operating system(s) are outlined here:

+ + + +

This option is more limiting because +you are restricted to operating systems used by CHTC, and the number of nodes +running that operating system.

+ +

Researchers that do not wish to use containers for their job should test their jobs on the CentOS Stream 9 machines as soon as possible so that jobs are not significantly disrupted by this transition. We recommend:

+ +
    +
  • Test your jobs by requesting the new operating system (chtc_want_el9 = true) for 1-2 test jobs, as sketched below. (Note that after May 1, this option will no longer be required, as EL9 will be the default.)
  • +
  • If needed, recompile your code.
  • +
+ +
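As a minimal sketch, a test submit file for this might look like the following, where only the chtc_want_el9 line is specific to this transition and everything else (executable, file names, resource requests) is a generic placeholder:

# test-el9.sub -- sketch: run one short test job on the new operating system
chtc_want_el9 = true

executable = test.sh
log = test_$(Cluster).log
error = test_$(Cluster).err
output = test_$(Cluster).out

request_cpus = 1
request_memory = 1GB
request_disk = 1GB

queue 1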

If you are having trouble getting your jobs to run successfully on the new operating system, please contact the facilitation team at chtc@cs.wisc.edu or come to office hours.

+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/htc-known-issues.html b/preview-fall2024-info/uw-research-computing/htc-known-issues.html new file mode 100644 index 000000000..656816f43 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/htc-known-issues.html @@ -0,0 +1,736 @@ + + + + + + +Known Issues on the HTC + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Known Issues on the HTC +

+ +

This page documents some common and known issues encountered on the HTC system. While this page can be beneficial in troubleshooting, it does not contain a comprehensive list of errors.

+ +

Visit our Get Help page to find more resources for troubleshooting.

+ + + +
+ +

[General] When submitting a job, it doesn't run / goes on hold and shows the error "Job credentials are not available".

+ +

Cause:

+

This is a complicated bug that can strike randomly. We’re working on a fix.

+

Solution:

+

To work around this issue, run the following command on the access point before resubmitting the job.

+
echo | condor_store_cred add-oauth -s scitokens -i -
+
+ +
+ +

[Container] When building an Apptainer, "apt" commands in the %post block fail to run.

+ +

Example error message:

+
Couldn't create temporary file /tmp/apt.conf.9vQdLs for passing config to apt-key
+
+

Cause:

+

The container needs global read/write permissions in order to update or install packages using the apt command.

+

Solution:

+

Add chmod 777 /tmp to the front of your %post block. See the example below:

+
Bootstrap: docker
+From: ubuntu:22.04
+
+%post
+    chmod 777 /tmp
+    apt-get update -y
+
+

We also recommend using the -y option to prevent installation from hanging due to interactive prompts.

+ +
+ +

[Container] When attempting to run a Docker container, it fails with the error message "[FATAL tini (7)] exec ./myExecutable.sh failed: Exec format error".

+ +

Cause:

+

The Docker container was likely built on an Apple computer with an ARM processor, which produces an image incompatible with the x86-64 Linux machines on the HTC system.

+

Solution:

+

To resolve this, when building your Docker container, use the command:

+
docker build --platform linux/amd64 .
+
+ +
+ +

[GPU] My GPU job has been in the queue for a long period of time and is not starting.

+ +

Cause:

+

Jobs default to using CentOS Stream 9, but most GPU nodes are currently running CentOS Stream 8.

+

Solution:

+

Add the following line to your submit file and resubmit:

+
requirements = (OpSysMajorVer > 7)
+
+ +


+
+ +

Can’t find your issue?

+

Visit our Get Help page.

+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/htc-modules.html b/preview-fall2024-info/uw-research-computing/htc-modules.html new file mode 100644 index 000000000..eb88310f5 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/htc-modules.html @@ -0,0 +1,979 @@ + + + + + + +Use Software Available in Modules + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Use Software Available in Modules +

+ +

This guide describes when and how to use software that is available as pre-installed modules on the HTC system, using MPI as an example.

+ +

To best understand the below information, users should already have an +understanding of:

+ +
    +
  • Using the command line to: navigate within directories, +create/copy/move/delete files and directories, and run their +intended programs (aka "executables").
  • +
  • The CHTC's Intro to Running HTCondor Jobs
  • +
+ +

Overview

+ +
    +
  1. General Software Policies
  2. +
  3. Using Pre-Installed Software in Modules
  4. +
  5. Installing Software on the HTC System
  6. +
  7. Example: Using an MPI module in HTC Jobs
  8. +
+ +

1. General Software Policies

+ +

In CHTC, we install a minimal set of software for use +on our systems. On the HTC System, CHTC staff manage installations of +the following types of programs:

+ +
    +
  • Compilation tools and common dependencies (e.g. MPI, different GCC versions)
  • +
  • Software that requires a shared license (e.g. COMSOL)
  • +
+ +

Information on how to access CHTC-managed installations is in the next +section of this guide.

+ +

2. Using Pre-Installed Software in Modules

+ +

All software on the HTC system that is installed by CHTC staff is available via +a tool called “modules”.

+ +

A. See Available Software Modules

+ +

There are two ways to search through the software modules on the HTC system:

+ +
    +
  1. View all modules + This command will show all software modules available: +
    [alice@submit]$ module avail
    +
    +
  2. +
  3. Search for specific modules + If you are searching for a specific software module, you can use the + module spider command with part of the software name. For example, to + search for Open MPI modules, you would type: +
    [alice@submit]$ module spider openmpi
    +
    +
  4. +
+ +

B. Load Software in Modules

+ +

Once you find a software module that you want to use, you need to “load” it +into your command line environment to make it active, filling in module_name with the name you found through one of the above steps.

+ +
[alice@submit]$ module load module_name
+
+ +
+

When to Load Modules

+ +

You can load modules to compile code (see below). If you do this, make sure to load +the same modules as part of your job script before running the main command.
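As a minimal sketch, a job script that re-loads a compile-time module might look like this (module_name and myprogram are the placeholders used throughout this guide):

#!/bin/bash
# Enable the module command, then load the same module used when compiling
. /etc/profile.d/modules.sh
module load module_name
# Run the program that was built against that module
./myprogram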

+ +

You can also load modules to run specific software. If done for interactive +testing, this should be done in an interactive job; otherwise, the module +should be loaded in the job submit file.

+
+ +

C. Unload Software in Modules

+ +

If you no longer want to use a specific software installation, you can “unload” +the software module with the following command:

+ +
[alice@submit]$ module unload module_name
+
+ +

If you want to clear your command line environment and start over, run the following:

+ +
[alice@submit]$ module purge
+
+ +

3. Installing Software on the HTC System

+ +

A. Overview

+ +

Unless you are using a licensed software program provided via modules, you +are able to compile and install the software you need on the HTC System.

+ +

Compilation can be done via an interactive job as described in +our HTC Compiling or Testing Code with an Interactive Job guide. +If using CHTC’s provided compilation tools via modules, make +sure to load the needed modules before compiling and to load the same +modules in your job submission.

+ +

For groups that would like to share software installations among group +members, please contact us about getting a shared “group” directory.

+ +

If you are new to software installation, see the section below for +a more step-by-step description of the process.

+ +

B. Step by Step Process

+

The process for installing software is described in more detail in our Compiling or Testing Code with an Interactive Job guide.

+
    +
  1. Start an Interactive Job - it is necessary to build software in an interactive job as noted in Compiling or Testing Code with an Interactive Job
  2. +
  3. Download Source Code - download the source code for your desired program. + You should only need the source code until the software is properly installed, but if desired, you may keep a zipped copy of + the source code in your workspace.
  4. +
  5. Read the Docs - try to find the installation instructions, either online or + in the downloaded source code. In particular, you’ll want to note if there are + any special requirements for dependencies like MPI or the compiler needed.
  6. +
  7. Load Modules - if you are using software modules to help you build your + code, load them now. Keep track of what you use so that you can load them + in your job submit file later. We also recommend doing a module purge before + loading your compiling modules to make sure you’re starting from a clean environment.
  8. +
  9. Install - most scientific software follows the three-step installation process of configure - make - make install; a consolidated sketch of these steps follows this list.
      +
    1. configure - this step checks for tools and requirements needed to compile the code. This is the step where you set the final installation location of a program. The option for setting this location is typically called the “prefix”; a common syntax is: $ ./configure --prefix=/home/user. This is where you will want to set the installation location to be your /home directory.
    2. +
    3. make - this step compiles and links the code, turning it from human-readable + source code to compiled binary code. This is usually the most time consuming + step of the installation process.
    4. +
    5. make install - this step copies compiled files to the final installation location + (usually specified in the configure step).
    6. +
    +
  10. +
+ +
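Put together, a minimal sketch of these three steps looks like the following, where the --prefix path is a placeholder for a directory in your /home space:

./configure --prefix=/home/yourNetID/my-software
make
make install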

+

4. Example: Using an MPI module in HTC Jobs

+ +

Below is the process of setting up HTC jobs that use the MPI modules to run. This process can be modified for other software available in modules as well.

+ +

Before you begin, review our below discussion of MPI requirements and +use cases, to make sure that our multi-core MPI capabilities +are the right solution for your computing problem.

+ +

Once you know that you need to run multi-core jobs that use MPI on our +HTC system, you will need to do the following:

+ +
    +
  1. Compile your code using our MPI module system
  2. +
  3. Create a script that loads the MPI module you used for compiling, and then runs your code
  4. +
  5. Make sure your submit file has certain key requirements
  6. +
+ +

+ +

A. Requirements and Use Cases

+ +

Most jobs on CHTC's HTC system are run on one CPU (sometimes called a +"processor", or "core") and can be executed without any special +system libraries. However, in some cases, it may be advantageous to run +a single program on multiple CPUs (also called multi-core), in order to +speed up single computations that cannot be broken up and run as +independent jobs.

+ +

Running on multiple CPUs can be enabled by the parallel programming +standard MPI. For MPI jobs to compile and run, CHTC has a set of MPI +tools installed to a shared location that can be accessed via software +modules.

+ +

+ +

B. View MPI Modules on the HTC System

+ +

MPI tools are accessible on the HTC system through software "modules", +which are tools to access and activate a software installation. To see +which MPI packages are supported in the HTC, you can type the following +command from the submit server:

+ +
[alice@submit]$ module avail
+
+ +

Your software may require newer versions of MPI libraries than those +available via our modules. If this is the case, send an email to +chtc@cs.wisc.edu, to find out if we can install +that library into the module system.

+ +

C. Submitting MPI jobs

+ +

+ +

1. Compile MPI Code

+ +

You can compile your program by submitting an interactive build job to +one of our compiling servers. Do not compile code on the submit server, +as doing so may cause performance issues. The interactive job is +essentially a regular HTCondor job, but without an executable; you +are the one running the commands instead (in this case, to compile the +program).

+ +

Instructions for submitting an interactive build/compile job are +available on our interactive submission guide. +The only line in the submit file that you need to change is +transfer_input_files to reflect all the source files on which your +program depends. Otherwise, go through the steps described in that guide +until immediately after running condor_submit -i.

+ +

Once your interactive job begins on one of our compiling servers, you +can confirm which MPI modules are available to you by typing:

+ +
[alice@build]$ module avail
+
+ +

Choose the module you want to use and load it with the following +command:

+ +
[alice@build]$ module load mpi_module
+
+ +

where mpi_module is replaced with the name of the MPI module you'd +like to use.

+ +

After loading the module, compile your program. If your program is organized in directories, make sure to create a tar.gz file of anything you want copied back to the submit server. Once you type exit, the interactive job will end, and any files created during the interactive job will be copied back to the submit location for you.
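For example, if your compiled installation lives in a directory called program, you could pack it up before exiting (the archive name here is a placeholder that matches the my_install.tar.gz used in the run script below):

[alice@build]$ tar -czf my_install.tar.gz program/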

+ +

If your MPI program is especially large (more than 100 MB, compiled), or +if it can only run from the exact location to which it was installed, +you may also need to take advantage of CHTC's shared software location +or our public web proxy called Squid. Email CHTC's Research Computing +Facilitators at chtc@cs.wisc.edu if this is the case.

+ +

+ +

2. Script For Running MPI Jobs

+ +

To run your newly compiled program within a job, you need to write a +script that loads an MPI module and then runs the program, like so:

+ +
#!/bin/bash
+
+# The following three commands are **REQUIRED** to enable modules, and then to load the appropriate MPI module
+export PATH
+. /etc/profile.d/modules.sh
+module load mpi_module
+
+# Untar your program installation, if necessary
+tar -xzf my_install.tar.gz
+
+# Command to run your OpenMP/MPI program
+# (This example uses mpirun, other programs
+# may use mpiexec, or other commands)
+mpirun -np 8 ./path/to/myprogram
+
+ +

Replace mpi_module with the name of the module you used to compile your code, myprogram with the name of your program, and the 8 in -np 8 with the number of CPUs you want the program to use. There may be additional options or flags necessary to run your particular program; make sure to check the program's documentation about running multi-core processes.

+ +

+ +

3. Submit File Requirements

+ +

There are several important requirements to consider when writing a +submit file for multicore jobs. They are shown in the sample submit file +below and include:

+ +
    +
  • +

    Require access to MPI modules. To ensure that your job will have +access to CHTC software modules, including MPI modules, you must +include the following in your submit file.

    + +
    requirements = (HasChtcSoftware == true)
    +
    +
  • +
  • +

    The script you wrote above (shown as run_mpi.sh below) should be your submit file “executable”, and your compiled program and any other needed files should be listed in transfer_input_files.

    +
  • +
+ +

A sample submit file for multi-core jobs is given below:

+ +
# multicore.sub
+# A sample submit file for running a single multicore (8 cores) job
+executable = run_mpi.sh
+# arguments = (if you want to pass any to the shell script)
+
+## Specify the name of HTCondor's log, standard error, and standard out files
+log = mc_$(Cluster).log
+output = mc_$(Cluster).out
+error = mc_$(Cluster).err
+
+# Tell HTCondor how to handle input files
+should_transfer_files = YES
+transfer_input_files = (this should be a comma-separated list of input files if needed)
+
+# Requirement for accessing new set of modules
+requirements = ( HasChtcSoftware == true ) 
+
+## Request resources needed by your job
+request_cpus = 8
+request_memory = 8GB
+request_disk = 2GB
+
+queue
+
+ +

After the submit file is complete, you can submit your jobs using +condor_submit.

+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/htc-overview.html b/preview-fall2024-info/uw-research-computing/htc-overview.html new file mode 100644 index 000000000..7f3e7c843 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/htc-overview.html @@ -0,0 +1,861 @@ + + + + + + +HTC System Overview + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ HTC System Overview +

+ + + + +

+

High-Throughput Computing at CHTC

+ +

The CHTC high-throughput computing (HTC) cluster provides support for a variety of computational research tasks. The HTC system offers CPUs/GPUs, high-memory nodes, and other specialized hardware. Workflows that run well on this system include RNA/DNA sequencing, machine learning workflows, weather modeling, Monte Carlo simulations, and more.

+ +

To get access to the HTC System, please complete our +New User Consultation Form. After your request is received, +a Research Computing Facilitator will follow up to discuss the computational needs +of your research and connect you with computing +resources (including non-CHTC services) that best fit your needs.

+ +

+ +

HTC System User Policies

+ +

See our User Policies and Expectations for details on general CHTC policies.

+ +

+ +

HTC System Specific Policies

+ +

Below are some of the default limits on CHTC’s HTC system. Note that as a large-scale +computing center, we want you to be able to run at a large scale - often much larger +than these defaults. Please contact the facilitation team whenever you encounter one +of these limits so we can adjust your account settings or discuss alternative ways to +achieve your computing goals.

+ +
    +
  • Jobs with long runtimes. There is a default run limit of 72 +hours for each job queued in the HTC System, once it starts running. +Jobs longer than this will be placed in HTCondor’s “hold” state. +If your jobs will be longer, contact the CHTC facilitation team, and we’ll help you to determine the +best solution.
  • +
  • Submitting many jobs from one submit file. HTCondor is designed +to submit thousands (or more) jobs from one submit file. If you are +submitting over 10,000 jobs per submit file or want to queue +more than 50,000 total jobs as a single user, +please email us as we have strategies to +submit that many jobs in a way that will ensure you have as many +jobs running as possible without also compromising queue performance.
  • +
  • Submitting many short jobs from one submit file. While HTCondor +is designed to submit thousands of jobs at a time, many short jobs +can overwhelm the submit server, resulting in other jobs taking much +longer to start than usual. If you plan on submitting over +1000 jobs per submit file, we ask that you ensure each job has a +minimum run time of 5 minutes (on average).
  • +
  • The default disk quota is 20 GB in your /home directory, as a +starting point. You can track your use of disk space and your quota value, +using our Quota Guide. If you need more space +for concurrent work, please send an email to chtc@cs.wisc.edu.
  • +
  • Submitting jobs with "large" files: HTCondor's +normal file transfer mechanism ("transfer_input_files") is good for +files up to 100MB in size (or 500MB total, per job). For jobs with larger +files, please see our guide on File Availability +Options, and contact us to make arrangements.
  • +
+ +

+ +

HTC Hardware and Configuration

+ +

The HTC System consists of several submit servers and many compute (aka execute) nodes. All users log in to a submit server (access point), and submit their workflow as HTCondor jobs that run on the execute nodes.

+ +

+ +

HTC Operating System and Software

+ +

Submit servers in the HTC System are running CentOS 7 Linux.

+ +

Due to the distributed and independent nature of the HTC system’s execute points, there can be a variety of operating systems on the pool of execution point resources (especially for users that opt into running jobs on the globally available OSPool operated by the OSG). However, the default operating system is CentOS Stream 8 Linux unless users request to run on a different operating system using their HTCondor submit file.

+ +

The HTC system is a test bed for the HTCondor Software Suite, and thus is typically running the latest or soon-to-be-released versions of HTCondor.

+ +

To see more details of other software on the cluster, see our HTC Guides page.

+ +

+ +

HTC Submit Servers

+ +

There are multiple submit servers for the HTC system. The two most common submit servers are ap2001.chtc.wisc.edu and ap2002.chtc.wisc.edu (formerly submit1.chtc.wisc.edu and submit2.chtc.wisc.edu, respectively). All users will be notified what submit server they should log into when their account is created.

+ +

+ +

HTC Execute Nodes

+ +

Only the execute nodes are used to perform your computational work.

+ +

By default, when users submit HTCondor jobs, their jobs will only run on execute points owned and managed by CHTC staff. As of January 2024, there are approximately 40,000 CPU slots and 80+ GPU slots available in the CHTC execute pool.

+ +

Some users, particularly those requesting GPUs, may wish to access additional execute points so that they may have more jobs running simultaneously. HTC users can opt in to allowing their jobs to run on additional execute points not owned or managed by CHTC staff. There are two additional execute pools that users can opt into using: the UW Grid and the OSG’s OSPool. There are many advantages to opting into running on these execute pools, such as accessing more GPUs, accessing different computer architectures, and having more jobs running in parallel. However, because these machines are not managed by CHTC and thus are backfilling on hardware owned by other entities, it is recommended that users only opt into using these resources if they have short (<~10 hours), interruptible jobs. For more information, see the Scaling Beyond Local HTC Capacity guide.

+ +

Fair Share Allocation

+ +

To promote fair access to HTC computing resources, all users are subject to a fair-share policy. This “fair-share” policy means that users who have run many jobs in the near-past will have a lower priority, and users with little recent activity will see their waiting jobs start sooner. +(The HTC system does not have a strict “first-in-first-out” queue policy.)

+ +

Resource requests will also impact the number of jobs a user has running. Smaller jobs (those requesting smaller amounts of CPUs, memory, and disk) as well as more flexible jobs (those requesting to use a variety of GPUs instead of a specific GPU type) are able to match to more execute points than larger, less flexible jobs. Thus, these jobs will start sooner and more jobs will run in parallel.

+ +

+

Data Storage and Management

+ +

Data space in the HTC system is not backed-up and should be +treated as temporary by users. Only files necessary for +actively-running jobs should be kept on the filesystem, and files +should be removed from the system when jobs complete. A primary copy of any +essential files (e.g. software, submit files, input) should be kept in an +alternate, non-CHTC storage location.

+ +

CHTC Staff reserve the right to remove any significant amounts of data +on the HTC System in our efforts to maintain filesystem performance +for all users.

+ +

+ +

Tools for Managing /home and /staging Space

+ +

+

Check /home Quota and Usage

+

To see what disk and item quotas are currently set for your /home directory, use the quota -vs command. See the example below:

+ +
[alice@submit]$ quota -vs
+Disk quotas for user alice (uid 20384): 
+     Filesystem   space   quota   limit   grace   files   quota   limit   grace
+      /dev/sdb1  12690M  20480M  30720M            161k       0       0        
+
+ +

The output will list your total data usage under space on the /dev/sdb1 filesystem that manages user /home data:

+
    +
  • space (MB): the amount of disk space you are currently using
  • +
  • quota (MB): your soft quota. This is the value we recommend you consider to be your “quota”.
  • +
  • limit (MB): the hard limit or absolute maximum amount of space you can use. This value is almost always 10GB larger than your soft quota, and is only provided as a helpful spillover space. Once you hit this hard limit value, you and your jobs will no longer be allowed to save data.
  • +
  • files: the number of files in your /home directory. /home does not typically restrict the number of files a user can have, which is why there are no values for file quota and limit
  • +
+ +

Each of the disk space values is given in megabytes (MB), which can be converted to gigabytes (GB) by dividing by 1024. In the example above, the 12690 MB of usage is about 12.4 GB of the 20480 MB (20 GB) soft quota.

+ +

+ +

Check /staging Quota and Usage

+ +

To see your /staging quota and usage, run the get_quotas command with the path to your staging directory. For example,

+
[NetID@ap2001 ~]$ get_quotas /staging/NetID
+
+ +

If the output of this command is blank, it means you do not have a /staging directory. Contact CHTC staff to request one at any time.

+ +

+ +

Alternative Commands to Check Quotas

+

Alternatively, the ncdu command can also be used to see how many +files and directories are contained in a given path:

+ +
[NetID@ap2001 ~]$ ncdu /home/NetID
+[NetID@ap2001 ~]$ ncdu /staging/NetID
+
+ +

When ncdu has finished running, the output will give you a total file +count and allow you to navigate between subdirectories for even more +details. Type q when you're ready to exit the output viewer. More +info here: https://lintut.com/ncdu-check-disk-usage/

+ +

+

Request a Quota Increase

+

Increased quotas on either of these locations are available upon email +request to chtc@cs.wisc.edu after a user has +cleared out old data and run relevant test jobs to inform the request. In your request, +please include both size (in GB) and file/directory counts. If you don't +know how many files your installation creates, because it's more than +the current items quota, simply indicate that in your request.

+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/htc-roadmap.md b/preview-fall2024-info/uw-research-computing/htc-roadmap.md new file mode 100644 index 000000000..7e76d65f2 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/htc-roadmap.md @@ -0,0 +1,154 @@ + + + +HTC Getting Started Summary +==================================== + +Step One + +
+Introduction to the High Throughput Computing Strategy +
+Like on nearly all large-scale compute systems, users of both CHTC's High Throughput and High Performance systems prepare their computational work and submit it as tasks called "jobs" that run on execution points.
+
+High Throughput Computing systems specialize in running many small, independent jobs (< ~20 CPUs/job). On the other hand, High Performance Computing systems specialize in running a few very large jobs that run on more than one node (~30+ CPUs/job).
+
+It is best to keep this distinction in mind when setting up your jobs. On the HTC system, smaller jobs (i.e., those requesting smaller amounts of CPU, memory, and disk resources per job) find an open slot more easily. This means that jobs start sooner and more of them run simultaneously. It is almost always beneficial to break up your analysis pipeline into smaller pieces so that you get more jobs up and running, more quickly.
+
+Unlike on the High Performance system, CHTC staff do not limit the number of jobs a user can have running in parallel on the HTC system, so it is to your advantage to structure your workflow to take advantage of as many resources as possible.
+
+More detailed information regarding CHTC's HTC system can be found in the HTC Overview Guide. +
+ + +Step Two + +
+Log on to a HTC System Access Point +
+Once your request for an account has been approved by a Research Computing Facilitator, you will be emailed your login information. +
+
+For security purposes, every CHTC user is required to be connected to either a University of Wisconsin internet network or the campus VPN, and to use two-factor authentication, when logging in to their CHTC "access point" (also called a "submit server").
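For example, logging in might look like the following, where `ap2001` is a placeholder for the access point named in your welcome email, and `yourNetID` is your NetID:

```
ssh yourNetID@ap2001.chtc.wisc.edu
```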
+
+ + +Step Three + +
+Understand the Basics of Submitting HTCondor Jobs +
+Computational work is run on the CHTC's execution machines by submitting it as “jobs” to the HTCondor job scheduler. Before submitting your own computational work, it is necessary to understand how HTCondor job submission works. The following guide is a short step-by-step tutorial on how to submit basic HTCondor jobs: Practice: Submit HTC Jobs using HTCondor. It is highly recommended that every user follow this short tutorial as these are the steps you will need to know to complete your own analyses. +
+ + +Step Four + +
+Learn to Run Many HTCondor Jobs using one Submit File +
+After following this tutorial, we highly recommend users review the Easily Submit Multiple Jobs guide to learn how you can configure HTCondor to automatically pass files or parameters to different jobs, return output to specific directories, and other easily automated organizational behaviors. +
+
+ + +Step Five + +
+Access your Data on the HTC System +
+Upload your data to CHTC +
+When getting started on the HTC system, it is typically necessary to upload your data files to our system so that they can be used in jobs. For users that do not want to upload data to our system, it is possible to configure your HTCondor jobs to pull/push files using `s3` file transfer, or pull data using standard unix commands (`wget`). +
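As a rough sketch, pulling an input file directly from an S3 bucket can be done with submit file lines like the following (the bucket, file, and credential paths are placeholders; check the HTCondor documentation for the details of `s3` transfers):

```
# fetch input from S3 instead of staging it on CHTC first
aws_access_key_id_file = /home/yourNetID/.s3/access_key_id
aws_secret_access_key_file = /home/yourNetID/.s3/secret_access_key
transfer_input_files = s3://yourbucket/yourdata.tar.gz
```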
+
+To learn how to upload data from different sources, including your laptop, see: + +
+Choose a Location to Stage your Data +
+When uploading data to the HTC system, users need to choose a location to store that data on our system. There are two primary locations: `/home` and `/staging`. +
+`/home` is more efficient at handling "small" files, while `/staging` is more efficient at handling "large" files. For more information on what is considered "small" and "large" data files and to learn how to use files stored in these locations for jobs, visit our HTC Data guides. +
+
+ + +Step Six + +
+Install your Software +
+Our “Software Solutions” guides contain information about how to install and use software on the HTC system. +
+
+Software Containers +
+In general, we recommend installing your software into a "container" if your software relies on a specific version of R/Python, can be installed with `conda`, has many dependencies, or already has a pre-existing container (which many common software packages do). There are many advantages to using a software container; one is that containers carry their own operating system. As a result, jobs with software containers have the most flexibility in where they can run on CHTC or the OSPool. The CHTC website provides several guides on building, testing, and using software containers; a simple example is sketched below.
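As a minimal sketch, an Apptainer definition file can be as simple as the following (the base image and installed package are examples only; replace them with your software's needs):

```
Bootstrap: docker
From: ubuntu:22.04

%post
    chmod 777 /tmp        # avoids an apt permissions issue on CHTC build machines; see Known Issues
    apt-get update -y
    apt-get install -y python3   # example dependency
```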
+
+Use Pre-installed Software in Modules +
+CHTC's infrastructure team has provided a limited collection of software as modules, which users can load and then use in their jobs. This collection includes tools shared across domains, including COMSOL, ANSYS, ABAQUS, GUROBI, and others. To learn how to load this software into your jobs, see our Use Software Available in Modules and Use Licensed Software guides.
+
+Access Software Building Tools on CHTC's Software Building Machines +
+The HTC system contains several machines designed for users to use when building their software. These machines have access to common compilers (e.g., gcc) that are necessary to install many software packages. To learn how to submit an interactive job to log into these machines to build your software, see Compiling or Testing Code with an Interactive Job. +
+ + +Step Seven + +
+Run Test Jobs +
+Once you have your data, software, code, and HTCondor submit file prepared, you should submit several test jobs. The table created by HTCondor in the `.log` file will help you determine the amount of resources (CPUs/GPUs, memory, and disk) your job used, which is beneficial for understanding future job resource requests as well as troubleshooting. The `.out` file will contain all text your code printed to the terminal screen while running, while the `.err` file will contain any standard errors that your software printed out while running. +
+
+Things to look for: +
    +
  • Jobs being placed on hold (hold messages can be viewed using `condor_q jobID -hold`)
  • +
  • Jobs producing your expected output files
  • +
  • Size and number of output files (to make sure output is being directed to the correct location and that your quota is sufficient for all of your output data as you submit more jobs)
  • +
+
+ + + Step Eight +
+ Submit Your Workflow +
+Once your jobs succeed and you have confirmed your quota is sufficient to store the files your job creates, you are ready to submit your full workflow. For researchers interested in queuing many jobs or accessing GPUs, we encourage you to consider accessing additional CPUs/GPUs outside of CHTC. Information is provided in the following step. +
+ + Step Nine +
+ Access Additional Compute Capacity +
 Researchers with jobs that run for less than ~10 hours, use less than ~20GB of data per job, and do not require CHTC modules, can take advantage of additional CPUs/GPUs to run their jobs. These researchers can typically expect to have more jobs running simultaneously.
+
 If you opt into using this additional capacity, your jobs will run on hardware that CHTC does not own. Instead, your jobs will "backfill" on resources owned by research groups, UW-Madison departments and organizations, and a national-scale compute system: the OSG's Open Science Pool. This allows researchers to access capacity beyond what CHTC can provide. To learn how to take advantage of additional CPUs/GPUs, visit Scale Beyond Local HTC Capacity.
+ +Step Ten +
+ Move Your Data off CHTC +
+ Data stored on CHTC systems is not backed up. While CHTC staff try to maintain a stable compute environment, it is possible for unexpected outages to occur that may impact your data on our system. We highly recommend all CHTC users maintain copies of important scripts and input files on another compute system (your laptop, lab server, ResearchDrive, etc.) throughout their analysis. Additionally, as you complete your analysis on CHTC servers, we highly recommend you move your data off our system to a backed up storage location. +
+
+ CHTC staff periodically delete data of users that have not logged in or submitted jobs in several months to clear up space for new users. Eventually, all users should expect their data to be deleted off CHTC servers and should plan accordingly. Data on CHTC is meant to be used for analyses actively being carried out - CHTC is not a long-term storage solution for your data storage needs. +
+ diff --git a/preview-fall2024-info/uw-research-computing/htc/guides.html b/preview-fall2024-info/uw-research-computing/htc/guides.html new file mode 100644 index 000000000..c199f99f4 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/htc/guides.html @@ -0,0 +1,632 @@ + + + + + + +HTC Computing Guides + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+

+ HTC Computing Guides +

+ +

+Below is a list of guides for some of the most common tasks our users need to +carry out as they begin and continue to use the HTC resources at the CHTC. +

+ +

User Expectations

Read through these user expectations and policies before using CHTC services.

HTC Documentation

+ +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + +

Handling Data in Jobs

+
+ + + Transfer Small Input and Output + + + + Transfer Large Input Files Via Squid + + + + Use Large Input and Output Files Via Staging + + +
+
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + +

Troubleshooting

+
+ + + Windows / Linux Incompatibility + + + + Explore and Test Docker Containers + + + + Known Issues on the HTC + + +
+
+ + +
+ +Icon Credits + +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/htcondor-job-submission.html b/preview-fall2024-info/uw-research-computing/htcondor-job-submission.html new file mode 100644 index 000000000..437fdeaeb --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/htcondor-job-submission.html @@ -0,0 +1,882 @@ + + + + + + +Practice: Submit HTC Jobs using HTCondor + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Practice: Submit HTC Jobs using HTCondor +

+ +

Purpose

+ +

This guide discusses how to run jobs on the CHTC using HTCondor.

+ +

Workflow Overview

+ +

The process of running computational workflows on CHTC resources follows the following outline:

+ +

+ +

Terminology:

+ +
    +
  • Access point is where you login and stage your data, executables/scripts, and software to use in jobs.
  • +
  • HTCondor is a job scheduling software that will run your jobs out on the execution points.
  • +
  • The execution points are the set of resources your jobs run on. They comprise the servers (and other technologies) that provide the CPUs, memory, and disk space used by your jobs' computations.
  • +
+ +

Run Jobs using HTCondor

+ +

We are going to run the traditional ‘hello world’ program with a CHTC twist. In order to demonstrate the distributed-resource nature of CHTC’s HTC System, we will produce a ‘Hello CHTC’ message 3 times, where each message is produced within its own ‘job’. Since you will not run the execution commands yourself (HTCondor will do it for you), you need to tell HTCondor how to run the jobs for you in the form of a submit file, which describes the set of jobs.

+ +
+

Note: You must be logged into a CHTC Access Point for the following example to work.

+
+ +

Prepare job executable and submit file on an Access Point

+ +
    +
  1. +

    First, create the executable script you would like HTCondor to run. +For our example, copy the text below and paste it into a file called hello-world.sh (we recommend using a command line text editor) in your home directory.

    + +
    #!/bin/bash
    +#
    +# hello-world.sh
    +# My CHTC job
    +#
    +# print a 'hello' message to the job's terminal output:
    +echo "Hello CHTC from Job $1 running on `whoami`@`hostname`"
    +#
    +# keep this job running for a few minutes so you'll see it in the queue:
    +sleep 180
    +
    + +

    This script would be run locally on our terminal by typing hello-world.sh <FirstArgument>. +However, to run it on CHTC, we will use our HTCondor submit file to run the hello-world.sh executable and to automatically pass different arguments to our script.

    +
  2. +
  3. +

    Prepare your HTCondor submit file, which you will use to tell HTCondor what job to run and how to run it. +Copy the text below, and paste it into file called hello-world.sub. +This is the file you will submit to HTCondor to describe your jobs (known as the submit file).

    + +
    # hello-world.sub
    +# My HTCondor submit file
    +   
    +# Specify your executable (single binary or a script that runs several
    +#  commands) and arguments to be passed to jobs. 
+#  $(Process) will be an integer number for each job, starting with "0"
    +#  and increasing for the relevant number of jobs.
    +executable = hello-world.sh
    +arguments = $(Process)
    +   		
    +# Specify the name of the log, standard error, and standard output (or "screen output") files. Wherever you see $(Cluster), HTCondor will insert the 
    +#  queue number assigned to this set of jobs at the time of submission.
    +log = hello-world_$(Cluster)_$(Process).log
    +error = hello-world_$(Cluster)_$(Process).err
    +output = hello-world_$(Cluster)_$(Process).out
    +   
    +# This line *would* be used if there were any other files
    +# needed for the executable to use.
    +# transfer_input_files = file1,/absolute/pathto/file2,etc
    +   
    +# Tell HTCondor requirements (e.g., operating system) your job needs, 
    +# what amount of compute resources each job will need on the computer where it runs.
    +request_cpus = 1
    +request_memory = 1GB
    +request_disk = 5GB
    +   
    +# Tell HTCondor to run 3 instances of our job:
    +queue 3
    +
    + +

    By using the “$1” variable in our hello-world.sh executable, we are telling HTCondor to fetch the value of the argument in the first position in the submit file and to insert it in place of “$1” in our executable file.

    + +

    Therefore, when HTCondor runs this executable, it will pass the $(Process) value for each job, and hello-world.sh will substitute that value for “$1”.

    + +

    More information on special variables like “$1”, “$2”, and “$@” can be found here.

    +
  4. +
  5. +

    Now, submit your job to HTCondor’s queue using condor_submit:

    + +
    [alice@ap2002]$ condor_submit hello-world.sub
    +
    + +

    The condor_submit command actually submits your jobs to HTCondor. If all goes well, you will see output from the condor_submit command that appears as:

    + +
    Submitting job(s)...
    +3 job(s) submitted to cluster 36062145.
    +
    +
  6. +
  7. +

    To check on the status of your jobs in the queue, run the following command:

    + +
    [alice@ap2002]$ condor_q
    +
    + +

    The output of condor_q should look like this:

    + +
    -- Schedd: ap2002.chtc.wisc.edu : <128.104.101.92:9618?... @ 04/14/23 15:35:17
    +OWNER     BATCH_NAME     SUBMITTED   DONE   RUN    IDLE  TOTAL JOB_IDS
    +Alice ID: 3606214       4/14 12:31      2     1       _      3 36062145.0-2
    +   
    +3 jobs; 2 completed, 0 removed, 0 idle, 1 running, 0 held, 0 suspended
    +
    + +

    You can run the condor_q command periodically to see the progress of your jobs. +By default, condor_q shows jobs grouped into batches by batch name (if provided), or executable name. +To show all of your jobs on individual lines, add the -nobatch option.

    +
  8. +
  9. +

    When your jobs complete after a few minutes, they’ll leave the queue. +If you do a listing of your /home directory with the command ls -l, you should see something like:

    + +
    [alice@submit]$ ls -l
    +total 28
    +-rw-r--r-- 1 alice alice    0 Apr  14 15:37 hello-world_36062145_0.err
    +-rw-r--r-- 1 alice alice   60 Apr  14 15:37 hello-world_36062145_0.out
    +-rw-r--r-- 1 alice alice    0 Apr  14 15:37 hello-world_36062145_0.log
    +-rw-r--r-- 1 alice alice    0 Apr  14 15:37 hello-world_36062145_1.err
    +-rw-r--r-- 1 alice alice   60 Apr  14 15:37 hello-world_36062145_1.out
    +-rw-r--r-- 1 alice alice    0 Apr  14 15:37 hello-world_36062145_1.log
    +-rw-r--r-- 1 alice alice    0 Apr  14 15:37 hello-world_36062145_2.err
    +-rw-r--r-- 1 alice alice   60 Apr  14 15:37 hello-world_36062145_2.out
    +-rw-r--r-- 1 alice alice    0 Apr  14 15:37 hello-world_36062145_2.log
    +-rw-rw-r-- 1 alice alice  241 Apr  14 15:33 hello-world.sh
    +-rw-rw-r-- 1 alice alice 1387 Apr  14 15:33 hello-world.sub
    +
    + +

    Useful information is provided in the user log, standard error, and standard output files.

    + +

    HTCondor creates a transaction log of everything that happens to your jobs. +Looking at the log file is very useful for debugging problems that may arise. +Additionally, at the completion of a job, the .log file will print a table describing the amount of compute resources requested in the submit file compared to the amount the job actually used. +An excerpt from hello-world_36062145_0.log produced due the submission of the 3 jobs will looks like this:

    + +
    …
    +005 (36062145.000.000) 2023-04-14 12:36:09 Job terminated.
    +	(1) Normal termination (return value 0)
    +		Usr 0 00:00:00, Sys 0 00:00:00  -  Run Remote Usage
    +		Usr 0 00:00:00, Sys 0 00:00:00  -  Run Local Usage
    +		Usr 0 00:00:00, Sys 0 00:00:00  -  Total Remote Usage
    +		Usr 0 00:00:00, Sys 0 00:00:00  -  Total Local Usage
    +	72  -  Run Bytes Sent By Job
    +	265  -  Run Bytes Received By Job
    +	72  -  Total Bytes Sent By Job
    +	265  -  Total Bytes Received By Job
    +	Partitionable Resources :    Usage  Request  Allocated 
    +	   Cpus                 :        0        1          1 
    +	   Disk (KB)            :      118     1024 1810509281 
    +	   Memory (MB)          :       54     1024       1024 
    +   
    +	Job terminated of its own accord at 2023-04-14T17:36:09Z with exit-code 0.
    +
    + +

    And, if you look at one of the output files, you should see something like this: +Hello CHTC from Job 0 running on alice@e389.chtc.wisc.edu.

    +
  10. +
+ +

Congratulations. You’ve run an HTCondor job!

+ +

Important Workflow Elements

+ +

A. Removing Jobs

+ +

To remove a specific job, use condor_rm <JobID, ClusterID, Username>. +Example:

+ +
[alice@ap2002]$ condor_rm 845638.0
+
+ +

B. Importance of Testing & Resource Optimization

+ +
    +
  1. +

    Examine Job Success. Within the log file, you can see information about the completion of each job, including a system error code (as seen in “return value 0”). You can use this code, as well as information in your “.err” file and other output files, to determine what issues your job(s) may have had, if any.

    +
  2. +
  3. +

    Improve Efficiency. Researchers with input and output files greater than 1 GB should store them in their /staging directory instead of /home to improve file transfer efficiency. See our data transfer guides to learn more.

    +
  4. +
  5. +

    Get the Right Resource Requests +Be sure to always add or modify the following lines in your submit files, as appropriate, and after running a few tests.

    + + +  +    +    +  +  +    +    +  +  +    +    +  +  +    +    +  +
    Submit file entryResources your jobs will run on
    request_cpus = cpusMatches each job to a computer "slot" with at least this many CPU cores.
    request_disk = kilobytesMatches each job to a slot with at least this much disk space, in units of KB.
    request_memory = megabytesMatches each job to a slot with at least this much memory (RAM), in units of MB.
    +
  6. +
  7. +

    Determining Memory and Disk Requirements. The log file also indicates how much memory and disk each job used, so that you can first test a few jobs before submitting many more with more accurate request values. When you request too little, your jobs will be terminated by HTCondor and set to “hold” status to flag that job as requiring your attention. To learn more about why a job has gone on hold, use condor_q -hold. When you request too much, your jobs may not match to as many available “slots” as they could otherwise, and your overall throughput will suffer.

    +
  8. +
+ +

You have the basics, now you are ready to run your OWN jobs!

+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/index.html b/preview-fall2024-info/uw-research-computing/index.html new file mode 100644 index 000000000..f32c8a641 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/index.html @@ -0,0 +1,650 @@ + + + + + + +UW Research Computing Home + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+ +
+ Emile working in the server room +
+

Your Home for
Research Computing

+
+
+ + +
+

+ Join Hundreds of UW-Madison Researchers using CHTC Resources to Complete: +

+
+
+ + + + RNA/DNA Sequencing Analyses +
+
+ + + + Machine Learning Workflows +
+
+ + + + Economic Simulations & Predictions +
+
+ + + + Weather Modeling Analyses +
+
+ + + + Chemical Reaction Predictions +
+
+ + + + Computer Vision +
+
+ + + + Artificial Intelligence +
+
+ + + + Modeling & Decision Making +
+
+ + + + Forestry and Wildlife Analyses +
+
+ + + + And More! +
+
+
+ + + + + +
+

+ Research Transformed Annually +

+
+
+
+
+
+
+ + +
+

Who We Are

+

+ We are the University of Wisconsin-Madison's core computational service provider for large scale computing. + CHTC services are open to UW-Madison staff, students, faculty, and external collaborators. +

+ We offer both a High Throughput Computing system and a High Performance Computing cluster. + Access to CPUs/GPUs, high-memory servers, data storage capacity, as well as personalized consultations and classroom support, + are provided at no-cost. +

+
+ + +
+

Departments with Researchers using CHTC Services

+ +
+ + +
+

+ Impact Stories +

+
+ + + + + + + + + +
+ +
+
+ + + HTC Week 2024 Photos + + +
+ +

High Throughput Community Builds Stronger Ties at HTC24 Week

+
+
+ +
+ By: Jordan Sklar and Cristina Encarnacion +
+ +
+ Jul 17, 2024 +
+
+ +
+
+
+ +
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/inter-submit.html b/preview-fall2024-info/uw-research-computing/inter-submit.html new file mode 100644 index 000000000..cff35826b --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/inter-submit.html @@ -0,0 +1,912 @@ + + + + + + +Compiling or Testing Code with an Interactive Job + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Compiling or Testing Code with an Interactive Job +

+ +

To best understand the below information, users should already have an +understanding of:

+ + + +

Overview

+ +

This guide provides a generic overview of steps required to install +scientific software for use in CHTC. If you are using Python, R, or +Matlab, see our specific installation and use guides here: Guides for +Matlab, Python and R.

+ +

It is helpful to understand a little bit about normal “batch” HTCondor jobs +before submitting interactive jobs. Just like batch jobs, interactive jobs +can transfer input files (usually copies of source code or the software you +want to install) and will transfer new/updated files in the main working directory +back to the submit node when the job completes.

+ +
+

One exception to the file transfers working as usual is when running an interactive job that uses a Docker container. If any output files are generated inside an interactive Docker job, they will not be copied back to the submit node when you exit the interactive job. Contact the facilitation team for workarounds to this behavior.

+
+ +

+ +

1. Building a Software Installation

+ +

You are going to start an interactive job that runs on the HTC build +servers. You will then install your packages to a folder and zip those +files to return to the submit server.

+ +

+ +

A. Submit an Interactive Job

+ +

First, download the source code for your software to the submit server. +Then create the following special submit file on the submit server, +calling it something like build.sub.

+ +

Note that you’ll want to use +IsBuildJob = true to specifically match to CHTC’s servers designated for compiling code (which include Matlab compilers and other compiling tools you may need). Compiling servers do not include specialized resources like GPUs, extreme amounts of RAM/disk, etc.; to build/test software in these cases, submit an interactive job without +IsBuildJob.

+ +
# Software build file
+
+universe = vanilla
+log = interactive.log
+
+# In the latest version of HTCondor on CHTC, interactive jobs require an executable.
+# If you do not have an existing executable, use a generic linux command like hostname as shown below.
+executable = /usr/bin/hostname
+
+# change the name of the file to be the name of your source code
+transfer_input_files = source_code.tar.gz
+
++IsBuildJob = true
+# requirements = (OpSysMajorVer =?= 8)
+request_cpus = 1
+request_memory = 4GB
+request_disk = 2GB
+
+queue
+
+ +

The only thing you should need to change in the above file is the name +of the source code tar.gz file - in the "transfer_input_files" +line.

+ +
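For example, if your source tarball were named my_program-1.0.tar.gz (an illustrative name), that line would read:

transfer_input_files = my_program-1.0.tar.gz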

Once this submit file is created, you will start the interactive job by +running the following command:

+ +
[alice@submit]$ condor_submit -i build.sub
+
+ +

The interactive build job should start in about a minute. Once it has +started, the job has a time limit of four hours - if you need more time +to compile a particular code, talk to CHTC's Research Computing +Facilitators.

+ +

B. Install the Software

+ +

Software installation typically goes through a set of standard steps -- +configuration, then compilation (turning the source code into binary +code that the computer can understand), and finally "installation", +which means placing the compiled code into a specific location. In most +install instructions, these steps look something like:

+ +
./configure
+make
+make install
+
+ +

There are two changes we make to this standard process. Because you are +not an administrator, you will want to create a folder for the +installation in the build job's working directory and use an option in +the configuration step that will install the software to this folder.

+ +

In what follows, note that anything in italics is a name that you can (and should!) choose to be more descriptive. We use general names as an example; see the LAMMPS case study below to see what you might fill in for your own program.

+ +
    +
  1. +

    In the interactive job, create a new directory to hold your final +software installation:

    + +
    [alice@build]$ mkdir program
    +
    +
  2. +
  3. +

    You'll also want to un-tar the source code that you brought along, +and cd into the source code folder.

    + +
    [alice@build]$ tar -xzf source_code.tar.gz
    +[alice@build]$ cd source_code/
    +
    +
  4. +
  5. +

Our next step will be to configure the installation. From inside the un-tarred source code directory, we run a configuration script. It's at this step that we change the final installation location of the software from its default to the directory we created in the first step. In a typical configure script, this option is called the "prefix" and is given by the --prefix flag.

    + +
    [alice@build]$ ./configure --prefix=$_CONDOR_SCRATCH_DIR/program
    +
    + +

Note that the exact options sometimes differ. Some programs use a helper program called cmake as their configuration script. The installation instructions for a program will often indicate what to use as a prefix option; alternatively, you can run the configure command with the --help flag, which lists all the options that can be added to the configure command (see the sketch just after this list).

    +
  6. +
  7. +

    After the configuration step, you'll run the steps to compile and +install your program. This is usually these two commands:

    + +
    [alice@build]$ make
    +[alice@build]$ make install
    +
    +
  8. +
  9. +

    After this step, you can cd back up to the main working directory.

    + +
    [alice@build]$ cd ..
    +
    +
  10. +
  11. +

    Right now, if we exit the interactive job, nothing will be +transferred back because we haven't created any new files in +the working directory, just the new sub-folder with our software +installation. In order to transfer back our installation, we will +need to compress it into a tarball file - not only will HTCondor +then transfer back the file, it is generally easier to transfer a +single, compressed tarball file than an uncompressed set of +directories.

    + +

    Run the following command to create your own tarball of your +packages:

    + +
    [alice@build]$ tar -czf program.tar.gz program/
    +
    +
  12. +
+ +
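As referenced above, a quick way to find the prefix option for a typical configure script is to search its help text. A minimal sketch (the output line shown is typical of autoconf-based packages and will vary by program):

[alice@build]$ ./configure --help | grep -i prefix
  --prefix=PREFIX         install architecture-independent files in PREFIX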

We now have our packages bundled and ready for CHTC! You can now exit +the interactive job and the tar.gz file with your software installation +will return to the submit server with you (this sometimes takes a few +extra seconds after exiting).

+ +
[alice@build]$ exit 
+
+ +

+ +

2. Case Study, Installing LAMMPS

+ +

First download a copy of LAMMPS and copy it to the submit server -- in +this example, we've used the "stable" version under "Download a +tarball": LAMMPS download +page

+ +

Then, make a copy of the submit file above on the submit server, +changing the name of the file to be transferred to +lammps-stable.tar.gz. Submit the interactive job as described.

+ +

While waiting for the interactive build job to start, take a look at the +installation instructions for LAMMPS:

+ + + +

You'll see that the install instructions have basically the same steps +as listed above, with two changes:

+ +
    +
  1. +

    Instead of the "configure" step, LAMMPS is using the "cmake" +command. This means that we'll need to find the equivalent to the +--prefix option for cmake. Reading further down in the +documentation, you can see that there's this option:

    + +
    -D CMAKE_INSTALL_PREFIX=path
    +
    + +

    This is exactly what we need to set the installation prefix.

    +
  2. +
  3. +

There are extra steps before the configure step -- that's fine, we'll just add them to our list of commands to run.

    +
  4. +
+ +

With all these pieces together, this is what the commands will look like +to install LAMMPS in the interactive build job and then bring the +installed copy back to the submit server.

+ +

Create the folder for the installation:

+ +
[alice@build]$ mkdir lammps
+
+ +

Unzip and cd into a build directory:

+ +
[alice@build]$ tar -xf lammps-stable.tar.gz
+[alice@build]$ cd lammps-stable
+[alice@build]$ mkdir build; cd build 
+
+ +

Run the installation commands:

+ +
[alice@build]$ cmake -D CMAKE_INSTALL_PREFIX=$_CONDOR_SCRATCH_DIR/lammps ../cmake 
+[alice@build]$ make
+[alice@build]$ make install 
+
+ +

Move back into the main job directory and create a tar.gz file of the +installation folder.

+ +
[alice@build]$ cd ../..
+[alice@build]$ tar -czf lammps.tar.gz lammps
+[alice@build]$ exit
+
+ + +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/java-jobs.html b/preview-fall2024-info/uw-research-computing/java-jobs.html new file mode 100644 index 000000000..aedfcd5e6 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/java-jobs.html @@ -0,0 +1,786 @@ + + + + + + +Running Java Jobs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Running Java Jobs +

+ +

Quickstart: Java

+ +

To use Java on the HTC system, we recommend that you use the Java Development Kit (JDK).

+ +
    +
  1. +

    Obtain a copy of the pre-compiled JDK for “Linux/x64” from https://jdk.java.net/.

    +
  2. +
  3. +

    Include the JDK .tar.gz file in your submit file with the list of files to be transferred:

    + +
    transfer_input_files = openjdk-22_linux-x64_bin.tar.gz, program.jar
    +
    +
  4. +
  5. +

    Include instructions for using the JDK in your executable file:

    + +
    #!/bin/bash
    +
    +tar -xzf openjdk-22_linux-x64_bin.tar.gz
    +export JAVA_HOME=$PWD/jdk-22
    +export PATH=$JAVA_HOME/bin:$PATH
    +
    +java -jar program.jar
    +
    +
  6. +
+ + + +

More information

+ +

To obtain your copy of the Java Development Kit (JDK), go to https://jdk.java.net/. +Click the link for the JDK that is “Ready for use”. +There will be a download link “tar.gz” under the “Builds” section for “Linux/x64”. +You can then either (a) right-click the download link and copy the link address, sign in to the submit server, and use the wget command with that link, +or (b) click the link to download to your computer, then manually upload the file from your computer to the submit server.

+ +
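For example, option (a) would look something like this, where the URL is a placeholder for the link address you copied:

[alice@submit]$ wget <copied-tar.gz-link-address>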

The example above uses file names for JDK 22 as of 2024-04. +Be sure to change the file names for the version that you actually use. +We recommend that you test and explore your setup using an interactive job.

+ +

Executable

+ +

A bash .sh file is used as the executable file in order to unpack and set up the JDK environment for use by your script. Here is the executable from the section above, with comments:

+ +
#!/bin/bash
+
+# Decompress the JDK
+tar -xzf openjdk-22_linux-x64_bin.tar.gz
+
+# Add the new JDK folder to the bash environment
+export JAVA_HOME=$PWD/jdk-22
+export PATH=$JAVA_HOME/bin:$PATH
+
+# Run your program
+java -jar program.jar
+
+ + + +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/julia-jobs.html b/preview-fall2024-info/uw-research-computing/julia-jobs.html new file mode 100644 index 000000000..a9c69bd57 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/julia-jobs.html @@ -0,0 +1,1012 @@ + + + + + + +Running Julia Jobs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Running Julia Jobs +

+ +

Quickstart: Julia

+ + + +

Option A: Build a container with Julia & packages installed inside:

+ +
    +
  1. How to build your own container
  2. +
  3. Example container recipes for Julia
  4. +
  5. Use your container in your HTC jobs
  6. +
+ +

Option B

+ +

Use a portable copy of Julia and create your own portable copy of your Julia packages:

+ +
    +
  1. Follow the instructions in the “Option B: Create your own portable copy” section below.
  2. +
+ +
+

This approach may be sensitive to the operating system of the execution point. +We recommend building a container instead, but are keeping these instructions as a backup.

+
+ + + +

More information

+ +

No CHTC machine has Julia pre-installed, so you must configure a portable copy of Julia to work on the HTC system. +Using a container as described above is the easiest way to accomplish this.

+ +

Executable

+ +

When using a container, you can use a .jl script as the submit file executable, provided that the first line (the “shebang”) in the .jl file is

+ +
#!/usr/bin/env julia
+
+ +

with the rest of the file containing the commands you want to run using Julia.

+ +
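For example, a minimal my-script.jl (the contents are illustrative) could be:

#!/usr/bin/env julia

# any Julia code can go here
println("Hello from Julia")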

Alternatively, you can use a bash .sh script as the submit file executable, and in that file you can use the julia command:

+ +
#!/bin/bash
+
+julia my-script.jl
+
+ +

In this case, remember to include your .jl file in the transfer_input_files line of your submit file.

+ +

Arguments

+ +

For more information on passing arguments to a Julia script, see the +Julia documentation.

+ +

Option B: Create your own portable copy

+ +

Use a portable copy of Julia and create your own portable copy of your Julia packages.

+ +

This approach may be sensitive to the operating system of the execution point. We recommend building a container instead, but are keeping these instructions as a backup.

+ +
    +
  1. +

Download the precompiled Julia software from https://julialang.org/downloads/. You will need the 64-bit tarball compiled for general use on a Linux x86 system. The file name will resemble something like julia-#.#.#-linux-x86_64.tar.gz.

    + +
      +
    • Tip: use wget to download directly to your /home directory on the +submit server, OR use transfer_input_files = url in your HTCondor submit files.
    • +
    +
  2. +
  3. +

If you need additional Julia packages, submit an “interactive build” job to create a Julia project and install packages; otherwise, skip to the next step.

    + + +
  4. +
  5. +

    Submit a job that executes a Julia script using the Julia precompiled binary +with base Julia and Standard Library.

    + +
     #!/bin/bash
    +
    + # extract Julia binaries tarball
    + tar -xzf julia-#.#.#-linux-x86_64.tar.gz
    +
    + # add Julia binary to PATH
    + export PATH=$_CONDOR_SCRATCH_DIR/julia-#.#.#/bin:$PATH
    +
    + # run Julia script
    + julia my-script.jl
    +
    + +
      +
    • For more details on the job submission, see the section + below: Submit Julia Jobs
    • +
    +
  6. +
+ +

Install Julia Packages

+ +

If your work requires additional Julia packages, you will need to perform a one-time installation of these packages within a Julia project. A copy of the project can then be saved for use in subsequent job submissions. For more details, please see Julia’s documentation at Julia Pkg.jl.

+ +

Create An Interactive Build Job Submit File

+ +

To install your Julia packages, first create an HTCondor submit file for submitting an “interactive build” job, which is a job that will run interactively on one of CHTC’s servers dedicated to building (aka compiling) software.

+ +

Using a text editor, create the following file, which can be named build.sub

+ +
# Julia build job submit file
+
+universe = vanilla
+log = julia-build.log
+
+# In the latest version of HTCondor on CHTC, interactive jobs require an executable.
+# If you do not have an existing executable, use a generic linux command like hostname as shown below.
+executable = /usr/bin/hostname
+
+# have job transfer a copy of precompiled Julia software
+# be sure to match the name of the version 
+# that you have downloaded to your home directory
+transfer_input_files = julia-#.#.#-linux-x86_64.tar.gz
+
++IsBuildJob = true
+
+request_cpus = 1
+request_memory = 4GB
+request_disk = 2GB
+
+queue
+
+ +

The only thing you should need to change in the above file is the name +of the Julia tarball file in the "transfer_input_files" line.

+ +

Submit Your Interactive Build Job

+ +

Once this submit file is created, submit the job using the following command:

+ +
[alice@submit]$ condor_submit -i build.sub
+
+ +

It may take a few minutes for the build job to start.

+ +

Install Julia Packages Interactively

+ +

Once the interactive job starts, you should see the following inside the job’s working directory:

+ +
bash-4.2$ ls -F
+julia-#.#.#-linux-x86_64.tar.gz   tmp/    var/
+
+ +

Run the following commands +to extract the Julia software and add Julia to your PATH:

+ +
bash-4.2$ tar -xzf julia-#.#.#-linux-x86_64.tar.gz
+bash-4.2$ export PATH=$_CONDOR_SCRATCH_DIR/julia-#.#.#/bin:$PATH
+
+ +

After these steps, you should be able to run Julia from the command line, e.g.

+ +
julia --version
+
+ +

Now create a project directory to install your packages (we’ve called +it my-project/ below) and tell Julia its name:

+ +
bash-4.2$ mkdir my-project
+bash-4.2$ export JULIA_DEPOT_PATH=$PWD/my-project
+
+ +

You can choose whatever name you like for this directory -- if you have different projects that you use for different jobs, you could use a more descriptive name than “my-project”.

+ +

We will now use Julia to install any needed packages to the project directory +we created in the previous step.

+ +

Open Julia with the --project option set to the project directory:

+ +
bash-4.2$ julia --project=my-project
+
+ +

Once you’ve started up the Julia REPL (interpreter), start the Pkg REPL, used to +install packages, by typing ]. Then install and test packages by using +Julia’s add Package syntax.

+ +
               _
+   _       _ _(_)_     |  Documentation: https://docs.julialang.org
+  (_)     | (_) (_)    |
+   _ _   _| |_  __ _   |  Type "?" for help, "]?" for Pkg help.
+  | | | | | | |/ _` |  |
+  | | |_| | | | (_| |  |  Version 1.0.5 (2019-09-09)
+ _/ |\__'_|_|_|\__'_|  |  Official https://julialang.org/ release
+|__/                   |
+
+julia> ]
+(my-project) pkg> add Package
+(my-project) pkg> test Package
+
+ +

If you have multiple packages to install they can be combined +into a single command, e.g. (my-project) pkg> add Package1 Package2 Package3.

+ +

If you encounter issues getting packages to install successfully, please +contact us at chtc@cs.wisc.edu.

+ +

Once you are done, you can exit the Pkg REPL by pressing the backspace (delete) key and then typing exit():

+ +
(my-project) pkg> 
+julia> exit()
+
+ +

Save Installed Packages For Later Jobs

+ +

To use this project, and the associated installed packages, in +subsequent jobs, we need to have HTCondor return some files to +the submit server by converting the my-project/ directory +to a tarball, before exiting the interactive job session:

+ +
bash-4.2$ tar -czf my-project.tar.gz my-project/
+bash-4.2$ exit
+
+ +

After the job exits, you will be returned to your /home directory on the submit server (specifically, wherever you were located when you submitted the interactive build job). A copy of my-project.tar.gz will be present. Be sure to check the size of the project tarball before proceeding to subsequent job submissions. If the file is >100MB, please contact us at chtc@cs.wisc.edu so that we can get you set up with access to our SQUID web proxy. More details are available on our SQUID guide: File Availability with SQUID

+ +
[alice@submit]$ ls 
+build.sub     julia-#.#.#-linux-x86_64.tar.gz   julia-build.log
+my-project.tar.gz
+[alice@submit]$ ls -sh my-project.tar.gz
+
+ +

Submit Julia Jobs

+ +

To submit a job that runs a Julia script, create a bash +script and HTCondor submit file following the examples in this section. +These examples assume that you have downloaded a copy of Julia for Linux as a tar.gz +file and if using packages, you have gone through the steps above to install them +and create an additional tar.gz file of the installed packages.

+ +

Create Executable Bash Script

+ +

Your job will use a bash script as the HTCondor executable. This script will contain all the steps needed to unpack the Julia binaries and execute your Julia script (script.jl). Below are two example bash scripts: one that can be used to execute a script with base Julia, and one that will use packages installed in a Julia project (see Install Julia Packages).

+ +

Example Bash Script For Base Julia Only

+ +

If your Julia script can run without additional packages (other than base Julia and +the Julia Standard library) use the example script directly below.

+ +
#!/bin/bash
+
+# julia-job.sh
+
+# extract Julia tar.gz file
+tar -xzf julia-#.#.#-linux-x86_64.tar.gz
+
+# add Julia binary to PATH
+export PATH=$_CONDOR_SCRATCH_DIR/julia-#.#.#/bin:$PATH
+
+# run Julia script
+julia script.jl
+
+ +

Example Bash Script For Julia With Installed Packages

+ +
#!/bin/bash
+
+# julia-job.sh
+
+# extract Julia tar.gz file and project tar.gz file
+tar -xzf julia-#.#.#-linux-x86_64.tar.gz
+tar -xzf my-project.tar.gz
+
+# add Julia binary to PATH
+export PATH=$_CONDOR_SCRATCH_DIR/julia-#.#.#/bin:$PATH
+# add Julia packages to DEPOT variable
+export JULIA_DEPOT_PATH=$_CONDOR_SCRATCH_DIR/my-project
+
+# run Julia script
+julia --project=my-project script.jl
+
+ +

Create HTCondor Submit File

+ +

After creating a bash script to run Julia, create a submit file to submit the job.

+ +

More details about setting up a submit file, including a submit file template, +can be found in our hello world example page at Run Your First CHTC Jobs.

+ +
# julia-job.sub
+
+universe = vanilla
+
+log = job_$(Cluster).log
+error = job_$(Cluster)_$(Process).err
+output = job_$(Cluster)_$(Process).out
+
+executable = julia-job.sh
+
+should_transfer_files = YES
+when_to_transfer_output = ON_EXIT
+transfer_input_files = julia-#.#.#-linux-x86_64.tar.gz, script.jl
+
+request_cpus = 1
+request_memory = 2GB
+request_disk = 2GB
+
+queue 1
+
+ +

If your Julia script needs to use packages installed for a project, be sure to include my-project.tar.gz as an input file in julia-job.sub. For project tar.gz files that are <100MB, you can follow the example below:

+ +
transfer_input_files = julia-#.#.#-linux-x86_64.tar.gz, script.jl, my-project.tar.gz
+
+ +

For project tar.gz files that are larger than 100MB, email a facilitator about +using SQUID.

+ +

Modify the CPU/memory request lines to match what is needed by the job. +Test a few jobs for disk space/memory usage in order to make sure your +requests for a large batch are accurate! Disk space and memory usage can be found in the +log file after the job completes.

+ +

Submit Your Julia Job

+ +

Once you have created an executable bash script and submit file, you can +submit the job to run using the following command:

+ +
[alice@submit]$ condor_submit julia-job.sub
+
+ +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/licensed-software.html b/preview-fall2024-info/uw-research-computing/licensed-software.html new file mode 100644 index 000000000..d65bf59c9 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/licensed-software.html @@ -0,0 +1,883 @@ + + + + + + +Use Licensed Software + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Use Licensed Software +

+ +

This guide describes when and how to run jobs that use licensed software on CHTC’s high throughput computing (HTC) system.

+ +

To best understand the information below, users should already have an understanding of:

+ +
    +
  • Using the command line to: navigate within directories, +create/copy/move/delete files and directories, and run their +intended programs (aka "executables").
  • +
  • The CHTC's Intro to Running HTCondor Jobs
  • +
+ +

Overview

+ +

Once you know that you need to use a licensed software program on our +HTC system, you will need to do the following:

+ + + +

+ +

A. CHTC's Licensed Software Policies on the HTC System

+ +

Our typical practice for software support in CHTC is for users to +install and manage their own software installations. We have multiple +guides to help users with common software +programs and additional support is always +available through CHTC's research computing +facilitators.

+ +

However, certain software programs require paid licenses which can make +it challenging for individual users to install the software and use the +licenses correctly. As such, we provide support for software +installation and use on our high throughput system. Installation of +licensed programs is by request to and at the discretion of CHTC staff.

+ +

We always recommend using a free or open-source software alternative +whenever possible, as certain software licenses restrict the amount of +computing that can contribute to your research.

+ +

+ +

B. Viewing Licensed Software on the HTC System

+ +

Software with paid licenses that has been installed on the high +throughput (HTC) system is accessible through software "modules", +which are tools to access and activate a software installation. To see +which software programs are available on the HTC system, run the +following command on an HTC submit server:

+ +
[alice@submit]$ module avail
+
+ +
+

Note: you should never run a program directly on the submit server. +Jobs that use licensed software/modules should always be submitted as +HTCondor jobs as described below.

+
+ +

Note that not all software modules are available to all CHTC users. Some +programs like ansys have a campus or shared license which makes them +available to all CHTC users. Other software, like lumerical and +abaqus, is licensed to a specific group and is only available to +members of that group.

+ +

+ +

C. Submitting Jobs Using Licensed Software Modules

+ +

The following sections describe how to create a bash script executable +and HTCondor submit file to run jobs that use software accessible via +the modules.

+ +

+ +

1. Script For Running Jobs with Modules

+ +

To run a job that uses a licensed software installation on the HTC +system, you need to write a script that loads the software module and +then runs the program, like so:

+ +
#!/bin/bash
+
+# Commands to enable modules, and then load an appropriate software module
+export PATH
+. /etc/profile.d/modules.sh
+module load software
+
+# For Lumerical (the license requires a home directory)
+export HOME=$_CONDOR_SCRATCH_DIR
+
+# Command to run your software from the command line
+cmd -options input.file
+
+ +

Replace software with the name of the software module you want to use, +found via the module avail command described above. Replace +the final command with the syntax to run your software, with the +appropriate options.

+ +

For example, to run a Comsol job, the script might look like this:

+ +
#!/bin/bash
+
+export PATH
+. /etc/profile.d/modules.sh
+module load COMSOL/5.4
+
+comsol batch -inputfile test.mph -outputfile test-results.mph
+
+ +

+ +

2. Submit File Requirements

+ +

There are several important requirements to consider when writing a +submit file for jobs that use our licensed software modules. They are +shown in the sample submit file below and include:

+ +
    +
  • +

    Require access to the modules. To ensure that your job will have +access to CHTC software modules you must include the following in +your submit file.

    + +
    requirements = (HasChtcSoftware == true)
    +
    +
  • +
  • +

    Add a concurrency limit. For software with limited licenses, we have +implemented concurrency limits, which control the number of jobs running +at once in the HTC system. If your software is in the table below, use +the concurrency limit name in your submit file like this:

    + +

    concurrency_limits = LIMIT_NAME:num_of_licenses_used

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    SoftwareLimit NameLimit
    ABAQUSABAQUS50
    ANSYSANSYS_RESEARCH20
    COMSOL (Physics)COMSOL_PHYSICS2
    LumericalLUMERICAL3
    + +

    So if you were planning to run a job that used one ANSYS license, you would +use:

    + +
    concurrency_limits = ANSYS_RESEARCH:1
    +
    +
  • +
  • Request accurate CPUs and memory. Run at least one test job and +look at the log file produced by HTCondor to determine how much +memory and disk space your jobs actually use. We recommend +requesting the smallest number of CPUs where your job will finish in +1-2 days.
  • +
  • The script you wrote above (shown as run_job.sh below) should be +your submit file "executable", and any input files should be +listed in transfer_input_files.
  • +
+ +

A sample submit file is given below:

+ +
# software.sub
+# A sample submit file for running a single job using software modules
+
+universe = vanilla
+log = job_$(Cluster).log
+output = job_$(Cluster).out
+error = job_$(Cluster).err
+
+# the executable should be the script you wrote above
+executable = run_job.sh
+# arguments = (if you want to pass any to the shell script)
+should_transfer_files = YES
+when_to_transfer_output = ON_EXIT
+transfer_input_files = (this should be a comma-separated list of input files if needed)
+
+# Requirement for accessing new set of software modules
+requirements = ( HasChtcSoftware == true ) 
+
+# If required, add the concurrency limit for your software and uncomment
+# concurrency_limits = LIMIT_NAME:num_of_licenses_used
+
+request_cpus = 1
+request_memory = 2GB
+request_disk = 2GB
+
+queue
+
+ +

After the submit file is complete, you can submit your jobs using +condor_submit.

+ +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/machine-learning-htc.html b/preview-fall2024-info/uw-research-computing/machine-learning-htc.html new file mode 100644 index 000000000..ef749f1c5 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/machine-learning-htc.html @@ -0,0 +1,780 @@ + + + + + + +Run Machine Learning Jobs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Run Machine Learning Jobs +

+ +

This guide provides some of our recommendations for success +in running machine learning (specifically deep learning) jobs in CHTC.

+ +
+

This is a new how-to guide on the CHTC website. Recommendations and +feedback are welcome via email (chtc@cs.wisc.edu) or by creating an +issue on the CHTC website Github repository: Create an issue

+
+ +

Overview

+ +

It is important to understand the needs of a machine learning job before submitting +it and have a plan for managing software. This guide covers:

+ +
    +
  1. Considering job requirements
  2. +
  3. Recommendations for managing software
  4. +
+ +

1. Job Requirements

+ +

Before digging into the nuts and bolts of software installation in the next section, +it is important to first consider a few other job requirements that might apply to +your machine learning job.

+ +

A. Do you need GPUs?

+ +

CHTC has only a handful of publicly available GPUs (roughly four) and thousands of CPUs. When possible, using CPUs will allow your jobs to start more quickly and to have many running at once. For certain calculations, GPUs provide a distinct advantage, as some machine learning algorithms are optimized to run significantly faster on them. Consider whether you would benefit from running one or two long-running calculations on a GPU or if your work is better suited to running many jobs on CHTC’s available CPUs.

+ +

If you need GPUs for your jobs, you can see a summary of available GPUs in CHTC and +how to access them here:

+ + + +

Note that you may need to use different versions of your software, depending on whether or +not you are using GPUs, as shown in the software section of this guide.

+ +

B. How big is your data?

+ +

CHTC’s usual data recommendations apply for machine learning jobs. If your job is using +an input data set larger than a few hundred MB or generating output files larger than +a few GB, you will likely need to use our large data +file share. Contact the CHTC Research Computing Facilitators to get access and +read about the large data location here:

+ + + +

C. How long does your job run?

+ +

CHTC’s default job length is 72 hours. If your task is long enough that you will encounter this limit, contact the CHTC Research Computing Facilitators (chtc@cs.wisc.edu) for potential workarounds.

+ +

D. How many jobs do you want to submit?

+ +

Do you have the ability to break your work into many independent pieces? If so, +you can take advantage of CHTC’s capability to run many independent jobs at once, +especially when each job is using a CPU. See our guide for running multiple jobs here:

+ + + +

2. Software Options

+ +

Many of the tools used for machine learning, specifically deep learning and +convolutional neural networks, have enough dependencies that our usual installation +processes work less reliably. The following options are the best way to handle the complexity +of these software tools.

+ +

Please be aware of which CUDA library version you are using to run your code.

+ +

A. Using Docker Containers

+ +

CHTC’s HTC system has the ability to run jobs using Docker containers, which package +up a whole system (and software) environment in a consistent, reproducible, portable +format. When possible, we recommend using standard, publicly available +Docker containers to run machine learning jobs in CHTC.

+ +

To see how you can use Docker containers to run jobs in CHTC, see:

+ + +

You can also test and examine containers on your own computer:

+ + +

Some machine learning frameworks publish ready-to-go Docker images:

+ + +

If you cannot find a Docker container with exactly the tools you need, you can build your own, starting with one of the containers above. For instructions on how to build and test your own Docker container, see this guide:

+ + + +

B. Using Conda

+ +

The Python package manager conda is a popular tool for installing and +managing machine learning tools. +See this guide for information on how +to use conda to provide dependencies for CHTC jobs.

+ +

Note that when installing TensorFlow using conda, it is important to install the tensorflow-gpu package rather than the generic tensorflow package. This ensures that the installation will include the cudatoolkit and cudnn dependencies required for TensorFlow’s GPU capability.

+ +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/matlab-jobs.html b/preview-fall2024-info/uw-research-computing/matlab-jobs.html new file mode 100644 index 000000000..a982e6e55 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/matlab-jobs.html @@ -0,0 +1,1056 @@ + + + + + + +Running Matlab Jobs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Running Matlab Jobs +

+ +

Quickstart: Matlab

+ +

Build a container with Matlab & toolboxes installed inside:

+ +
    +
  1. How to build your own container
  2. +
  3. Example container recipes for Matlab
  4. +
  5. Use your container in your HTC jobs
  6. +
+ +
+

Note: Because Matlab is licensed software, you must add the following line to your submit file:

+ +
concurrency_limits = MATLAB:1
+
+ +

Failure to do so may cause your or other users’ jobs to fail to obtain a license from the license server.

+
+ + + +

More information

+ +

CHTC has a site license for Matlab that allows for up to 10,000 jobs to run at any given time across all CHTC users. +Hence the requirement for adding the line concurrency_limits = MATLAB:1 to your submit files, so that HTCondor can keep track of which jobs are using or will use a license.

+ +

Following the instructions above, you are able to install a variety of Matlab Toolboxes when building the container. +The Toolboxes available for each supported version of Matlab are described here: https://github.com/mathworks-ref-arch/matlab-dockerfile/blob/main/mpm-input-files/. +Navigate to the text file for the version of interest, and look at the section named “INSTALL PRODUCTS”. +The example recipes linked above provide instructions on how to specify the packages you want to install when building the container.

+ +

Executable

+ +

When using the Matlab container, we recommend the following process for executing your Matlab commands in an HTCondor job:

+ +
    +
  1. +

    Put your Matlab commands in a .m script. For this example, we’ll call it my-script.m.

    +
  2. +
  3. +

    Create the file run-matlab.sh with the following contents:

    + +
    #!/bin/bash
    +   
    +matlab -batch "my-script"
    +
    + +

    Note that in the script, the .m extension has been dropped from the file name (uses "my-script" instead of "my-script.m").

    +
  4. +
  5. +

    In your submit file, set the .sh script as the executable and list the .m file to be transferred:

    + +
    executable = run-matlab.sh
    +transfer_input_files = my-script.m
    +
    +
  6. +
+ +

Arguments

+ +

You can pass arguments from your submit file to your Matlab code via your executable .sh and the matlab -batch command. +Arguments in your submit file are accessible inside your executable .sh script with the syntax ${n}, where n is the nth value passed in the arguments line. +You can use this syntax inside of the matlab -batch command.

+ +

For example, if your Matlab script (my-script.m) is expecting a variable foo, you can add foo=${1} before calling my-script:

+ +
#!/bin/bash
+
+matlab -batch "foo=${1};my-script"
+
+ +

This will use the first argument from the submit file to define the Matlab variable foo. By default, such values are read in by Matlab as numeric values (or as a Matlab function/variable that evaluates to a numeric value). If you want Matlab to read in the argument as a string, you need to add apostrophes around the value, like this:

+ +
#!/bin/bash
+
+matlab -batch "foo=${1};bar='${2}';my-script"
+
+ +

Here, the value of bar is defined as the second argument from the submit file, and will be identified by Matlab as a string because it’s wrapped in apostrophes ('${2}').

+ +

If you have defined your script to act as a function, you can call the function directly and pass the arguments directly as well. +For example, if you have constructed your my-script.m as a function, then you can do

+ +
#!/bin/bash
+
+matlab -batch "my-script(${1}, ${2})"
+
+ +

Again, by default Matlab will interpret the values of these variables as numeric, unless you wrap the argument in apostrophes as described above.

+ + + +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/mpi-jobs.html b/preview-fall2024-info/uw-research-computing/mpi-jobs.html new file mode 100644 index 000000000..0cfbbe4d5 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/mpi-jobs.html @@ -0,0 +1,14 @@ + + +
+ +
diff --git a/preview-fall2024-info/uw-research-computing/multiple-job-dirs.html b/preview-fall2024-info/uw-research-computing/multiple-job-dirs.html new file mode 100644 index 000000000..675ea2f09 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/multiple-job-dirs.html @@ -0,0 +1,770 @@ + + + + + + +Submitting Multiple Jobs in Individual Directories + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Submitting Multiple Jobs in Individual Directories +

+ +

This guide demonstrates how to submit multiple jobs, using a specific +directory structure. It is relevant to:

+ +
    +
  • Researchers who have used CHTC's "ChtcRun" tools in the past
  • +
  • Anyone who wants to submit multiple jobs, where each job has its own +directory for input/output files on the submit server.
  • +
+ +

1. Software and Input Preparation

+ +

The first time you submit jobs, you will need to prepare a portable +version of your software and a script (what we call the job's +"executable") that runs your code. We have guides for preparing:

+ + + +

Choose the right guide for you and follow the directions for compiling +your code (Matlab) or building an installation (Python, R). Also follow +the instructions for writing a shell script that runs your program. +These are typically steps 1 and 2 of the above guides.

+ +

2. Directory Structure

+ +

Once you've prepared your code and script, create the same directory +structure that you would normally use with ChtcRun. For a single batch +of jobs, the directories will look like this:

+ +
project_name/
+    run_code.sh
+    shared/
+        scripts, code_package
+        shared_input
+    job1/
+        input/
+            job_input
+    job2/
+        input/
+            job_input
+    job3/
+        input/
+            job_input
+
+ +

You'll want to put all your code and files required for every job in +shared/ and individual input files in the individual job directories +in an input folder. In the submit file below, it matters that the +individual job directories start with the word "job".

+ +
+

Note: the job directories need to be hosted in your /home directory +on the submit node. The following instructions will not work for files +hosted on /staging!

+
+ +

3. Submit File

+ +
+

Note: if you are submitting more than 10,000 jobs at once, you'll +need to use a different submit file. Please email the CHTC Research +Computing Facilitators at chtc@cs.wisc.edu if this is the case!

+
+ +

Your submit file, which should go in your main project directory, should +look like this:

+ +
# Specify the HTCondor Universe (vanilla is the default and is used
+#  for almost all jobs), the desired name of the HTCondor log file,
+#  and the desired name of the standard error and standard output file.  
+universe = vanilla
+log = process.log
+error = process.err
+output = process.out
+#
+# Specify your executable (single binary or a script that runs several
+#  commands) and arguments
+executable = run_code.sh
+# arguments = arguments to your script go here
+#
+# Specify that HTCondor should transfer files to and from the
+#  computer where each job runs. 
+should_transfer_files = YES
+when_to_transfer_output = ON_EXIT
+# Set the submission directory for each job with the $(directory)
+# variable (set below in the queue statement).  Then transfer all 
+# files in the shared directory, and from the input folder in the 
+# submission directory
+initialdir = $(directory)
+transfer_input_files = ../shared/,input/
+#
+# Tell HTCondor what amount of compute resources
+#  each job will need on the computer where it runs.
+request_cpus = 1
+request_memory = 1GB
+request_disk = 1GB
+#
+# Create a job for each "job" directory.
+queue directory matching job*
+
+ +

You must change the name of the executable to your own script, and +in certain cases, add arguments.

+ +

Note that the final line matches the pattern of your directory names +created in the second step. You can use a different name for the +directories (like data or seed), but you should use whatever word +they share in the final queue statement in place of "job".

+ +

Jobs can then be submitted as described in our Introduction to HTC +Guide, using condor_submit.

+ +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/multiple-jobs.html b/preview-fall2024-info/uw-research-computing/multiple-jobs.html new file mode 100644 index 000000000..6761468cc --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/multiple-jobs.html @@ -0,0 +1,1086 @@ + + + + + + +Submitting Multiple Jobs Using HTCondor + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Submitting Multiple Jobs Using HTCondor +

+ + + + +

Overview

+

HTCondor has several convenient features for streamlining high-throughput +job submission. This guide provides several examples +of how to leverage these features to submit multiple jobs with a +single submit file.

+ +

Why submit multiple jobs with a single submit file?

+ +

Users should submit multiple jobs using a single submit file, or where applicable, as few separate submit files as needed. Using HTCondor multi-job submission features is more efficient for users and will help ensure reliable operation of the login nodes.

+ +

Many options exist for streamlining your submission of multiple jobs, +and this guide only covers a few examples of what is truly possible with +HTCondor. If you are interested in a particular approach that isn’t described here, +please contact CHTC’s research computing facilitators and we will +work with you to identify options to meet the needs of your work.

+ +
+

Before you continue reading: While HTCondor is designed to submit many jobs at a time using a single submit file, the hardware of the submit server can be overwhelmed if a significant number of jobs are submitted at once or are rapidly starting and finishing. Therefore, plan ahead for the following two scenarios:

+ +

1) If you plan to submit 10,000+ jobs at a time, please let us know, so we can provide options that will protect the queue’s performance.
2) If you plan to submit 1,000+ jobs, please make sure that each job has a minimum run time of 10 minutes (on average). If your calculations are shorter than 10 minutes, modify your workflow to run multiple calculations per job (see the sketch below this note).

+ +
+ +
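For example, a minimal sketch of an executable that wraps several short calculations into a single job (the script and file names are hypothetical):

#!/bin/bash

# run 20 short calculations back-to-back inside one job
for i in $(seq 1 20); do
    ./my_short_calculation input_${i}.dat > output_${i}.txt
done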

+

1. Submit Multiple Jobs Using queue

+ +

All HTCondor submit files require a queue attribute (which must also be +the last line of the submit file). By default, queue will submit one job, but +users can also configure the queue attribute to behave like a for loop +that will submit multiple jobs, with each job varying as predefined by the user.

+ +

Below are different HTCondor submit file examples for submitting batches of multiple +jobs and, where applicable, how to indicate the differences between jobs in a batch +with user-defined variables. Additional examples and use cases are provided further below:

+ +
    +
  1. queue <N> - will submit N jobs. Examples include performing replications, where the same job must be repeated N times, looping through files named with numbers, and looping through a matrix where each job uses information from a specific row or column.
  2. +
  3. queue <var> from <list> - will loop through a list of file names, parameters, etc. as defined in a separate text file (i.e. <list>). This queue option is very flexible and provides users with many options for submitting multiple jobs.
  4. +
  5. Organizing Jobs Into Individual Directories - +another option that can be helpful in organizing multi-job submissions.
  6. +
+ +

These queue options are also described in the following video from HTCondor Week 2020: + + 2020 HTCondor Week Presentation

+ +


+ +

What makes these queue options powerful is the ability to use user-defined variables to specify details about your jobs in the HTCondor submit file. The examples below will include the use of $(variable_name) to specify details like input file names, file locations (aka paths), etc. When selecting a variable name, users must avoid HTCondor’s reserved submit file variables and commands, such as Cluster, Process, output, input, arguments, etc.

+ +

2. Use queue N in your HTCondor submit files

+

+ +

When using queue N, HTCondor will submit a total of N +jobs, counting from 0 to N - 1 and each job will be assigned +a unique Process id number spanning this range of values. Because +the Process variable will be unique for each job, it can be used in +the submit file to indicate unique filenames and filepaths for each job.

+ +

The most straightforward example of using queue N is to submit +N number of identical jobs. The example shown below demonstrates +how to use the Cluster and Process variables to assign unique names +for the HTCondor error, output, and log files for each job in the batch:

+ +
# 100jobs.sub
+# submit 100 identical jobs
+
+log = job_$(Cluster)_$(Process).log
+error = job_$(Cluster)_$(Process).err
+output = job_$(Cluster)_$(Process).out
+
+... remaining submit details ...
+
+queue 100
+
+ +

For each job, the appropriate number, 0, 1, 2, ... 99 will replace $(Process). +$(Cluster) will be a unique number assigned to the entire 100 job batch. Each +time you run condor_submit job.sub, you will be provided +with the Cluster number which you will also see in the output produced by +the command condor_q.

+ +

If a uniquely named results file needs to be returned by each job, +$(Process) and $(Cluster) can also be used as arguments, and anywhere +else as needed, in the submit file:

+ +
arguments = $(Cluster)_$(Process).results
+
+... remaining submit details ...
+
+queue 100
+
+ +

Be sure to properly format the arguments statement according to the +executable used by the job.

+ +

What if my jobs are not identical? queue N may still be a great +option! Additional examples for using this option include:

+ +

+

2A. Use integer numbered input files

+ +
[user@login]$ ls *.data
+0.data   1.data   2.data   3.data
+...      97.data  98.data  99.data
+
+ +

In the submit file, use:

+ +
transfer_input_files = $(Process).data
+
+... remaining submit details ...
+
+queue 100
+
+ +

+

2B. Specify a row or column number for each job

+ +

$(Process) can be used to specify a unique row or column of information in a matrix to be used by each job in the batch. The matrix then needs to be transferred with each job as input. For example:

+ +
transfer_input_files = matrix.csv
+arguments = $(Process)
+
+... remaining submit details ...
+
+queue 100
+
+ +

The above example assumes that your job is set up to use an argument to specify the row or column to be used by your software.

+ +

+

2C. Need N to start at 1

+ +

If your input files are numbered 1 - 100 instead of 0 - 99, or your matrix +row starts with 1 instead of 0, you can perform basic arithmetic in the submit +file:

+ +
plusone = $(Process) + 1
+NewProcess = $INT(plusone,%d)
+arguments = $(NewProcess)
+
+... remaining submit details ...
+
+queue 100
+
+ +

Then use $(NewProcess) anywhere in the submit file that you would +have otherwise used $(Process). Note that there is nothing special about the +names plusone and NewProcess, you can use any names you want as variables.

+ +

+

3. Submit multiple jobs with one or more distinct variables per job

+ +

Think about what’s different between each job that needs to be submitted. +Will each job use a different input file or combination of software parameters? Do +some of the jobs need more memory or disk space? Do you want to use a different +software or script on a common set of input files? Using queue <var> from <list> +in your submit files can make that possible! <var> can be a single user-defined +variable or comma-separated list of variables to be used anywhere in the submit file. +<list> is a plain text file that defines <var> for each individual job to be submitted in the batch.

+ +

Suppose you need to run a program called compare_states on the following set of input files: illinois.data, nebraska.data, and wisconsin.data, where each input file can be analyzed as a separate job.

+ +

To create a submit file that will submit all three jobs, first create a +text file that lists each .data file (one file per line). +This step can be performed directly on the login node, for example:

+ +
[user@state-analysis]$ ls *.data > states.txt
+[user@state-analysis]$ cat states.txt
+illinois.data
+nebraska.data
+wisconsin.data
+
+ +

Then, in the submit file, following the pattern queue <var> from <list>, +replace <var> with a variable name like state and replace <list> +with the list of .data files saved in states.txt:

+ +
queue state from states.txt
+
+ +

For each line in states.txt, HTCondor will submit a job and the variable +$(state) can be used anywhere in the submit file to represent the name of the .data file +to be used by that job. For the first job, $(state) will be illinois.data, for the +second job $(state) will be nebraska.data, and so on. For example:

+ +
# run_compare_states_per_state.sub
+
+transfer_input_files = $(state)
+arguments = $(state)
+executable = compare_states
+
+... remaining submit details ...
+
+queue state from states.txt
+
+ +

+

3A. Use multiple variables for each job

+ +

Let’s imagine that each state .data file contains data spanning several +years and that each job needs to analyze a specific year of data. Then +the states.txt file can be modified to specify this information:

+ +
[user@state-analysis]$ cat states.txt
+illinois.data, 1995
+illinois.data, 2005
+nebraska.data, 1999
+nebraska.data, 2005
+wisconsin.data, 2000
+wisconsin.data, 2015
+
+ +

Then modify the queue statement to define two variables, state and year:

+ +
queue state,year from states.txt
+
+ +

Then the variables $(state) and $(year) can be used in the submit file:

+ +
# run_compare_states_by_year.sub
+arguments = $(state) $(year)
+transfer_input_files = $(state)
+executable = compare_states
+
+... remaining submit details ...
+
+queue state,year from states.txt
+
+ +

+

4. Organizing Jobs Into Individual Directories

+ +

+

4A. Submitting Multiple Jobs in Different Directories with queue <variable> from list

+ +

One way to organize jobs is to assign each job to its own directory, +instead of putting files in the same directory with unique names. To +continue our "compare_states" example, suppose there's a directory +for each state you want to analyze, and each of those directories has +its own input file named input.data:

+ +
[user@state-analysis]$ ls -F
+compare_states  illinois/  nebraska/  wisconsin/
+
+[user@state-analysis]$ ls -F illinois/
+input.data
+
+[user@state-analysis]$ ls -F nebraska/
+input.data
+
+[user@state-analysis]$ ls -F wisconsin/
+input.data
+
+ +

The HTCondor submit file attribute initialdir can be used +to define a specific directory from which each job in the batch will be +submitted. The default initialdir location is the directory from which the +command condor_submit myjob.sub is executed.

+ +

Combining queue <var> from <list> with initialdir, each line of state-dirs.txt will contain the path to a state directory, and initialdir will be set to this path for each job:

+ +
#state-per-dir-job.sub
+initialdir = $(state_dir)
+transfer_input_files = input.data	
+executable = compare_states
+
+... remaining submit details ...
+
+queue state_dir from state-dirs.txt
+
+ +

Where state-dirs.txt is a list of each directory with state data:

+ +
[user@state-analysis]$ cat state-dirs.txt
+illinois
+nebraska
+wisconsin
+
+ +

Notice that executable = compare_states has remained unchanged in the above example. When using initialdir, only the input and output file paths (including the HTCondor log, error, and output files) will be changed by initialdir.

+ +

In this example, HTCondor will create a job for each directory in state-dirs.txt and use +that state's directory as the initialdir from which the job will be submitted. +Therefore, transfer_input_files = input.data can be used without specifying +the path to this input.data file. Any output generated by the job will then be returned to the initialdir +location.

+ +

+

4B. Submitting Multiple Jobs in Different Directories with queue <directory> matching *

+ +

This section demonstrates how to submit multiple jobs, using a specific +directory structure where folder names have a string of text in common. It is relevant to anyone who wants to submit multiple jobs, where each job has its own directory for input/output files on the submit server.

+ +

Directory Structure

For a single batch of jobs, the directories will look like this:

+ +
project_name/
+    run_code.sh
+    submit.sub
+    shared/
+        scripts, code_package
+        shared_input
+    job1/
+        input/
+            job_input
+    job2/
+        input/
+            job_input
+    job3/
+        input/
+            job_input
+
+ +

You'll want to put all your code and files required for every job in +shared/ and individual input files in the individual job directories +in an input folder. In the submit file below, it matters that the +individual job directories start with the word "job". Your directories should all have a string of text in common, so that you can use the queue <directory> matching <commonString>* syntax to queue a job for each directory.

+ +
+

Note: the job directories need to be hosted in your /home directory +on the submit node. The following instructions will not work for files +hosted on /staging!

+
+ +

Submit File

Your submit file, which should go in your main project directory, should look like this:

+ +
# Specify your executable (single binary or a script that runs several
+#  commands) and arguments
+executable = run_code.sh
+# arguments = arguments to your script go here
+#
+# Specify the desired name of the HTCondor log file,
+#  and the desired name of the standard error and standard output file.  
+log = process.log
+error = process.err
+output = process.out
+#
+# Specify that HTCondor should transfer files to and from the
+#  computer where each job runs. 
+should_transfer_files = YES
+# Set the submission directory for each job with the $(directory)
+# variable (set below in the queue statement).  Then transfer all 
+# files in the shared directory, and from the input folder in the 
+# submission directory
+initialdir = $(directory)
+transfer_input_files = ../shared/,input/
+#
+# Tell HTCondor what amount of compute resources
+#  each job will need on the computer where it runs.
+request_cpus = 1
+request_memory = 1GB
+request_disk = 1GB
+#
+# Create a job for each "job" directory.
+queue directory matching job*
+
+ +

Note that the final line matches the pattern of your directory names that you previously +created. You can use a different name for the +directories (like data, sample, or seed), but you should use whatever word +the directories have in common in the final queue statement in place of "job".

+ +

Jobs can then be submitted using condor_submit.
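For example, from the main project directory (using the submit file name shown in the directory structure above):

[user@submit]$ condor_submit submit.sub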

+ +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/noShow-use-submit-node.html b/preview-fall2024-info/uw-research-computing/noShow-use-submit-node.html new file mode 100644 index 000000000..1a894cbd4 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/noShow-use-submit-node.html @@ -0,0 +1,726 @@ + + + + + + +Policies for Using HTC Submit Servers + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Policies for Using HTC Submit Servers +

+ +

If you have not yet requested a CHTC account and met with a Research +Computing Facilitator, please fill out this form.

+ +

When your account was created, a member of our team emailed you with the +server address you should use for logging in. See our +connecting guide for additional information.

+ +

Access to other submit servers is granted for +specific purposes and will be communicated to you by CHTC staff.

+ +

Connecting to HTC Submit Servers

+ +

Once your account is active, you can connect to your designated submit +server with an SSH connection ("PuTTY" for Windows, putty.org; +"Terminal" for Linux/Mac). Note, however, that our submit servers only +accept connections from campus networks. Off-campus connections have +been disabled for security purposes. If you need to connect from off +campus, you can first SSH to a computer in your department, and then SSH +to our submit server. You may also be able to use a Virtual Private +Network (VPN) to join the campus network when working off-campus. DoIT +provides information on using a +VPN.
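As a sketch, a connection from a Mac or Linux terminal is a single SSH command; the address below is only a placeholder for the server address you were given when your account was created:

$ ssh NetID@<your-submit-server-address>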

+ +

For more detailed information on connecting to CHTC services, both +logging in and transferring files, see our Connecting to CHTC +guide.

+ +

General User Policies

+ +

See our User Policies and Expectations for details on general CHTC policies.

+ +

HTC System Specific Limits

+ +

Below are some of the default limits on CHTC’s HTC system. Note that as a large-scale +computing center, we want you to be able to run at a large scale - often much larger +than these defaults. Please contact the facilitation team whenever you encounter one +of these limits so we can adjust your account settings or discuss alternative ways to +achieve your computing goals.

+ +
    +
  • Jobs with long runtimes. There is a default run limit of 72 +hours for each job queued in the HTC System, once it starts running. +Jobs longer than this will be placed in HTCondor's "hold" state. +If your jobs will be longer, please email +us, and we'll help you to determine the +best solution.
  • +
  • Submitting many jobs from one submit file. HTCondor is designed +to submit thousands (or more) jobs from one submit file. If you are +submitting over 10,000 jobs per submit file or want to queue +more than 50,000 total jobs as a single user, +please email us as we have strategies to +submit that many jobs in a way that will ensure you have as many +jobs running as possible without also compromising queue performance.
  • +
  • Submitting many short jobs from one submit file. While HTCondor +is designed to submit thousands of jobs at a time, many short jobs +can overwhelm the submit server, resulting in other jobs taking much +longer to start than usual. If you plan on submitting over +1000 jobs per submit file, we ask that you ensure each job has a +minimum run time of 5 minutes (on average).
  • +
  • The default disk quota is 20 GB in your /home directory, as a +starting point. You can track your use of disk space and your quota value, +using our Quota Guide. If you need more space +for concurrent work, please see our Request a Quota Change +guide.
  • +
  • Submitting jobs with "large" files: HTCondor's +normal file transfer mechanism ("transfer_input_files") is good for +files up to 100MB in size (or 500MB total, per job). For jobs with larger +files, please see our guide on File Availability +Options, and contact us to make arrangements.
  • +
+ +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/os-request-htc.html b/preview-fall2024-info/uw-research-computing/os-request-htc.html new file mode 100644 index 000000000..ce1b22761 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/os-request-htc.html @@ -0,0 +1,738 @@ + + + + + + +Use Custom Linux Versions in CHTC + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Use Custom Linux Versions in CHTC +

+ +

By default, CHTC-managed submit servers automatically add a job +requirement that requires jobs to run on servers running our primary operating system unless otherwise specified by the user. There are two options to override this +default:

+ +
    +
  1. Using a Container (recommended)
  2. +
  3. Requesting a Specific +Operating System.
  4. +
+ + + +

Using a container to provide a base version of Linux will allow you to +run on any nodes in the HTC system, and not limit you to a subset of nodes.

+ +

After finding a container with the desired version of Linux, just follow our instructions +for Docker or Singularity/Apptainer jobs.

+ +

Note that the default Linux containers on Docker Hub are often missing commonly installed +packages. Our collaborators in OSG Services maintain a few curated containers with a +greater selection of installed tools that +can be seen here: Base Linux Containers

+ +

Option 2: Requesting a Specific Operating System

+ +

At any time, you can require a specific operating system +version (or versions) for your jobs. This option is more limiting because +you are restricted to the operating systems used by CHTC and to the number of nodes +running that operating system.

+ +

Require CentOS Stream 8 (previous default) or CentOS Stream 9

+ +

To request that your jobs run on servers with CentOS 8 only, add the +following line to your submit file:

+ +
chtc_want_el8 = true
+
+ +

To request that your jobs run on servers with CentOS 9 only, add +the following line to your submit file:

+ +
chtc_want_el9 = true 
+
+ +
+

Note that after May 1, 2024, CentOS 9 will be the default and CentOS 8 will be phased out +by the middle of summer 2024. If you think your code relies on CentOS 8, make sure to +see our transition guide or talk to the facilitation +team about a long-term strategy for running your work.

+
+ +

Use Both CentOS Stream 8 (previous default) and CentOS Stream 9 (current default)

+ +

To request that your jobs run on computers running either version of +CentOS Linux, add the following requirements line to your submit file:

+ +
requirements = (OpSysMajorVer == 8) || (OpSysMajorVer == 9)
+
+
+

Note: these requirements are not necessary for jobs that use Docker containers; +these jobs will run on servers with any operating system automatically.

+
+ +

The advantage of this option is that you may be able to access a +larger number of computers in CHTC. Note that code compiled on a +newer version of Linux may not run on older versions of Linux. Make +sure to test your jobs specifically on both CentOS Stream 8 and CentOS Stream 9 +before using the option above.

+ +

Does your job already have a requirements statement? If so, you can +add the requirements above to the pre-existing requirements by using +the characters &&. For example, if your jobs already require large +data staging:

+ +
requirements = (Target.HasCHTCStaging == true) 
+
+ +

You can add the requirements for using both operating system versions like so:

+ +
requirements = (Target.HasCHTCStaging == true) && ((OpSysMajorVer == 8) || (OpSysMajorVer == 9))
+
+ + +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/osdf-fileXfer-draft.html b/preview-fall2024-info/uw-research-computing/osdf-fileXfer-draft.html new file mode 100644 index 000000000..cc56041e5 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/osdf-fileXfer-draft.html @@ -0,0 +1,719 @@ + + + + + + +HTC Data Storage Locations + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ HTC Data Storage Locations +

+ +


+ +

Data Storage Locations

+

The HTC system has two primary locations where users can store files: /home and /staging.

+ +

The mechanisms behind /home and /staging that manage data are different and are optimized to handle different file sizes. /home is more efficient at managing small files, while /staging is more efficient at managing larger files. It’s important to place your files in the correct location, as it will improve the speed and efficiency at which your data is handled and will help maintain the stability of the HTC filesystem.

+ +

Understand your file sizes

+

To know whether a file should be placed in /home or in /staging, you will need to know its file size (also known as the amount of "disk space" a file uses). There are many commands to print out your file sizes; here are a few of our favorites:

+ +

Use ls with -lh flags

+

The command ls stands for "list" and, by default, lists the files in your current directory. The flag -l stands for "long" and -h stands for "human-readable". When these flags are passed to the ls command, it prints the long metadata associated with each file and converts values such as file sizes into a human-readable format (instead of a computer-readable format).

+ +
NetID@submit$ ls -lh
+
+ +

Use du -h

+

Similar to ls -lh, du -h prints out the “disk usage” of directories in a human-readable format.

+ +
NetID@submit$ du -h
+
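If you only want the total for a directory rather than a listing of every subdirectory, the -s ("summarize") flag can be added; the path here is a placeholder:

NetID@submit$ du -sh /home/NetID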
+ +

Transferring Data to Jobs

+

The HTCondor submit file transfer_input_files = line should always be used to tell HTCondor what files to transfer to each job, regardless of whether a file originates from your /home or /staging directory. However, the syntax for telling HTCondor to fetch files from /home or /staging and transfer them to your running job differs:

+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Input SizesFile LocationSubmit File Syntax to Transfer to Jobs
0-500 MB/hometransfer_input_files = input.txt
500 MB - 10 GB/stagingtransfer_input_files = osdf:///chtc/staging/NetID/input.txt
10+ GB/stagingtransfer_input_files = file:///staging/NetID/input.txt
+ +

Transfer Data Back from Jobs to /home or /staging

+ +

When a job completes, by default, HTCondor will return newly created or edited files in the top-level directory back to your /home directory.

+ +

To transfer files or folders back to /staging, in your HTCondor submit file, use +transfer_output_remaps = "output1.txt = file:///staging/NetID/output1.txt", where output1.txt is the name of the output file or folder you would like transferred back to a /staging directory.

+ +

If you have more than one file or folder to transfer back to /staging, use a semicolon (;) to separate the files for HTCondor to transfer back, like so: +transfer_output_remaps = "output1.txt = file:///staging/NetID/output1.txt; output2.txt = file:///staging/NetID/output2.txt"

+ +

Make sure to include only one set of quotation marks, wrapping the entire value you are passing to transfer_output_remaps =.
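Putting the input and output pieces together, the relevant submit file lines might look like this sketch (the file names and the NetID path are illustrative):

transfer_input_files = osdf:///chtc/staging/NetID/input.txt
transfer_output_remaps = "output1.txt = file:///staging/NetID/output1.txt; output2.txt = file:///staging/NetID/output2.txt"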

+ +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/other-resources.html b/preview-fall2024-info/uw-research-computing/other-resources.html new file mode 100644 index 000000000..0f1cdb099 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/other-resources.html @@ -0,0 +1,376 @@ + + + + + + +Other Resources + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+

+ Other Resources +

+ +
+
+
+ +
+
+
+ + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/python-jobs.html b/preview-fall2024-info/uw-research-computing/python-jobs.html new file mode 100644 index 000000000..083e02307 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/python-jobs.html @@ -0,0 +1,1108 @@ + + + + + + +Running Python Jobs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Running Python Jobs +

+ +

Quickstart: Python

+ + + +

Option A

Build a container with Python & packages installed inside:

+ +
    +
  1. How to build your own container
  2. +
  3. Example container recipes for Python
  4. +
  5. Use your container in your HTC jobs
  6. +
+ +

Option B

+ +

Use an existing container with a base installation of Python:

+ +
    +
  1. Choose an existing container. See +OSG Base Containers +or +DockerHub Python Containers.
  2. +
  3. Use the container in your HTC jobs
  4. +
+ + + +

More information

+ +

All CHTC machines have a base installation of Python 3. +The exact versions and packages installed, however, can vary from machine to machine. +You should be able to include simple python commands in your calculations, i.e., python3 simple-script.py.

+ +

If you need a specific version of Python 3 or would like to install your own packages, we recommend that you use a container as described above.

+ +

The example recipes provided above for building your own container are intended for python packages that can be installed using python3 -m pip install. +Additional software can be installed when building your own container.

+ +

For packages that need to be installed with conda install, see the guide on Conda.

+ +

Executable

+ +

When using a container, you can use a python .py script as the submit file executable, provided that the first line (the “shebang”) in the .py file is

+ +
#!/usr/bin/env python3
+
+ +

with the rest of the file containing the commands that you want to run using Python.

+ +

Alternatively, you can use a bash .sh script as the submit file executable, and in that file you can use the python3 command:

+ +
#!/bin/bash
+
+python3 my-script.py
+
+ +

In this case, remember to include your .py file in the transfer_input_files line of your submit file.
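As a rough sketch, a container-based submit file for such a job might include lines like the following; the container tag, file names, and resource requests are illustrative assumptions, not CHTC defaults, and run.sh stands for the bash script shown above:

# Sketch: Python job using a Docker Hub container
container_image = docker://python:3.11
executable = run.sh
transfer_input_files = my-script.py

log = job.log
error = job.err
output = job.out

request_cpus = 1
request_memory = 1GB
request_disk = 1GB

queue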

+ + + +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/quota-request.html b/preview-fall2024-info/uw-research-computing/quota-request.html new file mode 100644 index 000000000..c5fd6ce38 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/quota-request.html @@ -0,0 +1,419 @@ + + + + + + +Request a Quota Change + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Request a Quota Change +

+ +

To request a change in quota(s) for data storage locations on CHTC systems, please fill out the form below. +This form applies to the following locations for both individual and shared (group) directories:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
LocationPurposeMore Information
HTC /homeFor files less than 1 GB for jobs on the HTC systemChecking Disk Quota and Usage
HTC /stagingFor files greater than 1 GB for jobs on the HTC systemStaging Guide
HPC /homeFor repeatedly used files for jobs on the HPC systemHPC Data Storage and Management
HPC /scratchFor working data for jobs on the HPC systemHPC Data Storage and Management
+ +

For other locations, please email us at chtc@cs.wisc.edu. +Remember, CHTC data locations are not for long-term storage and are NOT backed up. +Please review our data policies on the Policies and Expectations for Users page.

+ +

How to Check Your Quotas

+ +

The form asks for the current quotas of the folders you wish to change. +For individual directories, your quotas are printed on login. +For group directories at HTC /staging, HPC /home, HPC /scratch, you can retrieve your quotas using the command

+ +
get_quotas /path/to/group/directory
+
+ +

Quota Request Form

+ +

The following link leads to a Qualtrics form that we use for requesting quota changes.

+ + + +

If you do not receive an automated email from chtc@cs.wisc.edu within a few hours of completing the form, + OR if you do not receive a response from a human within two business days (M-F), please email chtc@cs.wisc.edu.

+ + +
+
+ + + + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/r-jobs.html b/preview-fall2024-info/uw-research-computing/r-jobs.html new file mode 100644 index 000000000..fdc95a044 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/r-jobs.html @@ -0,0 +1,1088 @@ + + + + + + +Running R Jobs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Running R Jobs +

+ +

Quickstart: R

+ + + +

Option A

Build a container with R & packages installed inside:

+ +
    +
  1. How to build your own container
  2. +
  3. Example container recipes for R
  4. +
  5. Use your container in your HTC jobs
  6. +
+ +

Option B

+ +

Use an existing container with a base installation of R:

+ +
    +
  1. Choose an existing container. See +OSG R containers +or +Rocker R containers.
  2. +
  3. Use the container in your HTC jobs
  4. +
+ + + +

More information

+ +

No CHTC machine has R pre-installed, so you must configure a portable copy of R to work on the HTC system. +Using a container as described above is the easiest way to accomplish this.

+ +

Executable

+ +

When using a container, you can use a .R script as the submit file executable, provided that the first line (the “shebang”) in the .R file is

+ +
#!/usr/bin/env Rscript
+
+ +

with the rest of the file containing the commands that you want to run using R.

+ +

Alternatively, you can use a bash .sh script as the submit file executable, and in that file you can use the Rscript command:

+ +
#!/bin/bash
+
+Rscript my-script.R
+
+ +

In this case, remember to include your .R file in the transfer_input_files line of your submit file.
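As a rough sketch, a container-based submit file for such a job might include lines like the following; the container tag, file names, and resource requests are illustrative assumptions, not CHTC defaults, and run.sh stands for the bash script shown above:

# Sketch: R job using a Rocker container
container_image = docker://rocker/r-ver:4.4.1
executable = run.sh
transfer_input_files = my-script.R

log = job.log
error = job.err
output = job.out

request_cpus = 1
request_memory = 1GB
request_disk = 1GB

queue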

+ + + +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/scaling-htc.html b/preview-fall2024-info/uw-research-computing/scaling-htc.html new file mode 100644 index 000000000..7336f67f0 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/scaling-htc.html @@ -0,0 +1,786 @@ + + + + + + +Scale Beyond Local HTC Capacity + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Scale Beyond Local HTC Capacity +

+ +

This guide provides an introduction to running jobs outside of CHTC: why +using these resources is beneficial, what resources are available, and +how to use them.

+ +

Contents

+ +
    +
  1. Why run on additional resources outside CHTC? + +
  2. +
  3. Job Qualifications
  4. +
  5. Submitting Jobs to Run Beyond CHTC
  6. +
+ +

+ +

1. Why run on additional resources outside CHTC?

+ +

Running on other resources in addition to CHTC has one huge benefit: +size! The UW Grid and OSG include thousands of computers in +addition to what's already available in CHTC, including specialized +hardware resources like GPUs. Most CHTC users who run +on CHTC, the UW Grid, and the OSG can get more than 100,000 computer +hours (more than 11 years of computing!) in a single day. Read on to +learn more about these resources.

+ +

+ +

A. UW Grid

+ +

What we call the "UW Grid" is a collection of all the groups and +centers on campus that run their own high throughput computing pool that +uses HTCondor. Some of these groups include departments (Biochemistry, +Statistics) or large physics projects (IceCube, CMS). Through agreements +with these groups, jobs submitted in CHTC can opt into running on these +other campus pools if there is space.

+ +

We call sending jobs to other pools on campus flocking.

+ +

+ +

B. UW-Madison’s OSG Pool

+ +

CHTC maintains an OSG pool for the campus community, which includes +resources contributed by campuses, national labs, and other institutions +across and beyond the US.

+ +

When you send jobs to other institutions in our OSG pool, we call that gliding.

+ +

+ +

2. Job Qualifications

+ +

Not all jobs will run well outside of CHTC. Because these jobs are +running all over the campus or country, on computers that don't belong +to us, they have two major requirements:

+ +
    +
  • +

    Moderate Data Sizes: We can support input file sizes of up to +20 GB per file per job. This covers input files that would normally be +transferred out of a /home directory or use SQUID, in addition to larger +files up to 20GB. Outputs per job can be of similar sizes. If your input or +output files are larger than 1GB, or you have any other questions about +handling data on resources beyond CHTC, please contact us!

    +
  • +
  • +

Short or interruptible jobs: Your job can complete in under 10 hours +-- either it finishes in that amount of time, or it +self-checkpoints at least that frequently. If you would like to implement +self-checkpointing for longer-running code, we are happy to provide resources +and guidance.

    +
  • +
+ +

+ +

3. Submitting Jobs to Run Beyond CHTC

+ +

If your jobs meet the characteristics above and you would like to use +either the UW Grid or OS Pool to run jobs, in addition to CHTC, you can add +the following to your submit file:

+ + + + + + + + + + + + +
+WantFlocking = trueAlso send jobs to other HTCondor Pools on campus (UW Grid)
Good for jobs that are less than ~8 hours, on average, or checkpointing jobs.
+WantGlideIn = trueAlso send jobs to the OS Pool.
Good for jobs that are less than ~8 hours, on average, or checkpointing jobs.
+ +
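In practice, a submit file that opts into both looks like this sketch:

# Run in CHTC, plus other campus pools (flocking) and the OS Pool (gliding)
+WantFlocking = true
+WantGlideIn = true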

To guarantee maximum efficiency, please do the following steps +whenever submitting a new type of job to the UW Grid or OSG:

+ +
    +
  1. +

Test Your Jobs: You should run a small test (anywhere from +10-100 jobs) outside CHTC before submitting your full workflow. To +do this, take a job submission that you know runs successfully on +CHTC, then add the following option to the submit file and submit the +test jobs:

    + +
    requirements = (Poolname =!= "CHTC")
    +
    + +

(If your submit file already has a requirements = line, you can +append the Poolname requirement by adding a double ampersand +(&&) followed by the additional requirement; see the sketch after this list.)

    +
  2. +
  3. +

    Troubleshooting: If your jobs don't run successfully on the UW +Grid or OS Pool, please get in touch with a research computing +facilitator.

    +
  4. +
  5. +

Scaling Up: Once you have tested your jobs and they seem to be +running successfully, you are ready to submit a full batch of jobs +that uses CHTC and the UW Grid/OS Pool. REMOVE the Poolname +requirement from the test jobs but leave the +WantFlocking and ++WantGlideIn lines.

    +
  6. +
+ +
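As referenced in step 1 above, appending the Poolname test to an existing requirements line looks like the following sketch (the staging requirement is just an illustrative pre-existing requirement):

requirements = (Target.HasCHTCStaging == true) && (Poolname =!= "CHTC")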
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/sign-in.html b/preview-fall2024-info/uw-research-computing/sign-in.html new file mode 100644 index 000000000..a7924ed4c --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/sign-in.html @@ -0,0 +1,352 @@ + + + + + + +Sign In for Office Hours + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+

+ Sign In for Office Hours +

+ + +
+ +
+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/singularity-hpc.html b/preview-fall2024-info/uw-research-computing/singularity-hpc.html new file mode 100644 index 000000000..703d96573 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/singularity-hpc.html @@ -0,0 +1,483 @@ + + + + + + +Using Software in a Container on the HPC Cluster + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Using Software in a Container on the HPC Cluster +

+ +

DEPRECATED

+ +

See instead our “Using Apptainer Containers on HPC” guide: https://chtc.cs.wisc.edu/uw-research-computing/apptainer-hpc.

+ +

Overview

+ +

Software that is packaged in a "container" can +be run on the HPC cluster. This guide assumes that you are starting with +an existing Docker container and shows how to use it to run a job on the HPC cluster.

+ +

Note that you may need to install a version of MPI to your container +when it is initially created. See the notes about this below.

+ +

The two steps to run a container on the HPC cluster:

+
    +
  1. Convert the container to a Singularity image file
  2. +
  3. Run a job that uses the container
  4. +
+ +

Notes about MPI and Containers

+ +

There are two ways to run a Singularity container integrated with MPI: hybrid +mode and bind mode.

+ +

In hybrid mode, the container has its own copy of MPI that is compatible +with a version of MPI already installed on the cluster.

+ +

In bind mode, the code in the container has been compiled with MPI that +exists outside the container and there is no MPI installation in the container itself. +Again, the version of MPI used needs to be compatible with one already installed +on the cluster.

+ +

This will be relevant in how the job is executed later on: Using Singularity Container Images

+ +

+ +

1. Convert Container to Singularity Format

+ +

We assume that there is a Docker container (either found +or created by you) online that you want to use. To use this container +on the HPC cluster, it needs to be converted to a Singularity-format +image file. To do this:

+ +
    +
  1. Log in to one of the HPC cluster log in nodes.
  2. +
  3. Start an interactive job: +
     [alice@login]$ srun -n4 -N1 -p int --pty bash
    +
    +
  4. +
  5. Once the interactive job starts, you’ll need to unset a shell environment +variable that prevents download of the Docker container. +
    [alice@int]$ unset HTTPS_PROXY
    +
    +
  6. +
  7. Then, save the Docker container to a Singularity image. +
     [alice@int]$ singularity build /software/alice/name.simg docker://user/image:version
    +
    + +

    For example, if user "Alice" wanted to use the "Fenics" container + provided on DockerHub, + and save it to a file named fenics.simg, she would run:

    + +
     [alice@int]$ singularity build /software/alice/fenics.simg docker://fenicsproject/stable:latest
    +
    + +
    +

This command will, by default, pull the initial Docker container from +Docker Hub. If your Docker container is stored elsewhere, or you are +starting with a Singularity image, contact CHTC staff for specific instructions.

    +
    +
  8. +
  9. Once the Singularity command completes, type exit to leave the interactive job.
  10. +
+ +

+ +

2. Using Singularity Container Images

+ +

To use a Singularity container in a job, the SLURM submit file will remain mostly the +same; what will change is the job’s primary command at the end of the +file. This command will run your primary program inside the container +file you've downloaded. The main MPI command will still be part of the +singularity command:

+ +
#!/bin/sh
+#SBATCH options
+
+module load MPI/version
+mpirun -np ## singularity exec /path/to/container/file command-to-run
+
+ +

For example, if Alice wanted to run a script she had written +(poisson.py) inside the downloaded fenics container, using 40 cores, she would use the +following command at the end of her submit file:

+ +
mpirun -np 40 singularity exec /software/alice/fenics.simg ./poisson.py
+
+ +

The example shown above uses the “hybrid” model for running MPI, which assumes +that there is a copy of MPI installed in the container that matches what already +exists on the cluster.

+ +

If your container does not have its own copy of MPI installed, you need +to use the "bind" model for running MPI, which requires an additional flag and +the location of the main MPI directory:

+ +
#!/bin/sh
+#SBATCH options
+
+module load MPI/version
+mpirun -np ## singularity exec --bind /path/to/cluster/MPI/dir/ /path/to/container/file command-to-run
+
+ +

On CHTC’s cluster, the GCC-based version of OpenMPI is installed at the path +/software/chtc/easybuild/v2/software/OpenMPI/4.0.5-GCC-9.3.0/, +so the commands to run the “Alice” example above would be:

+ +
MPI_DIR=/software/chtc/easybuild/v2/software/OpenMPI/4.0.5-GCC-9.3.0/
+mpirun -np 40 singularity exec --bind $MPI_DIR /software/alice/fenics.simg ./poisson.py
+
+ +

More details on the differences between using the “hybrid” and “bind” models +for MPI and Singularity are available here: https://sylabs.io/guides/3.8/user-guide/mpi.html

+ +
+
+ + + + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/singularity-htc.html b/preview-fall2024-info/uw-research-computing/singularity-htc.html new file mode 100644 index 000000000..9638eaa95 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/singularity-htc.html @@ -0,0 +1,365 @@ + + + + + + +Use Apptainer (Singularity) Environments + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Use Apptainer (Singularity) Environments +

+ +

The contents of this page have been updated and split into the following guides:

+ + + +
+
+ + + + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/software-overview-htc.html b/preview-fall2024-info/uw-research-computing/software-overview-htc.html new file mode 100644 index 000000000..c948fda80 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/software-overview-htc.html @@ -0,0 +1,1094 @@ + + + + + + +Overview: How to Use Software + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Overview: How to Use Software +

+ + + +

+In order to run jobs on the High Throughput Computing (HTC) system, researchers need to set up their software on the system. +This guide introduces how to build software in a container (our recommended strategy), links to a repository with a selection of software installation “recipes”, and provides quick links to common software packages and their installation recommendations.

+ +
+ +

Table of Contents

+ +
+
+ +

Quickstart

+ +

Recipes

+ +

Containers

+ +
+
+
+ +

Quickstart

+ +

Click the link in the table below to jump to the instructions for the language/program/software that you want to use. +More information is provided in the CHTC Recipes Repository and Containers sections.

+ +
+
+ + +
+ +
+ +
+ +
+ +
+ +
+ +
+ +
+ +
+ +
+ +
+ +
+ +
+
+ +
+

Quickstart: Conda

+ + + +

Option A

Build a container with Conda packages installed inside:

+ +
    +
  1. How to build your own container
  2. +
  3. Example container recipes for Conda
  4. +
  5. Use your container in your HTC jobs
  6. +
+ +

Option B

+ +

Create your own portable copy of your Conda packages:

+ +
    +
  1. Follow the instructions in our guide
  2. +
+ +
+

This approach may be sensitive to the operating system of the execution point. +We recommend building a container instead, but are keeping these instructions as a backup.

+
+ + + More Information +
+ + + + + + + + + + + +
+
+ +
+ +

Recipes

+ +

CHTC provides specific examples for software and workflows for use on our systems in our “Recipes” repository on Github: +https://github.com/CHTC/recipes.

+ +

Links to specific recipes are used in the Software section for certain softwares and coding languages.

+ +
+ +

Containers

+ +

Many of the recipes in our Recipes repository involve building your own container. +In this section, we provide a brief introduction into how to use containers for setting up your own software to run on the High Throughput system.

+ +

What is a Container?

+ +

“A container is a standard unit of software that packages up code and all its dependencies so the application runs quickly and reliably from one computing environment to another.” +– Docker

+ +

A container is a portable, self-contained operating system and can be easily executed on different computers regardless of their operating systems or programs. +When building the container you can choose the operating system you want to use, and can install programs as if you were the owner of the computer.

+ +

While there are some caveats, containers are useful for deploying software on shared computing systems like CHTC, where you do not have permission to install programs directly.

+ +

“You can build a container using Apptainer on your laptop, and then run it on many of the largest HPC clusters in the world, local university or company clusters, a single server, in the cloud, or on a workstation down the hall.” +– Apptainer

+ +
+

What is a Container Image?

+ +

A “container image” is the persistent, on-disk copy of the container. +When we talk about building or moving or distributing a container, we’re actually talking about the file(s) that constitute the container. +When a container is “running” or “executed”, the container image is used to create the run time environment for executing the programs installed inside of it.

+
+ +

Container Technologies

+ +

There are two container technologies supported by CHTC: Docker and Apptainer. +Here we briefly discuss the advantages of each.

+ +

Docker

+ +

https://www.docker.com/

+ +

Docker is a commercial container technology for building and distributing containers. +Docker provides a platform for distributing containers, called Docker Hub. +Docker Hub can make it easy to share containers with colleagues without having to worry about the minutiae of moving files around.

+ +

On the HTC system, you can provide the name of your Docker Hub container in your submit file, +and HTCondor will automatically pull (download) the container and use it to create the software environment for executing your job. +Unfortunately, however, you are unable to build a Docker container and upload it to Docker Hub from CHTC servers, +so your container must already exist on Docker Hub in a public repository. +This requires that you have Docker installed on your computer so that you can build the container and upload it to Docker Hub.
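As a sketch, the typical build-and-publish cycle on your own computer uses the standard Docker commands; the username, repository name, and tag here are placeholders:

# Build the container from a Dockerfile in the current directory, then publish it
docker build -t username/my-software:v1 .
docker push username/my-software:v1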

+ +

Apptainer

+ +

https://apptainer.org/

+ +

Apptainer is an open-source container technology for building containers. +Apptainer creates a single, stand-alone file that is the container image. +As long as you have the container image file, you can use Apptainer to run your container.

+ +

On the HTC system, you can provide the name of your Apptainer file in your submit file, +and HTCondor will use a copy of it to create the software environment for executing your job. +You can use Apptainer to build the container image file on CHTC servers, so there is no need to install the container software on your own computer.

+ +

Use an Existing Container

+ +

If you or a colleague have already built a container for use on CHTC, it is fairly straightforward to modify your jobs to use the container environment as discussed below.

+ +

Use a Docker container

+ +

Full Guide: Running HTC Jobs Using Docker Containers

+ +

If the container you want to use is hosted on Docker Hub, find the container “address” and provide it in your submit file. +The address typically has the convention of user/repository:tag, though official repositories such as Python are just repository:tag. +In your submit file, use

+ +
container_image = docker://user/repository:tag
+
+ +

If the container you want to use is hosted in a different container registry, there should still be a container “address” to use, +but now there will be a website prefix.

+ +
container_image = docker://registry_address/user/repository:tag
+
+ +

For example, to use a container from the NVIDIA Container Registry (nvcr), +you would have docker://nvcr.io/nvidia/repository:tag.

+ +

Back to Top

+ +

Use an Apptainer container

+ +

Full Guide: Use Apptainer Containers

+ +

For historical reasons, the Apptainer container file has the file extension .sif. +The syntax for giving HTCondor the name of the container file depends on where it is located on the CHTC system.

+ +

If the .sif file is in a /home directory:

+ +
container_image = path/to/my-container.sif
+
+ +

If the .sif file is in a /staging directory:

+ +
container_image = file:///staging/path/to/my-container.sif
+
+ +

If the .sif file is in a /staging directory AND you are using +WantFlocking or +WantGliding:

+ +
container_image = osdf:///chtc/staging/path/to/my-container.sif
+
+ +

Back to Top

+ +

Build Your Own Container

+ +

You can build your own container with the operating system and software that you want to use. +The general process is the same whether you are using Docker or Apptainer.

+ +
    +
  1. +

    Consult your software’s documentation

    + +

    Determine the requirements for installing the software you want to use. +In particular you are looking for (a) the operating systems it is compatible with and (b) the prerequisite libraries or packages.

    +
  2. +
  3. +

    Choose a base container

    + +

    The base container should at minimum use an operating system compatible with your software. +Ideally the container you choose also has many of the prerequisite libraries/programs already installed.

    +
  4. +
  5. +

    Create your own definition file

    + +

    The definition file contains the installation commands needed to set up your software. +(The structure of the container “definition” file differs between Docker and Apptainer, but it is fairly straightforward to translate between the two.)

    +
  6. +
  7. +

    Build the container

    + +

    Once the definition file has been written, you must “build” the container. +The computer you use to build the container will run through the installation commands, almost as if you were actually installing the software on that computer, +but will save the results into the container file(s) for later use.

    +
  8. +
  9. +

    Distribute the container

    + +

To use the container on CHTC servers, you’ll need to distribute the container to the right location. +For Docker containers, this means “pushing” the container to Docker Hub or a similar container registry. +For Apptainer containers, this typically means copying the container .sif file to the /staging system.

    +
  10. +
+ +

You can then use the container following the instructions above.

+ +
+

A common question is whether the software installation process is repeated each time a container is used. +The answer is “no”. +The software installation process only occurs when the container is actually being built. +Once the container has been built, no changes can be made to the container when being used (on CHTC systems).

+
+ +

Build your own Docker container

+ +

Please follow the instructions in our guide Build a Docker Container Image to build your own container using Docker. +As mentioned above, you will need to have Docker installed on your own computer. +This is so that you can push the completed container to Docker Hub.

+ +

You are unable to push containers from CHTC to Docker Hub, so please do not build Docker containers using CHTC!

+ +

Build your own Apptainer container

+ +

Please follow the instructions in our guide Use Apptainer Containers to build your own container using Apptainer. +You can use CHTC servers to build the container, so there is no need to install any software on your computer.
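As a sketch, once you have written a definition file, building the image is a single command (the file names here are placeholders):

apptainer build my-container.sif my-container.def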

+ +

Back to Top

+ +
+ +
+
+ + + + +
+ + + + + + +
HTC Guides
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + + + +
+

+ +

+ +
+ + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/tensorflow-singularity.html b/preview-fall2024-info/uw-research-computing/tensorflow-singularity.html new file mode 100644 index 000000000..21ed67e1c --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/tensorflow-singularity.html @@ -0,0 +1,362 @@ + + + + + + +Running Tensorflow Jobs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Running Tensorflow Jobs +

+ +

The contents of the guide previously at this page are not currently +supported in CHTC, although there are plans to re-integrate them in the +future. For questions about running Tensorflow in CHTC, email CHTC's +Research Computing Facilitators at chtc@cs.wisc.edu

+ +
+
+ + + + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/testing-and-scaling-up.md b/preview-fall2024-info/uw-research-computing/testing-and-scaling-up.md new file mode 100644 index 000000000..e58332ea6 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/testing-and-scaling-up.md @@ -0,0 +1,131 @@
+
+[title]: - "Optimizing HTCondor Submit File Resource Requests"
+
+[TOC]
+
+# Overview
+
+Much of HTCondor's HTC power comes from the ability to run a large number of jobs simultaneously.
+To optimize your work with a high-throughput computing (HTC)
+approach, you will need to test and optimize the resource requests of those jobs,
+requesting only the amount of memory, disk, and CPUs each job truly needs.
+This is an important practice that will maximize your throughput by optimizing the
+number of potential 'slots' that your jobs can match to, reducing the overall
+turnaround time for completing a whole batch.
+
+If you have questions or are unsure if and how your work can be broken up, please contact us at
+.
+
+# Requesting the Number of CPUs, Memory, and Disk Space for the HTCondor Submit File
+
+In the HTCondor submit file, you must explicitly request the number of
+CPUs (i.e. cores), and the amount of disk and memory that the job needs
+to complete successfully.
+When you submit a job for the
+first time you may not know just how much to request, and that's okay.
+Below are some suggestions for making resource requests for initial test
+jobs. **As always, reviewing the HTCondor `log` file from past jobs is
+a great way to learn about the resource needs of your jobs.**
+
+**Requesting CPU Cores**
+
+- For **requesting CPU cores, start by requesting a single CPU**. With single-CPU jobs, you will see
+your jobs start sooner. Ultimately you will be able to achieve
+greater throughput with single-CPU jobs compared to jobs that request
+and use multiple CPUs.
+
+    - **Keep in mind, requesting more CPU cores for a job
+    does not mean that your jobs will use more CPUs.** Rather, you want to make sure
+    that your CPU request matches the number of cores (i.e. 'threads' or 'processes')
+    that you expect your software to use. (Most software uses only 1 CPU core, by default.)
+
+    - There is limited support for multicore work in our high throughput system. For large-scale multicore jobs, contact a Research Computing Facilitator at .
+
+**Requesting Disk Space**
+
+- To inform initial disk requests, always look at the size of your input
+files. At a minimum, you need to request enough disk to support all
+of the input files, the executable, and the output you expect, but don't forget that the standard 'error' and 'output'
+files you specify will capture 'terminal' output that may add up, too.
+
+    - If many of your input and output files are compressed
+    (i.e. zipped or tarballs), you will need to factor that into your
+    estimates for disk usage, as these files will take up additional space once uncompressed
+    in the job.
+
+    - For your initial tests it is okay to request more disk than
+    your job may need so that the test completes successfully. **The key
+    is to adjust disk requests for subsequent jobs based on the results
+    of these test jobs.**
+
+**Requesting Memory**
+
+- Estimating **memory requests** can sometimes be tricky. If you've performed the
+same or similar work on another computer, consider using the amount of
+memory (i.e. RAM) from that computer as a starting point.
For instance,
+most laptop computers these days will have 8 or 16 GB of memory, which is okay to start
+with if you know a single job will succeed on your laptop.
+
+    - For your initial tests it is okay to request more memory than
+    your job may need so that the test completes successfully. **The key
+    is to adjust memory requests for subsequent jobs based on the results
+    of these test jobs.** To fine-tune your requests, make sure to run test jobs - see below for a recommended process.
+
+**Importance of Test Jobs**
+
+- Once you have run a small number of test jobs, **review the bottom of the HTCondor `log` files from those jobs to see how many CPUs and how much memory and disk space were used.** HTCondor will report
+the memory, disk, and CPU usage of your jobs in a table at the *bottom* of this file. You can use these values to inform the requests for future jobs. For example, the bottom of a `.log` file may look like this:
+
+        Partitionable Resources :    Usage  Request Allocated
+           Cpus                 :        1        1         1
+           Disk (KB)            :   860878  1048576   1808522
+           IoHeavy              :        0
+           Memory (MB)          :      960     1024      1024
+
+*Memory is listed in units of megabytes (MB) and disk usage is listed in units of kilobytes (KB). A quick Google search yields many calculators to help convert between different computing size measurements.*
+
+
+# Always Start With Test Jobs
+
+Submitting test jobs is an important first step for optimizing
+the resource requests of your jobs. We always recommend the following approach, whether this is your first time
+using HTC or you are an experienced user starting a new workflow:
+
+**Step 1: Submit a single test job**
+  - Use a single test job to confirm that the job completes successfully and the results are what you expected.
+
+**Step 2: Submit a few (3-10) test jobs using a single submit file**
+  - Once you have a single test job that completes successfully, the next
+  step is to submit a small batch of test jobs (e.g. 3 - 10 jobs)
+  [**using a single submit file**](https://chtc.cs.wisc.edu/uw-research-computing/multiple-jobs). Use this small-scale
+  multi-job submission test to ensure that all jobs complete successfully, produce the
+  desired output, and do not conflict with each other when submitted together. Additionally, running test jobs provides an opportunity to review the `.log` files after each submission and optimize resource requests for future submissions, as described above.
+
+**Step 3: Scale up**
+  - If your workflow requires submission of 500 jobs or less, proceed with submitting your entire batch of jobs. If you plan to submit
+  more than 500 jobs, we recommend submitting an intermediate test of 100-1,000 jobs to catch any
+  failures or holds that may mean your jobs have additional `requirements` they need to specify
+  (and which CHTC staff can help you to identify, based upon your tests).
+
+Some general tips for test jobs:
+
+- Select smaller data sets or subsets of data for your first test jobs. Using
+smaller data will keep the resource needs of your jobs low, which will help get
+test jobs to start, and complete, sooner, when you're just making sure that your submit file
+and other logistical aspects of job submission are as you want them.
+
+- If possible, submit test jobs that will reproduce results you've gotten
+using another system; this makes for a good "sanity check", as you'll be able
+to compare the results of the test to those previously obtained.
+ +- Give your test jobs, and associated HTCondor `log`, `error`, `output`, +and `submit` files meaningful names so you know which results refer to which tests. + +- After initial tests complete successfully, scale up to larger or full-size +data sets; **if your jobs may span a range of input file sizes, submit tests using the smallest +and largest inputs to examine the range of resources that these jobs may need.** + +# Get Help + +For assistance or questions, please email the CHTC team at [chtc@cs.wisc.edu](mailto:chtc@cs.wisc.edu). + diff --git a/preview-fall2024-info/uw-research-computing/testing-jobs.html b/preview-fall2024-info/uw-research-computing/testing-jobs.html new file mode 100644 index 000000000..b1a453dde --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/testing-jobs.html @@ -0,0 +1,541 @@ + + + + + + +Importance of Testing + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Importance of Testing +

+ +

Running your code in test jobs before submitting large job batches is +CRUCIAL to effective high-throughput computing.

+ +

Why Test?

+ +

Improving your own throughput

+ +

Spending the time and effort to run test jobs will pay off in more +effective high-throughput computing in the following ways:

+ +
    +
  • Better matching: Pinpointing the required amount of memory/disk +space will allow your jobs to match to as many computers as +possible. Requesting excessive amounts of memory and disk space will +limit the number of slots where your jobs can run, as well as using +unnecessary space that could be available for other users. That +said...
  • +
  • Fewer holds or evictions: If you don't request *enough* +memory or disk and your job exceeds its request, it can go on hold. +Jobs that have not been tested and run over 72 hours are liable to +be evicted without finishing.
  • +
  • Fewer wasted compute hours: the evictions and holds described +above will be wasted compute hours, decreasing your priority in the +high-throughput system while not returning any results.
  • +
  • Making good choices: knowing how big and how long your jobs are, +and the size of input/output files will show you how to most +effectively use CHTC resources. Jobs under 2 hrs or so? Allow your +jobs to flock and glide to the UW Grid and OS Pool. Input +files of more than 5 GB? You should probably be using the CHTC large +file staging area. Longer jobs? Include a line in your submit file +restricting your jobs to the CHTC servers that guarantee 72 hours.
  • +
+ +

Being a good citizen

+ +

CHTC's high-throughput system has hundreds to thousands of users, +meaning that poor computing practices by one user can impact many other +users. Users who submit jobs that don't finish or are evicted because +of incorrect memory requests are using hours that could have been used +by other people. In the worst case, untested code can cause other jobs +running on an execute server to be evicted, directly harming someone +else's research process. The best practices listed in these guides +exist for a reason. Testing your code and job submissions to make sure +they abide by CHTC recommendations will not only benefit your own +throughput but make sure that everyone else is also getting a fair share +of the resource.

+ +

What to Test

+ +

When running test jobs, you want to pay attention to at least the +following five variables:

+ +
    +
  • disk space
  • +
  • memory usage
  • +
  • length of job
  • +
  • input file size
  • +
  • output file size
  • +
+ +

Memory and disk space simply make sure that your jobs have the resources +they need to run properly. Memory is the amount of RAM needed by your +program when it executes; disk space is how much hard drive space is +required to store your data, executables, and any output files.

+ +

Job length has a huge impact on where your jobs can run. Within a subset +of CHTC servers, jobs are guaranteed to run for 72 hours. Jobs that run +for longer than 72 hours will fail, unless they have implemented a +self-checkpointing method that allows them to resume after being +evicted. Jobs that are shorter, around 2-4 hours, are good candidates to +run on the UW Grid and/or OS Pool.

+ +

Input and output file size will impact how your files will be +transferred to and from the execute nodes. Large input files will need +to be staged on a proxy server or shared file system; small input files +can use HTCondor's built-in file transfer system. If you have questions +about how to handle your data, please email +chtc@cs.wisc.edu to get in touch with a research +computing facilitator who can advise you.

+ +

In addition to these considerations, your script/program itself should +be thoroughly tested on your own machine until it is as bug-free and +correct as possible. If it uses any libraries or packages, you should +know what they are and if they have any other dependencies.

+ +

How to Test

+ +

Interactive Jobs

+ +

One of the most useful tools for testing is HTCondor's interactive job +feature. An interactive job is essentially a job without an executable; +you are the one running the commands instead, through a bash shell +session.

+ +

To request an interactive job:

+ +
    +
  1. +

    Create a submit file as if you were submitting the job normally, +with one change. Don't include an executable line; instead, list +your executable file in the transfer_input_files line.

    + +
    # sample submit file
    +universe = vanilla
    +log = interactive.log
    +
    +# executable = # delete or comment out
    +should_transfer_files = YES
    +when_to_transfer_output = ON_EXIT
    +transfer_input_files = data_file,myprogram
    +
    +request_cpus = 1
    +request_memory = 1GB
    +request_disk = 1GB
    +          
    +queue
    +
    +
  2. +
  3. +

    Then, submit the job using the -i option:

    + +
    $ condor_submit -i submit_file
    +
    + +

    You should see a message like:

    + +
    Submitting job(s).
    +1 job(s) submitted to cluster 4347054.
    +Waiting for job to start... 
    +
    +
  4. +
After a few minutes, the job should match and open an interactive +session on an execute server, with all the files you listed in +transfer_input_files. You are now on an execute server, much like +the ones your jobs will run on when you submit them to HTCondor. +Here, you can try running your executable.
  6. +
  7. +

    Once you are done, you can type exit to leave the interactive +session. Note that any files you created during the session will +be transferred back with you! Another useful tool can be to save +your history to a file, using the following command:

    + +
    $ history > history.txt 
    +
    +
  8. +
+ +

Scale testing

+ +

Once you know that your code works and you can successfully submit one +job to be run by HTCondor, you should test a few jobs before submitting +the full-size batch. After these few jobs complete, pay attention to the +variables described above (memory, disk space, etc.) so you can edit +your submit files before submitting your entire batch of jobs.

+ +

To find information about memory, disk space and time, look at a job's +log file. Its name and where it is located may vary, depending on your +submit process, but once you find it, you should see information like +this:

+ +
001 (845638.000.000) 03/12 12:48:06 Job executing on host: <128.104.58.85:49163>
+...
+005 (845638.000.000) 03/12 12:48:06 Job terminated.
+    (1) Normal termination (return value 0)
+        Usr 0 00:00:00, Sys 0 00:00:00  -  Run Remote Usage
+        Usr 0 00:00:00, Sys 0 00:00:00  -  Run Local Usage
+        Usr 0 00:00:00, Sys 0 00:00:00  -  Total Remote Usage
+        Usr 0 00:00:00, Sys 0 00:00:00  -  Total Local Usage
+    17  -  Run Bytes Sent By Job
+    92  -  Run Bytes Received By Job
+    17  -  Total Bytes Sent By Job
+    92  -  Total Bytes Received By Job
+        Partitionable Resources :    Usage  Request Allocated
+           Cpus                 :                 1         1
+           Disk (KB)            :       12  1000000  26703078
+           Memory (MB)          :        0     1000      1000
+
+ +

The table at the end of the log file shows how many resources your job actually used and can be used to fine-tune your requests for memory and disk. If you didn't keep track yourself, the log file also lists when the job started to execute and when it ended, which gives the length of time required for completion.
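
If you no longer have the log file handy, you can pull the same usage numbers for completed jobs with condor_history; for example, this prints the usage attributes of your five most recently completed jobs (memory in MB, disk in KB, wall time in seconds):

    $ condor_history -limit 5 -af ClusterId ProcId MemoryUsage DiskUsage RemoteWallClockTime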

+ +
+
+ + + + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/thanks.html b/preview-fall2024-info/uw-research-computing/thanks.html new file mode 100644 index 000000000..4271b12bb --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/thanks.html @@ -0,0 +1,357 @@ + + + + + + +Account Request Submitted + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+

+ Account Request Submitted +

+ + + +

Thank you for your account request! You should get an email with “[CHTC Requests]” in the subject line within the next few minutes, and a research computing facilitator should be in touch within a business day.

+ +

If you don’t get an email with the “CHTC Requests” subject or hear from a research computing facilitator within a day, please check your spam folder; if nothing is there, please contact us via the email address at the bottom of this page to confirm that we received your account request.

+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/transfer-data-researchdrive.html b/preview-fall2024-info/uw-research-computing/transfer-data-researchdrive.html new file mode 100644 index 000000000..aa5643472 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/transfer-data-researchdrive.html @@ -0,0 +1,566 @@ + + + + + + +Transfer Files Between CHTC and ResearchDrive + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Transfer Files Between CHTC and ResearchDrive +

+ +

UW-Madison provides shared data storage for research called ResearchDrive. It is possible to transfer files directly between ResearchDrive and CHTC’s systems. The instructions in this guide may also work for accessing other data services on campus from CHTC; contact us if you would like to know more.

+ +

A. Prerequisites

+ +

In order to follow the steps in this guide, you need access to a ResearchDrive share, either as PI or member of your PI’s group, as well as a CHTC account. In what follows, +we assume that you are transferring files to and from our HTC system, but you can +use the same process to transfer files to and from the HPC cluster if you first log +in to one of the HPC login nodes.

+ +

B. Transferring Files

+ +

To transfer data between ResearchDrive and CHTC, do the following (a complete example session is shown after these steps):

+ +
    +
  1. Log in: +
      +
    1. If you are transferring files to or from a /staging directory, log in to transfer.chtc.wisc.edu.
    2. If you are transferring files to or from your /home directory, log in to your usual submit server (typically ap2001.chtc.wisc.edu or ap2002.chtc.wisc.edu).
    +
  2. Choose a folder: Navigate to the folder in CHTC (/staging or /home) where you would like to transfer files.
  3. Connect to ResearchDrive: Run the following command to connect to ResearchDrive, filling in the username of your PI:
     [alice@server]$ smbclient -k //research.drive.wisc.edu/PI-Name
    +
    + +

    Your prompt should change to look like this:

    +
     smb: \> 
    +
    + +
    +

    Note about NetIDs

    +

    If your CHTC account is not tied to your campus NetID, or you are accessing a data storage service that doesn’t use your NetID, you’ll need to omit the -k flag above.

    +
    +
  4. Choose a folder, part 2: If you type ls now, you’ll see the files in ResearchDrive, not CHTC. Navigate through ResearchDrive (using cd) until you are at the folder where you would like to get or put files.
  5. Move files: To move files, you will use the get and put commands:
      +
    • To move files from CHTC to ResearchDrive, run: +
        smb: \> put filename
      +
      +
    • To move files from ResearchDrive to CHTC, run: +
        smb: \> get filename
      +
      +
    +
  6. Finish: Once you are done moving files, you can type exit to leave the connection to ResearchDrive.
+ +
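
Putting the steps together, a complete session that downloads one file from ResearchDrive into a /home directory might look like this (the directory and file names are placeholders):

    [alice@ap2001]$ cd /home/alice/project
    [alice@ap2001]$ smbclient -k //research.drive.wisc.edu/PI-Name
    smb: \> cd data/run1
    smb: \> get results.tar.gz
    smb: \> exit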

Transferring a Batch of Files

+ +

The steps described above work well for transferring a single file (or a tar archive of many files) at a time, and are best suited to moving a few files per session. However, smbclient also provides options for transferring many individual files in a single command using the * wildcard character.

+ +

To transfer multiple files at once, first you must turn off the smbclient file transfer prompt, +then use either mget or mput for your file transfer. For example, if you have multiple fastq.gz files +to transfer to CHTC:

+ +
    +
  1. Log in: +
      +
    1. If you are transferring files to or from a /staging directory, log in to transfer.chtc.wisc.edu.
    2. If you are transferring files to or from your /home directory, log in to your usual submit server (typically ap2001.chtc.wisc.edu or ap2002.chtc.wisc.edu).
    +
  2. Choose a folder: Navigate to the folder in CHTC (/staging or /home) where you would like to put the files.
  3. Connect to ResearchDrive: Run the following command to connect to ResearchDrive, filling in the username of your PI:
     [alice@server]$ smbclient -k //research.drive.wisc.edu/PI-Name
    +
    +
  4. Navigate to the appropriate ResearchDrive directory:
     smb: \> cd path/to/files
    +
    +
  5. Turn off prompting:
     smb: \> prompt
    +
    +
  6. Use mget instead of get: this command downloads the group of files that end with “fastq.gz” to CHTC.
     smb: \> mget *.fastq.gz
    +
    +
+ +

As another example, use smbclient to transfer multiple tar.gz output files to ResearchDrive from CHTC after your jobs complete (a condensed version of both batch sessions follows these steps):

+ +
    +
  1. Log in: +
      +
    1. If you are transferring files to or from a /staging directory, log in to transfer.chtc.wisc.edu.
    2. If you are transferring files to or from your /home directory, log in to your usual submit server (typically ap2001.chtc.wisc.edu or ap2002.chtc.wisc.edu).
    +
  2. Choose a folder: Navigate to the folder in CHTC (/staging or /home) where your output files are located.
  3. Connect to ResearchDrive: Run the following command to connect to ResearchDrive, filling in the username of your PI:
     [alice@server]$ smbclient -k //research.drive.wisc.edu/PI-Name
    +
    +
  4. Navigate to the appropriate ResearchDrive directory:
     smb: \> cd path/to/directory
    +
    +
  5. Turn off prompting:
     smb: \> prompt
    +
    +
  6. Use mput instead of put:
     smb: \> mput *.tar.gz
    +
    +
+ + +
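
Putting these steps together, once you are connected and in the right folder, the batch download condenses to:

    smb: \> cd path/to/files
    smb: \> prompt
    smb: \> mget *.fastq.gz
    smb: \> exit

and the batch upload is the same session with mput *.tar.gz in place of the mget command.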
+
+ + + + +
+ + + + + + +
General Guides
+
+ + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/transfer-files-computer.html b/preview-fall2024-info/uw-research-computing/transfer-files-computer.html new file mode 100644 index 000000000..b562c00c5 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/transfer-files-computer.html @@ -0,0 +1,567 @@ + + + + + + +Transfer Files between CHTC and your Computer + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Transfer Files between CHTC and your Computer +

+ + + + +

+ +

1. Transferring Files

+ +

To transfer files to and from CHTC, you will need the same username and hostname information used for logging in, as well as an understanding of where your files are and where you would like them to go.

+ +

+ +

A. On the command line

+ +

On Mac, Linux, or modern Windows (10+) systems, you can use the "Terminal" application and +the scp command to copy files between your computer and CHTC servers.

+ +

Your computer to CHTC

+ +

First, open the "Terminal" application and navigate to the directory +with the files or directories you want to transfer. Then, use this +command to copy these files to CHTC:

+ +
$ scp file username@hostname:/home/username
+
+ +

If you would like these files to end up in a different directory inside +your home directory, just add it to the path at the end of the command.
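
For example, to copy a file into a (hypothetical) subdirectory named my_directory that already exists in your home directory:

    $ scp file username@hostname:/home/username/my_directory/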

+ +

CHTC to your computer

+ +

Open the "Terminal" application. Do NOT log into CHTC. Instead, +navigate to where you want the files to go on your computer. Then, use +this command to copy these files there:

+ +
$ scp username@hostname:/home/username/file ./
+
+ +

For many files, it will be easiest to create a compressed tarball (.tar.gz file) of your files and transfer that instead of each file individually, as described in part C below.

+ +

+ +

B. Using a file transfer program (Windows/Mac)

+ +

Windows and Mac users can also use special programs to help them transfer files between their computers and CHTC. For Windows, we recommend WinSCP. It requires the same information as PuTTY (hostname, username), and once it's set up, it presents two side-by-side file listings:

+ +

+ +

The left window is a directory on your computer, the right window is +your home directory in CHTC. To move files between the two, simply drag +and drop.

+ +

There are other programs besides WinSCP that do this. Another that works +on Mac and Windows is called Cyberduck.

+ +

+ +

C. Transferring Multiple Files

+ +

If you are transferring many files, it is advantageous to combine them into a single compressed file to make the transfer easier. Place all the files you need in a directory, and then either zip it or use the tar command to compress it:

+ +
$ tar czf data_files.tar.gz file_directory/
+
+ +
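
To double-check what ended up inside the archive before transferring it, you can list its contents:

    $ tar tzf data_files.tar.gz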

To untar or unzip files on the submit server or head nodes, you can use +either:

+ +
[alice@submit]$ tar xzf data_files.tar.gz
+
+ +

or

+ +
[alice@submit]$ unzip data_files.zip
+
+ +

+ +

2. Creating and Editing Files in CHTC

+ +

Once you have logged in to a CHTC server, you can edit files from the command line using a command-line text editor. Some common editing programs are:

+ +
    +
  • nano
  • vi
  • emacs
+ +

nano is the most beginner-friendly, and emacs is the most advanced. +This Software Carpentry +lesson describes +how to use nano, and there are many other resources online with +instructions for these text editors.

+ +

Some of the file transfer programs mentioned above +allow you to edit files on CHTC servers through the interface.

+ +
+
+ + + + +
+ + + + + + +
General Guides
+
+ + + + + + + + + + + + + + + + + + +
+

+ +

+ +
+ + + + + + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/use-submit-node.html b/preview-fall2024-info/uw-research-computing/use-submit-node.html new file mode 100644 index 000000000..e5899a1bb --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/use-submit-node.html @@ -0,0 +1,14 @@ + + +
+ +
diff --git a/preview-fall2024-info/uw-research-computing/user-expectations.html b/preview-fall2024-info/uw-research-computing/user-expectations.html new file mode 100644 index 000000000..e60f5a69c --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/user-expectations.html @@ -0,0 +1,445 @@ + + + + + + +Policies and Expectations for Using CHTC + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + +
+
+
+
+
+ +

+ Policies and Expectations for Using CHTC +

+ +

This page lists important policies and expectations for using CHTC computing and +data services. Our goal is to support a community of users and a variety of +research. If an individual user is taking +action that negatively impacts our services, we reserve the right to +deactivate their account or remove files without notice.

+ +

Access and Use

+ +

CHTC services are free to use in support of UW-Madison’s research and teaching mission.

+ +

Accounts are linked to individuals and should NOT be shared. We are happy to make new +accounts for individuals or group-owned spaces for sharing files. Accounts that we +notice being shared will be immediately disabled and a meeting with the PI +(faculty advisor) may be necessary to reinstate the account.

+ +

For more information on the process for obtaining an account, see our +How to Request an Account guide.

+ +

Data Policies

+ +

CHTC data locations are not backed up, and users should treat CHTC compute systems as temporary storage locations for active, currently-queued computational work. Users should remove data from CHTC systems upon completion of a batch of computational work and keep copies of all essential files in a non-CHTC location. CHTC staff reserve the right to delete data from any CHTC data location at any time to preserve system performance, and are not responsible for data loss or file system corruption, which are possible in the absence of back-ups.

+ +

CHTC is not HIPAA-compliant and users should not bring HIPAA data into +CHTC services. If you have data security concerns or any questions about +data security in CHTC, please get in touch!

+ +

To request a change in the quotas for a storage location, please see +our Request a Quota Change guide.

+ +

Export Control

+ +

Users agree not to access, utilize, store, or in any way run export controlled data, information, +programs, etc. on CHTC software, equipment, or computing resources without prior review by the +UW-Madison Export Control Office.

+ +

Export controlled information is subject to federal government rules on handling and viewing, and carries restrictions on who may access it and where it may be accessed. A license can be required for access by foreign persons and in foreign jurisdictions, so it’s important to ensure that all legal requirements are followed. If you have export controlled information that you would like to use on the CHTC, or you are unsure if the information you have is export controlled, please contact the Export Control Office at exportcontrol@grad.wisc.edu for guidance.

+ +

Note: The CHTC is not compliant with Controlled Unclassified Information (CUI) requirements.

+ +

User Expectations

+ +

Because our systems are shared by many CHTC users, everyone contributes to +helping the systems run smoothly. The following are some best practices +to get the most out of CHTC without harming other users. Our goal +is always to help you get your work done - if you think the following recommendations +limit your capacity to run work, please contact us to discuss alternatives.

+ +

Never run computationally intensive tasks on the login nodes for either system. As a rule of thumb, anything that runs for more than a few seconds, or is known to use a lot of cores or memory, should not be run directly but submitted as a job. Small scripts and commands (to compress data, create directories, etc.) that run within a few minutes on the submit server are okay, but their use should be minimized when possible. If you have questions about this, please contact the facilitation team. CHTC staff reserve the right to kill any long-running or problematic processes on the head nodes and/or disable user accounts that violate this policy.

+ +

Avoid unsupervised scripts on the login nodes. Automating tasks via tools like +cron, watch, or using a workflow manager (not including HTCondor’s DAGMan) on the login node is not allowed without prior +discussion with the facilitation or infrastructure team.

+ +
+

(HTC system specific): Since use of watch with condor_q is prohibited, +we recommend using condor_watch_q as an alternative for live updates on your jobs +in the queue. condor_watch_q is more efficient and will not impair system performance.

+
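
For example, running condor_watch_q with no arguments on the login node where you submitted your jobs gives a live view of their status:

    $ condor_watch_q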
+ +

Test your jobs. We recommend testing a small version of your overall workflow before submitting the full workflow. By testing a smaller version of your jobs, you can determine resource requests, runtimes, and whether you may need an increase in your user quota. Both our HTC and HPC systems use a fair share policy, and each researcher has a user priority. Submitting many jobs that fail or do not produce the expected output will decrease your user priority without helping you complete your research. User priorities naturally reset over time.

+ +
+
+ + + + +
+ +
+ + + +
+
+
+ +
+
+ + + + + + + + + diff --git a/preview-fall2024-info/uw-research-computing/who-we-are.md b/preview-fall2024-info/uw-research-computing/who-we-are.md new file mode 100644 index 000000000..58d0f3299 --- /dev/null +++ b/preview-fall2024-info/uw-research-computing/who-we-are.md @@ -0,0 +1,33 @@ +Center for High Throughput Computing + + +

Join Hundreds of UW-Madison Researchers using CHTC Resources for:

+

✓ RNA/DNA Sequencing Analyses ✓ Machine Learning Workflows

+

✓ Economic Simulations & Predictions ✓ Weather Modeling Analyses

+

✓ Chemical Reaction Predictions ✓ Computer Vision & Artificial Intelligence Decision Making

+

… and much more!

+ +

Who We Are

+
+We are the University of Wisconsin-Madison’s core computational resource provider for large scale computing. UW-Madison staff, students, faculty, and external collaborators are welcome to use the Center for High Throughput Computing's (CHTC) resources to carry out their computationally-intensive research.
+
+CHTC provides a variety of resources and tools to meet the demands of the University’s research community.
+
+We provide no-cost compute resources (CPUs/GPUs, high-memory servers, etc.), as well as no-cost personalized consultations and classroom support.
+
+

Your Home for Research Computing is Here

+
+As a leading research institution, UW-Madison needs a leading research computing center. Established in 2006, the Center for High Throughput Computing (CHTC) aims to bring the power of High Throughput Computing (HTC) to all fields of research and to allow the future of HTC to be shaped by insight from all fields. To advance this mission, the CHTC provides researchers access to state-of-the-art High Throughput Computing and High Performance Computing systems, as well as tools for data management and specialized hardware.
+
+Beyond CHTC's compute resources, CHTC’s Research Facilitation team helps researchers of all backgrounds identify their needs for large scale computing and practice the skills needed to do their work, and provides support for implementing workflows.
+
+They offer the following services, free-of-charge:
+• Office Hours twice a week
+• Personal consultations to provide individualized guidance
+• Workshops and other informational events
+• Yearly week-long HTC summer school
+• Guest presentations for courses, seminars, and other groups
+• Email support
+
+
diff --git a/preview-fall2024-info/veritas.html b/preview-fall2024-info/veritas.html new file mode 100644 index 000000000..b737c18de --- /dev/null +++ b/preview-fall2024-info/veritas.html @@ -0,0 +1,389 @@ + + + + + + +VERITAS and OSG explore extreme window into the universe + + + + + + + + +
+
+ +
+
+ + + + + +
+
+ + + +
+
+
+

+ VERITAS and OSG explore extreme window into the universe +

+

Understanding the universe has always fascinated mankind. The VERITAS Cherenkov telescope array unravels its secrets by detecting very-high-energy gamma rays from astrophysical sources.

+ +

Gamma-ray astronomy studies the most energetic form of electromagnetic radiation, with photon energies in the GeV – TeV range (gigaelectronvolt to teraelectronvolt). The Very Energetic Radiation Imaging Telescope Array System (VERITAS) uses four 12-meter diameter imaging telescopes. The system uses the Imaging Atmospheric Cherenkov Telescope (IACT) technique to observe gamma rays that cause particle showers in Earth’s upper atmosphere.

+ +

The system contains four 499-pixel photomultiplier cameras, with each pixel 0.15 degrees in diameter, which record images of the showers by detecting the Cherenkov light emitted by particles in the air shower. The field of view of each camera is 3.5 degrees.

+ +

There are currently three operating IACT arrays: H.E.S.S., MAGIC, and VERITAS. VERITAS is sensitive to very-high-energy (VHE) gamma rays in the energy range between ~80 GeV up to several tens of TeV. It is one of the most sensitive instruments in that energy band.

+ +
+ Image of VERITAS +
The Very Energetic Radiation Imaging Telescope Array System (VERITAS) uses four 12-meter diameter imaging telescopes. The system uses the Imaging Atmospheric Cherenkov Telescope (IACT) technique to observe gamma rays that cause particle showers in Earth’s upper atmosphere. The picture shows the four telescopes at their permanent location at the Fred Lawrence Whipple Observatory (FLWO) observatory one hour south of Tucson, AZ. Courtesy Nepomuk Otte.
+
+ +

A. Nepomuk Otte, Ph.D., is an assistant professor in the School of Physics at the Georgia Institute of Technology. He works in the Center for Relativistic Astrophysics and is a collaborator in VERITAS.

+ +

“We use the VERITAS telescopes in Arizona to study black holes, the remnants of exploding stars, pulsars, and other objects in the sky,” says Otte. “A high-energy gamma ray has a trillion times more energy than a light particle from the sun. When such a gamma ray hits the atmosphere, it produces millions of electrons and positrons that travel faster than the speed of light through the atmosphere.”

+ +

Otte explains that these charged particles emit a bluish flash of light as they zip through the atmosphere. The VERITAS telescopes collect that light and project it onto special cameras to take an image of the particle shower.

+ +

“In our analysis software, we compare the recorded images with simulated ones to find out if a shower was produced by an actual gamma ray or a cosmic ray, which would be a background event,” says Otte. “We also have to compare our events with simulated ones to reconstruct the energy and its origin in the sky—everything we need for a full reconstruction. For our analysis, it is crucial that we properly simulate our experiment to make sense of the data.”

+ +

Otte relies on the Open Science Pool to run the simulations. “Without simulations, we are blind because the characteristics of each recorded image depend on too many parameters to be described analytically,” says Otte. “We have to repeat every step of the experiment in the computer about 100 million times, from the gamma-ray interaction in the atmosphere up to the point where the digitized photon detector signals are written to disk. That is a very time-consuming process.” Otte then compares each recorded event with simulated ones. “The simulated events that best match the recorded event tell us what the energy of the recorded event was and whether it was a gamma ray or a cosmic ray.”

+ +

VERITAS began recording data ten years ago. Over that time span, VERITAS accumulated 10,000 hours of observations on more than 100 objects. Some objects were observed for more than 300 hours. The analysis of these large data sets is sensitive to even small differences between the experiment and the simulations, differences that did not matter when VERITAS started. Two years ago, the VERITAS collaboration reworked the simulation models to account for these small differences by including more details about the experiment itself.

+ +

“We had to rewrite large fractions of our simulation code,” says Otte. “The added detail also meant we needed more computing power. In the past, we could do our simulations on a few hundred CPUs. Now, we need a hundred times more power because we want to simulate ten times more showers than before.”

+ +

OSG gives the VERITAS collaboration the computing power they need. “Using free cycles that others are not using is almost perfect for us,” says Otte. He and his group started using the OSG in August 2016. Initially, Otte wrote an XSEDE allocation application to use Stampede, and the XSEDE experts recommended OSG as a better fit for the project. “I knew about OSG, having used it for other experiments,” says Otte, “but the shared free cycles in this case was a huge help.”

+ +

Otte says the grand challenge in their field is picking signals out of the air showers in the atmosphere: they see very few gamma rays, just a handful even for objects observed for over 100 hours. At the same time, millions of background events are recorded that are cosmic rays but look very similar to gamma-ray air showers. “So, our challenge is to dig needles out of a huge haystack,” says Otte. “This has been a huge challenge for decades.”

+ +

Distinguishing gamma-ray events from background events with very high efficiency became possible only after the power of image analysis of air showers was realized in the late 1980s. The simulations tell researchers what features to look for in the images to suppress the background events in the analysis.

+ +

“Simulations are crucial,” says Otte. “We could not make sense of the data without them. And now with bigger data sets it has become very important to also include aspects of the telescopes that did not matter before. For example, we have now recorded events with energies of several tens of TeV. These events are extremely rare, but we have them in our data. The images of these events are so bright that a lot of the camera pixels saturate. We had not included these saturation effects in our simulations before and thus made large errors in reconstructing the energy of these events.”

+ +

After the VERITAS analysis is done, the data are combined with observations in X-ray, radio, and optical and compared with models that try to explain what happens inside the source. One of the important science drivers for VERITAS is to find the origin of cosmic rays, which is a century-old puzzle. “The remnants of supernovae are prime candidates to accelerate cosmic rays,” says Otte. “In some cases, we resolve the expanding shell in gamma rays and can directly see where the cosmic rays come from.”

+ +

Otte uses the OSG mostly for the simulations. “We don’t need these massive computing resources like other experiments might,” says Otte. “Running the simulations is a single effort that takes a lot of time and a lot of computing resources. Buying resources for such a short time would not be sustainable. The sharing concept of OSG is perfect for us. We borrow the resources, do production, have the data on disk, and then do our science for the next few years on our local computing clusters at the universities.”

+ +

Without the OSG, Otte says they would be stuck with local clusters, and that would hold them back. Another important aspect for the VERITAS collaboration is they have groups across the nation with computing resources, but only the OSG can combine them all into one big virtual computing cluster. That makes them far more productive. “With tens of terabytes of data,” says Otte, “the grid makes things much easier.”

+ +

“With the help of the OSG, we are exploring a new and very exciting window into the most extreme objects in our universe, like black holes and exploding stars. We study the origin of dark matter, which makes up 25 percent of the universe, and we don’t even understand what it is. We explore the evolution of the universe and can even test the fabric of space-time. VERITAS is a very versatile tool and a world-leading instrument.”

+ +

“As we pursue our research, we develop new technologies and algorithms. These find use in other areas as well. For example, the photon detector technology we use is also used in apparatus for cancer screening and diagnostics. And our algorithms can apply to and be used for other large data sets.”

+ +
+ Picture of Veritas research group +
Research group of Nepomuk Otte. Courtesy Nepomuk Otte.
+
+ +
+
+
+
+
+ + + + + + + + + diff --git a/preview-fall2024-info/web.css b/preview-fall2024-info/web.css new file mode 100644 index 000000000..0651cbc15 --- /dev/null +++ b/preview-fall2024-info/web.css @@ -0,0 +1,657 @@ +/* The following six styles set attibutes for heading tags H1-H6 */ + +div.announcement { + border: 1px solid #787878; + background-color: #efefef; + color: #787878; + padding: .5em; + margin: 1em 2em 1em 2em; + } + +table.gtable { + background: #B70101; + padding: 5px 10px; + border-radius: 5px; + -moz-border-radius: 5px; + -webkit-border-radius: 5px; + + color: #FFFFFF; + overflow: hidden; + margin-bottom: 20px; + + background: #ddd; + color: #333; + border: 0; + border-bottom: 3px solid #bbb; + + -moz-box-shadow: 0px 2px 7px 1px #bbb; + -webkit-box-shadow: 0px 2px 7px 1px #bbb; + box-shadow: 0px 2px 7px 1px #bbb; +} + +table.gtable img{ + border-radius: 5px; + -moz-border-radius: 5px; + -webkit-border-radius: 5px; +} + +table.gtable td { + padding: 0.6em 0.8em 0.6em 0.8em; + background-color: #ddd; + border-bottom: 1px solid #bbb; + border-top: 0px; + overflow: visible; +} + +table.gtable th { + padding: 0.6em 0.8em 0.6em 0.8em; + background-color: #b70101; + color: #FFFFFF; + border: 0px; + border-bottom: 3px solid #920000; +} + +H1 { + margin-bottom: -5px; + text-align: left; + color: #000000; + font-size: 150%; + font-weight: bold; + font-family: verdana, geneva, arial, sans-serif; + line-height: 195% + } + + +H2 { + margin-top: 20px; + margin-bottom: 0px; + text-align: left; + color: #000000; + font-size: 120%; + font-weight: bold; + font-family: verdana, geneva, arial, sans-serif; + line-height: 150% + } + + + +H3 { + margin-top: 20px; + margin-bottom: -10px; + color: #000000; + font-size: 95%; + line-height: 130%; + font-weight: bold; + font-family: verdana, geneva, arial, sans-serif; + width: 100% + } + + +H4 { + margin-top: 15px; + margin-bottom: -10px; + text-align: left; + color: #000000; + font-size: 95%; + font-weight: bold; + font-family: verdana, geneva, arial, sans-serif; + width: 100% + } + + + +H5 { + margin-top: 10px; + margin-bottom: -10px; + text-align: left; + color: #000000; + font-size: 95%; + font-weight: bold; + font-family: verdana, geneva, arial, sans-serif; + width: 100% + } + + +H6 { + margin-top: 10px; + margin-bottom: -10px; + text-align: left; + color: #000000; + font-size: 95%; + font-weight: bold; + font-family: verdana, geneva, arial, sans-serif; + width: 100% + } + +body { + background-color: #eee; + font-family: Verdana, Arial, Helvetica,sans-serif; +} + +.bgred { + background-color: #B70101; + -moz-border-top-right-radius: 10px; + -webkit-border-top-right-radius: 10px; + border-bottom-right-radius: 10px; + -moz-border-bottom-right-radius: 10px; + -webkit-border-bottom-right-radius: 10px; + margin: 10px 0px; + + -moz-box-shadow: 0px 2px 7px 1px #bbb; + -webkit-box-shadow: 0px 2px 7px 1px #bbb; + box-shadow: 0px 2px 7px 1px #bbb; +} + +#copyright { +font-family: Verdana, Arial, Helvetica, sans-serif; +font-size: 75%; + +background: #ddd; +color: #333; +border: 1px solid #bbb; +border-bottom: 3px solid #bbb; +border-top: 0px; + +padding: 5px 10px; +border-radius: 5px; +-moz-border-radius: 5px; +-webkit-border-radius: 5px; + + -moz-box-shadow: 0px 2px 7px 1px #bbb; + -webkit-box-shadow: 0px 2px 7px 1px #bbb; + box-shadow: 0px 2px 7px 1px #bbb; + + +margin-top: 40px; +} + +#copyright a { + color: #66a; +} + + +.navbodyblack { + font-family: Verdana,Arial, Helvetica, sans-serif; + color: #ffffff; background-color: #B70101; + text-decoration: none; 
padding-left: 8px; + padding-top: 5px; padding-bottom: 5px; + padding-right: 2px; + font-weight: normal; + margin: 0px; +} + +.navbodyblack a:link { + color:#ffffff; + text-decoration: none; +} + +.navbodyblack a:visited { + color:#ffffff; + text-decoration: none; +} + +.navbodyblack a:hover { + color:#cc9900; + text-decoration: none; +} + +code { + font-size: 120%; +} + +pre { + border-radius: 5px; + -moz-border-radius: 5px; + -webkit-border-radius: 5px; + font-size: 120%; + margin: 1em 2em; + + background: #ddd; + color: #333; + border: 1px solid #bbb; + border-bottom: 3px solid #bbb; + border-top: 0px; + border-left: 5px solid #b70101; + padding: 0.5em 1.2em; + + -moz-box-shadow: 0px 2px 7px 1px #bbb; + -webkit-box-shadow: 0px 2px 7px 1px #bbb; + box-shadow: 0px 2px 7px 1px #bbb; +} + +blockquote { + border-radius: 5px; + -moz-border-radius: 5px; + -webkit-border-radius: 5px; + font-size: 90%; + margin: 1em 2em; + + background: #fff; + color: #333; + border: 1px solid #bbb; + border-bottom: 3px solid #bbb; + border-top: 0px; + border-left: 5px solid #000; + padding: 0.5em 1.2em; + + -moz-box-shadow: 0px 2px 7px 1px #bbb; + -webkit-box-shadow: 0px 2px 7px 1px #bbb; + box-shadow: 0px 2px 7px 1px #bbb; +} + +ul.sidebar { + font-size: 80%; + margin: 0.8em; +} +ul.sidebar,ul.sidebar ul { + padding: 0; + border: 0; + color: white; +} +ul.sidebar ul { + margin: 0 0.8em; +} +ul.sidebar a:link, ul.sidebar a:visited, ul.sidebar a:active { + color: white; + text-decoration: none; + display: block; +} +ul.sidebar a:hover, ul.sidebar a:active { + /*text-decoration: underline; + color:#cc9900; */ + color: #b70101; + background-color: white; + transition: all linear 0.2s 0s; + position: relative; + left:5px; + + border-left:5px solid #d41a1a; +} +ul.sidebar li { + font-size: 16px; + margin: 0; + margin-top: .5em; + padding: 2px 4px; + border: 0; + list-style-type: none; +} +ul.sidebar li a{ + position: relative; + top: 0px; + + transition: all linear 0.2s 0s; + + padding: 5px 10px; + border-left:5px solid #d41a1a; + background: #d41a1a; + + -moz-box-shadow: 0px 2px 4px 1px #9E0000; + -webkit-box-shadow: 0px 2px 4px 1px #9E0000; + box-shadow: 0px 2px 4px 1px #9E0000; + + color: white; + border-radius: 5px; + -moz-border-radius: 5px; + -webkit-border-radius: 5px; +} + +ul.sidebar ul li a{ + font-size: 14px; + +} + +ul.sidebar li.spacer { +} + +#tile-wrapper { + margin-left:auto; + margin-right:auto; + padding:5px; +} + +a.tile { + display: inline-block; + position:relative; + width:800px; + text-decoration: none; + + overflow:hidden; + + margin-left:auto; + margin-right:auto; + + font-size:75%; + margin: 10px; + background: #cdcdcd; + padding: 10px; + border-radius: 5px; + border-bottom: 5px solid #aaa; + border-top: 5px solid #ddd; + color: #555; + + -moz-box-shadow: 0px 1px 8px 1px #bbb; + -webkit-box-shadow: 0px 1px 8px 1px #bbb; + box-shadow: 0px 1px 8px 1px #bbb; +} + +a.tile:hover { + top:-5px; + + + -moz-box-shadow: 0px 6px 8px 1px #bbb; + -webkit-box-shadow: 0px 6px 8px 1px #bbb; + box-shadow: 0px 6px 8px 1px #bbb; + +} + + +a.tile p{ + display:inline-block; + width:84%; + height:65px; + float:right; + + text-align: left; + + background: #fff; + color: #000; + padding: 10px; + + font-size: 10px; + + border-color: #fff; + background-color: #fff; + padding: 10px; + margin:5px; + border-radius: 5px; +} + +a.tile img{ + width:75px; + float:left; + + display:inline-block; + + border-color: #fff; + background-color: #fff; + padding: 5px; + margin:5px; + border-radius: 5px; +} + +a.tile h2{ + 
text-decoration: none; + color:#555; + margin:0px 5px; + font-size: 140%; +} + +#hours { + width: 120px; + height: 92px; + font-size:75%; + margin-left:10px; + float: right; + background: #B70101; + padding: 5px 10px; + border-radius: 5px; + border-bottom: 5px solid #920000; + border-top: 5px solid #d41a1a; + color: #fff; + + -moz-box-shadow: 0px 1px 4px 1px #bbb; + -webkit-box-shadow: 0px 1px 4px 1px #bbb; + box-shadow: 0px 1px 4px 1px #bbb; +} + +#osg_power { + height: 92px;u + margin-left:10px; + float: right; + background: #F29B12; + padding: 5px 10px; + border-radius: 5px; + border-bottom: 5px solid #EF7821; + border-top: 5px solid #FDC10A; + color: #fff; + + -moz-box-shadow: 0px 1px 4px 1px #bbb; + -webkit-box-shadow: 0px 1px 4px 1px #bbb; + box-shadow: 0px 1px 4px 1px #bbb; +} + +#osg_power img { + border-radius: 5px; +} + +p.underconstruction { + border: 1px solid #666; + background-color: #FFA; + padding: 0.1em 0.5em; + margin-left: 2em; + margin-right: 2em; + font-style: italic; + + -moz-box-shadow: 0px 2px 7px 1px #bbb; + -webkit-box-shadow: 0px 2px 7px 1px #bbb; + box-shadow: 0px 2px 7px 1px #bbb; +} +.num { + text-align: right; +} +table { + border-collapse: collapse; +} +td,tr { + padding-left: 0.2em; + padding-right: 0.2em; +} + + +/* these two classes control the layout of tables in submit_variations.shtml*/ +table.directory table { +width: 100%; +} + +table.directory td { +width: 50%; +vertical-align: top; +} + +table.triple table { +width: 100%; +} + +table.triple-sub td { +width: 32%; +vertical-align: top; +} + +/* make a list wider spaced */ + +li.spaced { + margin: 10px 0; +} + +code.term { + border-radius: 5px; + -moz-border-radius: 5px; + -webkit-border-radius: 5px; + font-size: 122%; + margin: 1em 2em; + + background: #484848; + color: #F0F0F0; + border: 0px solid #303030; + border-bottom: 0px solid #323232; + border-top: 0px; + border-left: 0px solid #323232; + padding: 0.5em 0.5em; + + -moz-box-shadow: 0px 2px 7px 1px #bbb; + -webkit-box-shadow: 0px 2px 7px 1px #bbb; + box-shadow: 0px 2px 7px 1px #bbb; +} + +pre.term { + border-radius: 5px; + -moz-border-radius: 5px; + -webkit-border-radius: 5px; + font-size: 122%; + margin: 1em 2em; + + background: #484848; + color: #F0F0F0; + border: 0px solid #303030; + border-bottom: 0px solid #323232; + border-top: 0px; + border-left: 0px solid #323232; + padding: 0.5em 0.5em; + + -moz-box-shadow: 0px 2px 7px 1px #bbb; + -webkit-box-shadow: 0px 2px 7px 1px #bbb; + box-shadow: 0px 2px 7px 1px #bbb; +} + +pre.sub { + border-radius: 5px; + -moz-border-radius: 5px; + -webkit-border-radius: 5px; + font-size: 120%; + margin: 1em 2em; + + background: #F0F0F0; + color: #333; + border: 1px solid #bbb; + border-bottom: 3px solid #bbb; + border-top: 0px; + border-left: 5px solid #b70101; + padding: 0.5em 1.2em; + + -moz-box-shadow: 0px 2px 7px 1px #bbb; + -webkit-box-shadow: 0px 2px 7px 1px #bbb; + box-shadow: 0px 2px 7px 1px #bbb; + } + +pre.file { + border-radius: 5px; + -moz-border-radius: 5px; + -webkit-border-radius: 5px; + font-size: 120%; + margin: 1em 2em; + + background: #D8D8D8; + color: #333; + border: 1px solid #bbb; + border-bottom: 3px solid #bbb; + border-top: 0px; + border-left: 5px solid #0D4F8B; + padding: 0.5em 1.2em; + + -moz-box-shadow: 0px 2px 7px 1px #bbb; + -webkit-box-shadow: 0px 2px 7px 1px #bbb; + box-shadow: 0px 2px 7px 1px #bbb; + } + +pre.other { + border-radius: 5px; + -moz-border-radius: 5px; + -webkit-border-radius: 5px; + font-size: 120%; + margin: 1em 2em; + + background: #fff; + color: #000; + border: 
0px solid #bbb; + border-bottom: 0px solid #bbb; + border-top: 0px; + border-left: 0px solid #0D4F8B; + padding: 0.5em 1.2em; + + -moz-box-shadow: 0px 0px 0px 0px #bbb; + -webkit-box-shadow: 0px 0px 0px 0px #bbb; + box-shadow: 0px 0px 0px 0px #bbb; + } + +/* guide tiles? */ + +blockquote.tile2 { + display: inline-block; + position:relative; + width:800px; + text-decoration: none; + + overflow:hidden; + + margin-left:auto; + margin-right:auto; + + margin: 10px; + background: #fff; + padding: 10px; + border-radius: 5px; + border-bottom: 5px solid #aaa; + border-top: 5px solid #ddd; + color: #555; + + -moz-box-shadow: 0px 1px 8px 1px #bbb; + -webkit-box-shadow: 0px 1px 8px 1px #bbb; + box-shadow: 0px 1px 8px 1px #bbb; +} + + +blockquote.tile2 p{ + display:inline-block; + width:84%; + /*height:65px;*/ + float:right; + + text-align: left; + + background: #fff; + color: #000; + padding: 10px; + + border-color: #fff; + background-color: #fff; + margin:5px; + border-radius: 5px; +} + +blockquote.tile2 ul{ + display:inline-block; + width:84%; + /*height:65px;*/ + float:right; + + text-align: left; + + background: #fff; + color: #000; + padding: 10px; + + border-color: #fff; + background-color: #fff; + margin:5px; + border-radius: 5px; +} + +blockquote.tile2 img{ + width:75px; + float:left; + + display:inline-block; + + border-color: #000; + background-color: #fff; + padding: 5px; + margin:5px; + border-radius: 5px; +} + +blockquote.tile2 h2{ + text-decoration: none; + color:#000; + margin:0px 5px; + font-size: 120%; +}