{"id":12353,"date":"2025-10-06T16:16:35","date_gmt":"2025-10-06T20:16:35","guid":{"rendered":"https:\/\/labs.icahn.mssm.edu\/minervalab\/?page_id=12353"},"modified":"2025-10-07T11:59:06","modified_gmt":"2025-10-07T15:59:06","slug":"air%c2%b7ms-documentation","status":"publish","type":"page","link":"https:\/\/labs.icahn.mssm.edu\/minervalab\/air%c2%b7ms-documentation\/","title":{"rendered":"AIR\u00b7MS Documentation"},"content":{"rendered":"<p>[et_pb_section bb_built=&#8221;1&#8243; inner_width=&#8221;auto&#8221; inner_max_width=&#8221;1080px&#8221;][et_pb_row _builder_version=&#8221;4.27.4&#8243;][et_pb_column type=&#8221;4_4&#8243; custom_padding__hover=&#8221;|||&#8221; custom_padding=&#8221;|||&#8221;][et_pb_text admin_label=&#8221;Breadcrumb&#8221; _builder_version=&#8221;4.27.4&#8243; _module_preset=&#8221;default&#8221; background_pattern_color=&#8221;rgba(0,0,0,0.2)&#8221; background_mask_color=&#8221;#ffffff&#8221; text_text_shadow_horizontal_length=&#8221;text_text_shadow_style,%91object Object%93&#8243; text_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; text_text_shadow_vertical_length=&#8221;text_text_shadow_style,%91object Object%93&#8243; text_text_shadow_vertical_length_tablet=&#8221;0px&#8221; text_text_shadow_blur_strength=&#8221;text_text_shadow_style,%91object Object%93&#8243; text_text_shadow_blur_strength_tablet=&#8221;1px&#8221; link_text_shadow_horizontal_length=&#8221;link_text_shadow_style,%91object Object%93&#8243; link_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; link_text_shadow_vertical_length=&#8221;link_text_shadow_style,%91object Object%93&#8243; link_text_shadow_vertical_length_tablet=&#8221;0px&#8221; link_text_shadow_blur_strength=&#8221;link_text_shadow_style,%91object Object%93&#8243; link_text_shadow_blur_strength_tablet=&#8221;1px&#8221; ul_text_shadow_horizontal_length=&#8221;ul_text_shadow_style,%91object Object%93&#8243; ul_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; 
ul_text_shadow_vertical_length=&#8221;ul_text_shadow_style,%91object Object%93&#8243; ul_text_shadow_vertical_length_tablet=&#8221;0px&#8221; ul_text_shadow_blur_strength=&#8221;ul_text_shadow_style,%91object Object%93&#8243; ul_text_shadow_blur_strength_tablet=&#8221;1px&#8221; ol_text_shadow_horizontal_length=&#8221;ol_text_shadow_style,%91object Object%93&#8243; ol_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; ol_text_shadow_vertical_length=&#8221;ol_text_shadow_style,%91object Object%93&#8243; ol_text_shadow_vertical_length_tablet=&#8221;0px&#8221; ol_text_shadow_blur_strength=&#8221;ol_text_shadow_style,%91object Object%93&#8243; ol_text_shadow_blur_strength_tablet=&#8221;1px&#8221; quote_text_shadow_horizontal_length=&#8221;quote_text_shadow_style,%91object Object%93&#8243; quote_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; quote_text_shadow_vertical_length=&#8221;quote_text_shadow_style,%91object Object%93&#8243; quote_text_shadow_vertical_length_tablet=&#8221;0px&#8221; quote_text_shadow_blur_strength=&#8221;quote_text_shadow_style,%91object Object%93&#8243; quote_text_shadow_blur_strength_tablet=&#8221;1px&#8221; header_text_shadow_horizontal_length=&#8221;header_text_shadow_style,%91object Object%93&#8243; header_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; header_text_shadow_vertical_length=&#8221;header_text_shadow_style,%91object Object%93&#8243; header_text_shadow_vertical_length_tablet=&#8221;0px&#8221; header_text_shadow_blur_strength=&#8221;header_text_shadow_style,%91object Object%93&#8243; header_text_shadow_blur_strength_tablet=&#8221;1px&#8221; header_2_text_shadow_horizontal_length=&#8221;header_2_text_shadow_style,%91object Object%93&#8243; header_2_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; header_2_text_shadow_vertical_length=&#8221;header_2_text_shadow_style,%91object Object%93&#8243; header_2_text_shadow_vertical_length_tablet=&#8221;0px&#8221; 
header_2_text_shadow_blur_strength=&#8221;header_2_text_shadow_style,%91object Object%93&#8243; header_2_text_shadow_blur_strength_tablet=&#8221;1px&#8221; header_3_text_shadow_horizontal_length=&#8221;header_3_text_shadow_style,%91object Object%93&#8243; header_3_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; header_3_text_shadow_vertical_length=&#8221;header_3_text_shadow_style,%91object Object%93&#8243; header_3_text_shadow_vertical_length_tablet=&#8221;0px&#8221; header_3_text_shadow_blur_strength=&#8221;header_3_text_shadow_style,%91object Object%93&#8243; header_3_text_shadow_blur_strength_tablet=&#8221;1px&#8221; header_4_text_shadow_horizontal_length=&#8221;header_4_text_shadow_style,%91object Object%93&#8243; header_4_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; header_4_text_shadow_vertical_length=&#8221;header_4_text_shadow_style,%91object Object%93&#8243; header_4_text_shadow_vertical_length_tablet=&#8221;0px&#8221; header_4_text_shadow_blur_strength=&#8221;header_4_text_shadow_style,%91object Object%93&#8243; header_4_text_shadow_blur_strength_tablet=&#8221;1px&#8221; header_5_text_shadow_horizontal_length=&#8221;header_5_text_shadow_style,%91object Object%93&#8243; header_5_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; header_5_text_shadow_vertical_length=&#8221;header_5_text_shadow_style,%91object Object%93&#8243; header_5_text_shadow_vertical_length_tablet=&#8221;0px&#8221; header_5_text_shadow_blur_strength=&#8221;header_5_text_shadow_style,%91object Object%93&#8243; header_5_text_shadow_blur_strength_tablet=&#8221;1px&#8221; header_6_text_shadow_horizontal_length=&#8221;header_6_text_shadow_style,%91object Object%93&#8243; header_6_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; header_6_text_shadow_vertical_length=&#8221;header_6_text_shadow_style,%91object Object%93&#8243; header_6_text_shadow_vertical_length_tablet=&#8221;0px&#8221; header_6_text_shadow_blur_strength=&#8221;header_6_text_shadow_style,%91object 
Object%93&#8243; header_6_text_shadow_blur_strength_tablet=&#8221;1px&#8221; box_shadow_horizontal_tablet=&#8221;0px&#8221; box_shadow_vertical_tablet=&#8221;0px&#8221; box_shadow_blur_tablet=&#8221;40px&#8221; box_shadow_spread_tablet=&#8221;0px&#8221; vertical_offset_tablet=&#8221;0&#8243; horizontal_offset_tablet=&#8221;0&#8243; z_index_tablet=&#8221;0&#8243;]<\/p>\n<p><a href=\"https:\/\/labs.icahn.mssm.edu\/minervalab\/scientific-computing-and-data\/\">Scientific Computing and Data<\/a> \/ <a href=\"https:\/\/labs.icahn.mssm.edu\/minervalab\/air-ms-artificial-intelligence-ready-mount-sinai\/\">AIR<strong>\u00b7<\/strong>MS (AI Ready Mount Sinai)<\/a> \/ Documentation<\/p>\n<p>&nbsp;<\/p>\n<p>[\/et_pb_text][et_pb_text _builder_version=&#8221;4.27.4&#8243; background_pattern_color=&#8221;rgba(0,0,0,0.2)&#8221; background_mask_color=&#8221;#ffffff&#8221; text_text_shadow_horizontal_length=&#8221;text_text_shadow_style,%91object Object%93&#8243; text_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; text_text_shadow_vertical_length=&#8221;text_text_shadow_style,%91object Object%93&#8243; text_text_shadow_vertical_length_tablet=&#8221;0px&#8221; text_text_shadow_blur_strength=&#8221;text_text_shadow_style,%91object Object%93&#8243; text_text_shadow_blur_strength_tablet=&#8221;1px&#8221; link_text_shadow_horizontal_length=&#8221;link_text_shadow_style,%91object Object%93&#8243; link_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; link_text_shadow_vertical_length=&#8221;link_text_shadow_style,%91object Object%93&#8243; link_text_shadow_vertical_length_tablet=&#8221;0px&#8221; link_text_shadow_blur_strength=&#8221;link_text_shadow_style,%91object Object%93&#8243; link_text_shadow_blur_strength_tablet=&#8221;1px&#8221; ul_text_shadow_horizontal_length=&#8221;ul_text_shadow_style,%91object Object%93&#8243; ul_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; ul_text_shadow_vertical_length=&#8221;ul_text_shadow_style,%91object Object%93&#8243; 
ul_text_shadow_vertical_length_tablet=&#8221;0px&#8221; ul_text_shadow_blur_strength=&#8221;ul_text_shadow_style,%91object Object%93&#8243; ul_text_shadow_blur_strength_tablet=&#8221;1px&#8221; ol_text_shadow_horizontal_length=&#8221;ol_text_shadow_style,%91object Object%93&#8243; ol_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; ol_text_shadow_vertical_length=&#8221;ol_text_shadow_style,%91object Object%93&#8243; ol_text_shadow_vertical_length_tablet=&#8221;0px&#8221; ol_text_shadow_blur_strength=&#8221;ol_text_shadow_style,%91object Object%93&#8243; ol_text_shadow_blur_strength_tablet=&#8221;1px&#8221; quote_text_shadow_horizontal_length=&#8221;quote_text_shadow_style,%91object Object%93&#8243; quote_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; quote_text_shadow_vertical_length=&#8221;quote_text_shadow_style,%91object Object%93&#8243; quote_text_shadow_vertical_length_tablet=&#8221;0px&#8221; quote_text_shadow_blur_strength=&#8221;quote_text_shadow_style,%91object Object%93&#8243; quote_text_shadow_blur_strength_tablet=&#8221;1px&#8221; header_text_shadow_horizontal_length=&#8221;header_text_shadow_style,%91object Object%93&#8243; header_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; header_text_shadow_vertical_length=&#8221;header_text_shadow_style,%91object Object%93&#8243; header_text_shadow_vertical_length_tablet=&#8221;0px&#8221; header_text_shadow_blur_strength=&#8221;header_text_shadow_style,%91object Object%93&#8243; header_text_shadow_blur_strength_tablet=&#8221;1px&#8221; header_2_text_shadow_horizontal_length=&#8221;header_2_text_shadow_style,%91object Object%93&#8243; header_2_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; header_2_text_shadow_vertical_length=&#8221;header_2_text_shadow_style,%91object Object%93&#8243; header_2_text_shadow_vertical_length_tablet=&#8221;0px&#8221; header_2_text_shadow_blur_strength=&#8221;header_2_text_shadow_style,%91object Object%93&#8243; 
header_2_text_shadow_blur_strength_tablet=&#8221;1px&#8221; header_3_text_shadow_horizontal_length=&#8221;header_3_text_shadow_style,%91object Object%93&#8243; header_3_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; header_3_text_shadow_vertical_length=&#8221;header_3_text_shadow_style,%91object Object%93&#8243; header_3_text_shadow_vertical_length_tablet=&#8221;0px&#8221; header_3_text_shadow_blur_strength=&#8221;header_3_text_shadow_style,%91object Object%93&#8243; header_3_text_shadow_blur_strength_tablet=&#8221;1px&#8221; header_4_text_shadow_horizontal_length=&#8221;header_4_text_shadow_style,%91object Object%93&#8243; header_4_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; header_4_text_shadow_vertical_length=&#8221;header_4_text_shadow_style,%91object Object%93&#8243; header_4_text_shadow_vertical_length_tablet=&#8221;0px&#8221; header_4_text_shadow_blur_strength=&#8221;header_4_text_shadow_style,%91object Object%93&#8243; header_4_text_shadow_blur_strength_tablet=&#8221;1px&#8221; header_5_text_shadow_horizontal_length=&#8221;header_5_text_shadow_style,%91object Object%93&#8243; header_5_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; header_5_text_shadow_vertical_length=&#8221;header_5_text_shadow_style,%91object Object%93&#8243; header_5_text_shadow_vertical_length_tablet=&#8221;0px&#8221; header_5_text_shadow_blur_strength=&#8221;header_5_text_shadow_style,%91object Object%93&#8243; header_5_text_shadow_blur_strength_tablet=&#8221;1px&#8221; header_6_text_shadow_horizontal_length=&#8221;header_6_text_shadow_style,%91object Object%93&#8243; header_6_text_shadow_horizontal_length_tablet=&#8221;0px&#8221; header_6_text_shadow_vertical_length=&#8221;header_6_text_shadow_style,%91object Object%93&#8243; header_6_text_shadow_vertical_length_tablet=&#8221;0px&#8221; header_6_text_shadow_blur_strength=&#8221;header_6_text_shadow_style,%91object Object%93&#8243; header_6_text_shadow_blur_strength_tablet=&#8221;1px&#8221; 
box_shadow_horizontal_tablet=&#8221;0px&#8221; box_shadow_vertical_tablet=&#8221;0px&#8221; box_shadow_blur_tablet=&#8221;40px&#8221; box_shadow_spread_tablet=&#8221;0px&#8221; vertical_offset_tablet=&#8221;0&#8243; horizontal_offset_tablet=&#8221;0&#8243; z_index_tablet=&#8221;0&#8243;]<\/p>\n<h1 id=\"application-tier\"><strong><span style=\"color: #00aeef\">Application Tier<\/span><\/strong><\/h1>\n<p>The application tier is a foundation that facilitates the execution of applications within the AIR\u00b7MS environment. It streamlines software development by providing infrastructure components that are commonly needed by microservice-based applications:<\/p>\n<ul>\n<li>scalable compute infrastructure for executing application code,<\/li>\n<li>a database for storing application metadata,<\/li>\n<li>an access control mechanism to restrict usage of individual applications.<\/li>\n<\/ul>\n<p>These building blocks ensure that software\u00a0developers have a consistent and reliable foundation to build upon, enhancing efficiency and reducing redundancy.<\/p>\n<p>In terms of user roles within AIR\u00b7MS, the application tier caters to distinct personas with specific needs:<\/p>\n<ul>\n<li><strong>Researchers<\/strong>, who access deployed applications via private endpoints within the Mount Sinai network.<\/li>\n<li><strong>Service Providers<\/strong>, who deploy and manage services integrated with the AIR\u00b7MS environment.<\/li>\n<\/ul>\n<h2>\u00a0<\/h2>\n<h2 id=\"architecture\"><strong><span style=\"color: #00aeef\">Architecture<\/span><\/strong><\/h2>\n<p>The application tier is composed of an execution layer, a data layer, and an application gateway. 
The diagram below presents the architecture of the application tier.<\/p>\n<p><img decoding=\"async\" class=\"aligncenter\" src=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/app-tier\/architecture.drawio.svg\" alt=\"Service Overview\" \/><\/p>\n<p>\u00a0<\/p>\n<div class=\"prose\">\n<h3 id=\"execution-layer\"><strong><span style=\"color: #00aeef\">Execution Layer<\/span><\/strong><\/h3>\n<p>In adopting a practical and scalable infrastructure strategy, we opt for a microservices architecture containerized through Docker and orchestrated with Kubernetes. This approach enables a flexible deployment model, supporting efficiency and scalability for our applications.<\/p>\n<p>To facilitate our Kubernetes-based approach, the following infrastructure elements play a critical role:<\/p>\n<ul>\n<li><strong>Azure Kubernetes Cluster (AKS):<\/strong>\u00a0Serving as a reliable foundation, AKS manages our containerized microservices, offering scalability and ease of orchestration.<\/li>\n<li><strong>Container registry:<\/strong>\u00a0Essential for version control and efficient distribution, the container registry is employed to store and manage container images in a centralized repository.<\/li>\n<li><strong>Key Vault:<\/strong>\u00a0Prioritizing security, Key Vault securely manages sensitive information, such as API keys and database credentials, ensuring a robust layer of protection for our microservices.<\/li>\n<\/ul>\n<h3>\u00a0<\/h3>\n<h3 id=\"data-layer\"><strong><span style=\"color: #00aeef\">Data Layer<\/span><\/strong><\/h3>\n<p>An\u00a0<strong>Azure Database for PostgreSQL \u2013 Flexible Server<\/strong>\u00a0is used to store application metadata. 
This includes configuration values, internal application states, and integration-specific metadata.<\/p>\n<p>This managed PostgreSQL service resides in a delegated subnet and is only accessible from within the application tier.<\/p>\n<p><strong>Persistent volumes<\/strong>\u00a0can also be mounted by application containers. This is useful for applications that need to store data in files that persist beyond the lifecycle of a single container instance.<\/p>\n<p>In addition, the AIR\u00b7MS platform integrates with SAP HANA for storing and querying sensitive research data. Although not part of the application tier per se, applications deployed to the tier can securely query SAP HANA:<\/p>\n<ul>\n<li>All communication uses SSL.<\/li>\n<li>Access is controlled via LDAP-authenticated Entra ID group mappings.<\/li>\n<li>Privileges restrict access to specific datasets through Sailpoint-managed roles.<\/li>\n<\/ul>\n<h3>\u00a0<\/h3>\n<h3 id=\"azure-application-gateway\"><strong><span style=\"color: #00aeef\">Azure Application Gateway<\/span><\/strong><\/h3>\n<p>The Azure Application Gateway provides a unified entry point for applications deployed in AIR\u00b7MS. 
It performs TLS termination and acts as a reverse proxy that routes traffic based on subdomain or path.<\/p>\n<p>Examples:<\/p>\n<ul>\n<li><strong>Path-based routing:<\/strong>\n<ul>\n<li><a href=\"https:\/\/airms.mssm.edu\/visian\">https:\/\/airms.mssm.edu\/visian<\/a><\/li>\n<li><a href=\"https:\/\/airms.mssm.edu\/hello-world\">https:\/\/airms.mssm.edu\/hello-world<\/a><\/li>\n<\/ul>\n<\/li>\n<li><strong>Subdomain-based routing:<\/strong>\n<ul>\n<li><a href=\"https:\/\/d2e.airms.mssm.edu\/\">https:\/\/d2e.airms.mssm.edu<\/a><\/li>\n<\/ul>\n<\/li>\n<\/ul>\n<p>Environment-specific base domains include:<\/p>\n<ul>\n<li><a href=\"https:\/\/airms.mssm.edu\/\">https:\/\/airms.mssm.edu<\/a>\u00a0(Production)<\/li>\n<li><a href=\"https:\/\/airms-staging.mssm.edu\/\">https:\/\/airms-staging.mssm.edu<\/a>\u00a0(Staging)<\/li>\n<li><a href=\"https:\/\/airms-sandbox.mssm.edu\/\">https:\/\/airms-sandbox.mssm.edu<\/a>\u00a0(Sandbox)<\/li>\n<\/ul>\n<p>The different environments are isolated from each other and allow for development and testing without impacting production services:<\/p>\n<ul>\n<li><a href=\"https:\/\/airms-sandbox.mssm.edu\/visian\">https:\/\/airms-sandbox.mssm.edu\/visian<\/a><\/li>\n<li><a href=\"https:\/\/d2e.airms-sandbox.mssm.edu\/\">https:\/\/d2e.airms-sandbox.mssm.edu<\/a><\/li>\n<\/ul>\n<p>The gateway supports both Kubernetes ingress and VM-based services.<\/p>\n<h2>\u00a0<\/h2>\n<h2 id=\"application-access-control\"><strong><span style=\"color: #00aeef\">Application Access Control<\/span><\/strong><\/h2>\n<p>Access to deployed applications is controlled using Microsoft Entra ID:<\/p>\n<ul>\n<li>Each application is registered with Entra ID and assigned a unique client ID.<\/li>\n<li>Entra ID groups define which users can access which applications.<\/li>\n<li>Sailpoint is used for automating and managing group membership.<\/li>\n<\/ul>\n<p>Network-level access is also restricted:<\/p>\n<ul>\n<li>Only AIR\u00b7MS users within the Mount Sinai network can 
access private application endpoints<\/li>\n<li>Network security groups (NSGs) enforce subnet-level isolation and allow only the authorized traffic<\/li>\n<\/ul>\n<h2>\u00a0<\/h2>\n<h2 id=\"cicd-pipeline\"><strong><span style=\"color: #00aeef\">CI\/CD Pipeline<\/span><\/strong><\/h2>\n<p>The CI\/CD pipeline automates how applications are\u00a0<strong>built, tested, and deployed<\/strong>\u00a0across different environments (Sandbox, Staging, Production). It uses GitHub Actions on Mount Sinai\u2019s own GitHub Enterprise instance and self-hosted GitHub runners to build and deploy applications reliably and securely.<\/p>\n<p>The key responsibilities of the CI\/CD pipeline are to:<\/p>\n<ul>\n<li>Build an application when code changes are made and push it to the container registry<\/li>\n<li>Run tests to ensure it still works<\/li>\n<li>Deploy it to the desired environment<\/li>\n<li>Create databases and access users, when needed<\/li>\n<\/ul>\n<h3>\u00a0<\/h3>\n<h3 id=\"key-actions\"><strong><span style=\"color: #00aeef\">Key Actions<\/span><\/strong><\/h3>\n<p>There are 3 automated workflows available:<\/p>\n<table>\n<thead>\n<tr>\n<th>Action<\/th>\n<th>What it does<\/th>\n<\/tr>\n<\/thead>\n<tbody>\n<tr>\n<td><strong>Build<\/strong><\/td>\n<td>Automatically triggered when code changes. Builds the app, runs tests, and stores it.<\/td>\n<\/tr>\n<tr>\n<td><strong>Deploy<\/strong><\/td>\n<td>Deploys the app to the specified environment (Sandbox, Staging, Production). Triggered automatically on Sandbox.<\/td>\n<\/tr>\n<tr>\n<td><strong>Database<\/strong><\/td>\n<td>Creates or updates a metadata database and access user for an application. Triggered manually.<\/td>\n<\/tr>\n<\/tbody>\n<\/table>\n<h3>\u00a0<\/h3>\n<h3 id=\"deployment-secrets\"><strong><span style=\"color: #00aeef\">Deployment Secrets<\/span><\/strong><\/h3>\n<p>To deploy securely, sensitive information like passwords or API keys (called secrets) must be stored safely in Azure Key Vault. 
These are loaded automatically during deployment.<\/p>\n<p>There are two kinds of configurations:<\/p>\n<ul>\n<li><strong>Secrets:<\/strong>\u00a0Stored in Key Vault, e.g. passwords<\/li>\n<li><strong>Settings:<\/strong>\u00a0Non-sensitive configuration stored in the repository<\/li>\n<\/ul>\n<h3>\u00a0<\/h3>\n<h3 id=\"image-signing\"><strong><span style=\"color: #00aeef\">Image Signing<\/span><\/strong><\/h3>\n<p>All application versions are stored as signed Docker images. This ensures their authenticity and integrity. In order to only allow trusted images to end up in the container registry, Docker Content Trust is enabled and used. The CI\/CD pipeline uses a signing key that is granted access to the container registry.<\/p>\n<\/div>\n<section class=\"prose\">\n<hr \/>\n<\/section>\n<p>\u00a0<\/p>\n<h1 id=\"data-quality-dashboard\"><strong><span style=\"color: #00aeef\">Data Quality Dashboard<\/span><\/strong><\/h1>\n<h2 id=\"overview\"><strong><span style=\"color: #00aeef\">Overview<\/span><\/strong><\/h2>\n<p>The Data Quality Dashboard (DQD) on AIR\u00b7MS is implemented with the intention of allowing the data team and the users to understand the quality of the dataset being added to the AIR\u00b7MS database.<\/p>\n<p>The DQD is part of the\u00a0<a href=\"https:\/\/pubmed.ncbi.nlm.nih.gov\/38269952\/\">HADES library in OHDSI<\/a>, and has been modified to run on SAP HANA. It is currently executed by the data administrators on the AIR\u00b7MS platform, as and when needed, due to high resource utilization during the run.<\/p>\n<h2 id=\"quality-checks\"><strong><span style=\"color: #00aeef\">Quality Checks<\/span><\/strong><\/h2>\n<p>The DQD performs a set of data quality checks on the AIR\u00b7MS dataset. 
It executes the checks systematically, assesses them against a predetermined threshold, and then communicates the results in a straightforward and understandable manner.<\/p>\n<p>The quality checks are organized according to the\u00a0<a href=\"https:\/\/www.ncbi.nlm.nih.gov\/pmc\/articles\/PMC5051581\/\">Kahn framework<\/a>. It employs a system of categories and contexts that stand in for methods for evaluating the quality of data.<\/p>\n<p>More information:\u00a0<a href=\"https:\/\/ohdsi.github.io\/DataQualityDashboard\/\">Official DQD documentation<\/a>.<\/p>\n<p>The DQD consists of 24 types of checks, categorized into Kahn contexts and categories. Moreover, every type of data quality check is categorized as a table check, field check, or concept-level check.<\/p>\n<ul>\n<li>\n<p><strong>Table-level checks<\/strong>\u00a0are assessments of the table as a whole, without focusing on specific fields, or checks that apply across multiple event tables. These checks ensure that the necessary tables exist and that some individuals in the PERSON table have corresponding records in the event tables.<\/p>\n<\/li>\n<li>\n<p><strong>Field-level checks<\/strong>\u00a0pertain to individual fields within a table and are the most common type of check in the current version of DQD. This comprises checks that assess primary key relationships and checks that verify if the concepts in a domain adhere to the specified rules.<\/p>\n<\/li>\n<li>\n<p><strong>Concept-level checks<\/strong>\u00a0pertain to specific concepts (codes).<\/p>\n<\/li>\n<\/ul>\n<p>More information about each type of check:\u00a0<a href=\"https:\/\/ohdsi.github.io\/DataQualityDashboard\/articles\/CheckTypeDescriptions\">test type information in the OHDSI DQD documentation<\/a>.<\/p>\n<p>With a few exceptions, each check collects a set of relevant table rows (for example, all rows in a specific table, or all rows using a specific code) and then verifies if each row satisfies a certain pass\/fail criteria. 
For example, that the patient ID actually occurs in the PATIENT table, or that a specific code is classified as a preferred code by OHDSI. If the fraction of rows that fail the check is above the check-specific threshold, the check is marked as failed.<\/p>\n<p>The thresholds differ between checks: some fail as soon as a single row fails, others require 5% or more of the rows to fail, indicating that some criteria are considered impossible to fulfill in every single case.<\/p>\n<p>Also, apart from passing or failing, a check can be skipped if no relevant rows to check were found. For example, if a particular table is not used or a specific code does not appear in the data at all. It is not uncommon for 50% or more of all plausibility checks to be skipped. Skipped checks are counted as passed in the summary table and the failure percentage calculated relative to all checks, including skipped ones.<\/p>\n<p>Around 4,000 specific data quality checks are executed against the database and assessed using a predetermined threshold. The outcomes are visualized in a table as shown below:<\/p>\n<p><img decoding=\"async\" src=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/dqd\/data-quality-checks.png\" alt=\"Results of data quality check\" \/><\/p>\n<div class=\"prose\">\n<p>The table organizes the output according to the following main categories:<\/p>\n<ul>\n<li>\n<p><strong>Plausibility<\/strong>: Does the data agree with basic logical and medical expectations? Example: Does the measurement unit provided for a specific lab test make sense. Example: Is it a unit like cm or m for body height?<\/p>\n<\/li>\n<li>\n<p><strong>Conformance<\/strong>: Does the data conform to the OMOP Common Data Model? Example: Is the patient ID given for a diagnosis entry indeed the primary ID of an entry in the PATIENT table?<\/p>\n<\/li>\n<li>\n<p><strong>Completeness<\/strong>: Are all the expected data elements and vocabulary mappings present? 
Example: Does every medication entry have a standard OHDSI code identifying the medication given?<\/p>\n<\/li>\n<\/ul>\n<p>The DQD user interface provides a complete list of all run checks, including a check description, the fraction of failed rows, and the overall check pass\/fail outcome.<\/p>\n<h2 id=\"application-access\"><strong><span style=\"color: #00aeef\">Application Access<\/span><\/strong><\/h2>\n<p>Currently, only data administrators have access to the results of a DQD run. In the next version of AIR\u00b7MS, we plan to enable researchers to sign in to a researcher portal to view the DQD results, if they wish.<\/p>\n<\/div>\n<section class=\"prose\">\n<hr \/>\n<h1>\u00a0<\/h1>\n<h1 id=\"understanding-azure-machine-learning-and-the-azure-machine-learning-aml-platform\"><span style=\"color: #00aeef\"><strong>Understanding Azure Machine Learning and the Azure Machine Learning (AML) Platform<\/strong><\/span><\/h1>\n<h2 id=\"what-is-azure\"><span style=\"color: #00aeef\"><strong>What is Azure?<\/strong><\/span><\/h2>\n<p>The Azure cloud platform (commonly called Azure) is Microsoft\u2019s public cloud platform. Azure offers a large collection of services, which includes platform as a service (PaaS), infrastructure as a service (IaaS), and managed database service capabilities. It has more than 200 products and cloud services designed to support a wide range of solutions. Azure allows to build, run, and manage applications across multiple clouds, on-premises, and at the edge, with the tools and frameworks of your choice.<\/p>\n<p>Azure relies on virtualization technology. To learn more about virtualization, visit this link with excellent information by Microsoft Learn:\u00a0<a href=\"https:\/\/learn.microsoft.com\/en-us\/azure\/cloud-adoption-framework\/get-started\/what-is-azure\">How does Azure work? 
[\u2197]<\/a><\/p>\n<h2 id=\"what-is-azure-machine-learning\"><strong><span style=\"color: #00aeef\">What is Azure Machine Learning?<\/span><\/strong><\/h2>\n<p>For you, as a researcher in the AIR\u00b7MS project, the machine learning-related services of Azure are of particular interest. The machine learning-related services form the Azure Machine Learning platform (commonly referred to as AML). It\u2019s designed to govern the entire machine learning life cycle, so you can train and deploy models without focusing on setup. The platform is suitable for any kind of machine learning, from classical to deep learning, to supervised and unsupervised learning.<\/p>\n<p>With built-in services, like Azure Machine Learning studio, which provides a user-friendly interface, and Automated Machine Learning capabilities that assist you in model selection and training, Azure Machine Learning has tools and features to suit every level of experience.<\/p>\n<h3 id=\"how-to-use-azure-machine-learning\"><strong><span style=\"color: #00aeef\">How to Use Azure Machine Learning?<\/span><\/strong><\/h3>\n<p>Using Azure Machine Learning requires an Azure account and an Azure subscription. As a researcher, please reach out to the AIR\u00b7MS team with your request to use AML. The AIR\u00b7MS team will create the required Azure accounts and enroll your account in the appropriate subscription.<\/p>\n<p>Azure Machine Learning manages all the resources you need for the machine learning lifecycle inside a workspace. Workspaces can be shared by multiple users and include things like the computing resources available for your notebooks, training clusters, and pipelines. They are also containers for your data stores and a repository for models. 
The AIR\u00b7MS team, at the time of setup, will create a workspace for you.<\/p>\n<p>You can interact with Azure Machine Learning in these ways:<\/p>\n<ul>\n<li>\n<p>In the cloud with the AML user interface.<\/p>\n<\/li>\n<li>\n<p>On your local machine through the Python software development kit (SDK), REST API, and command line interface (CLI) extension.<\/p>\n<\/li>\n<\/ul>\n<p>Azure Machine Learning enables users familiar with machine learning frameworks to quickly train and deploy models using code, while giving others powerful visual tools. If you prefer low-code or no-code options, you can use Azure Machine Learning studio to quickly train and deploy machine learning models.<\/p>\n<h3 id=\"what-is-azure-machine-learning-studio\"><strong><span style=\"color: #00aeef\">What is Azure Machine Learning Studio?<\/span><\/strong><\/h3>\n<p>Azure Machine Learning studio is a browser-based service that provides no-code and code-first solutions to visually create, train, and manage models through a web UI.<\/p>\n<p>The components of Azure Machine Learning studio are:<\/p>\n<ul>\n<li>\n<p><strong>Jupyter Notebooks<\/strong>: Notebooks provide a collaborative environment for runnable code, visualizations, and comments. Included in studio are sample notebooks you can use to get started with Azure Machine Learning.<\/p>\n<\/li>\n<li>\n<p><strong>AutoML<\/strong>: Automated Machine Learning (AutoML) automates creating the best machine learning models, helping you find the best model for your data \u2013 no matter your data science expertise. Specializing in classification, regression, and time-series forecasting, AutoML experiments with different features, algorithms, and parameters depending on the task, then provides scores on models it thinks are the best fit. 
You can use AutoML in Azure Machine Learning studio or through the Python SDK.<\/p>\n<\/li>\n<li>\n<p><strong>Designer<\/strong>: If you prefer a no-code option, Azure Machine Learning Designer within the Azure Machine Learning studio gives you a visual canvas with drag and drop controls to manipulate datasets and modules. You can find more information about this option\u00a0<a href=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/automl-designer-about\">here<\/a>.<\/p>\n<\/li>\n<\/ul>\n<p><em>Modules<\/em>\u00a0within Azure Machine Learning Designer are algorithms that can have a range of purposes, from data ingress functions to training, scoring, and validation processes.<\/p>\n<p>If you are looking for scenarios in which AML has been particularly powerful for teams across different companies, visit\u00a0<a href=\"https:\/\/learn.microsoft.com\/en-us\/training\/modules\/intro-to-azure-ml\/4-when-to-use-azure-ml\">When to use Azure Machine Learning [\u2197]<\/a>\u00a0on the Microsoft Learn website.<\/p>\n<\/section>\n<p>\u00a0<\/p>\n<h1 id=\"using-automl-and-aml-designer\"><span style=\"color: #00aeef\"><strong>Using AutoML and AML Designer<\/strong><\/span><\/h1>\n<h2 id=\"about-microsoft-automl\"><span style=\"color: #00aeef\"><strong>About Microsoft AutoML<\/strong><\/span><\/h2>\n<p>Automated machine learning, also referred to as automated ML or AutoML, is the process of automating the time-consuming, iterative tasks of machine learning model development. It allows data scientists, analysts, and developers to build machine learning models with high scale, efficiency, and productivity, all while sustaining model quality. It particularly specializes in classification, regression, and time-series forecasting.<\/p>\n<p>State-of-the-art machine learning\/AI systems consist of complex pipelines with choices of hyperparameters, models, and configuration details that need to be tuned for optimal performance. 
The resulting optimization space can be too complex and high-dimensional for researchers and engineers to explore manually.<\/p>\n<p>When automated systems are used, the high costs of running a single experiment (for example, training a deep neural network) and the high sample complexity (that is, large number of experiments required) together make na\u00efve approaches impractical. Many of the problems we are interested in can be cast as high-dimensional combinatorial optimization tasks.<\/p>\n<p>Broadly speaking, AutoML tackles these problems by designing probabilistic machine learning models to guide (automated) experimental decisions and meta-learning to reduce the sample complexity and transfer knowledge across related datasets or problems.<\/p>\n<h2 id=\"no-code-ui-or-a-code-based-sdk-for-automl\"><strong><span style=\"color: #00aeef\">No-Code UI or a Code-Based SDK for AutoML<\/span><\/strong><\/h2>\n<h3 id=\"no-code\"><strong><span style=\"color: #00aeef\">No code<\/span><\/strong><\/h3>\n<p>If you prefer a no-code approach, the following tutorial from Microsoft explains the AutoML user interface and its features. You can follow along at your own pace:\u00a0<a href=\"https:\/\/learn.microsoft.com\/en-us\/azure\/machine-learning\/how-to-use-automated-ml-for-ml-models?view=azureml-api-2\">No-code AutoML training for tabular data [\u2197]<\/a>.<\/p>\n<h3 id=\"sdk\"><span style=\"color: #00aeef\"><strong>SDK<\/strong><\/span><\/h3>\n<p>If you\u2019re a code-experienced researcher, you can use AutoML with the Azure Machine Learning Python SDK. 
Get started with this tutorial from Microsoft:\u00a0<a href=\"https:\/\/learn.microsoft.com\/en-us\/azure\/machine-learning\/tutorial-auto-train-image-models?view=azureml-api-2&amp;tabs=cli\">Train an object detection model (preview) with AutoML and Python [\u2197]<\/a>.<\/p>\n<h2 id=\"azure-machine-learning-designer\"><strong><span style=\"color: #00aeef\">Azure Machine Learning designer<\/span><\/strong><\/h2>\n<p>Azure Machine Learning designer is a drag-and-drop interface used to train and deploy models in Azure Machine Learning. It allows you to use a visual canvas to build an end-to-end machine learning workflow. Train, test, and deploy models in the designer:<\/p>\n<ul>\n<li>\n<p>Drag and drop data assets and components onto the canvas.<\/p>\n<\/li>\n<li>\n<p>Connect the components to create a pipeline draft.<\/p>\n<\/li>\n<li>\n<p>Submit a pipeline run using the compute resources in your Azure Machine Learning workspace.<\/p>\n<\/li>\n<li>\n<p>Convert your training pipelines to inference pipelines.<\/p>\n<\/li>\n<li>\n<p>Publish your pipelines to a REST pipeline endpoint to submit a new pipeline that runs with different parameters and data assets.<\/p>\n<ul>\n<li>\n<p>Publish a training pipeline to reuse a single pipeline to train multiple models while changing parameters and data assets.<\/p>\n<\/li>\n<li>\n<p>Publish a batch inference pipeline to make predictions on new data by using a previously trained model.<\/p>\n<\/li>\n<\/ul>\n<\/li>\n<li>\n<p>Deploy a real-time inference pipeline to an online endpoint to make predictions on new data in real time.<\/p>\n<\/li>\n<\/ul>\n<h2 id=\"core-concepts\"><span style=\"color: #00aeef\"><strong>Core Concepts<\/strong><\/span><\/h2>\n<ul>\n<li>\n<p><strong>Pipeline<\/strong>: A pipeline consists of data assets and analytical components, which you connect. Pipelines have many uses: You can make a pipeline that trains a single model, or one that trains multiple models. 
You can create a pipeline that makes predictions in real time or in batch, or make a pipeline that only cleans data. Pipelines let you reuse your work and organize your projects.<\/p>\n<\/li>\n<li>\n<p><strong>Data<\/strong>: A machine learning data asset makes it easy to access and work with your data. Several\u00a0<a href=\"https:\/\/learn.microsoft.com\/en-us\/azure\/machine-learning\/samples-designer?view=azureml-api-1#datasets\">sample data assets [\u2197]<\/a>\u00a0are included in the designer for you to experiment with. You can\u00a0<a href=\"https:\/\/learn.microsoft.com\/en-us\/azure\/machine-learning\/how-to-create-register-datasets?view=azureml-api-1\">register [\u2197]<\/a>\u00a0additional data assets as you need them.<\/p>\n<\/li>\n<li>\n<p><strong>Component<\/strong>: A component is an algorithm that you can perform on your data. The designer has several components ranging from data ingress functions to training, scoring, and validation processes.<\/p>\n<\/li>\n<\/ul>\n<p>To learn more:\u00a0<a href=\"https:\/\/learn.microsoft.com\/en-us\/azure\/machine-learning\/tutorial-designer-automobile-price-train-score?view=azureml-api-1\">Tutorial: Designer &#8211; train a no-code regression model [\u2197]<\/a><\/p>\n<h1>\u00a0<\/h1>\n<h1 id=\"visian---the-image-annotation-tool\"><span style=\"color: #00aeef\"><strong>VISIAN &#8211; the Image Annotation Tool<\/strong><\/span><\/h1>\n<p>VISIAN is a web-based editor to annotate medical images. It allows you to view medical images in 2D and 3D while changing the viewing orientation or adjusting parameters such as contrast and brightness. VISIAN is equipped with annotation features including brush and outline tools, as well as smart brushes that significantly speed up the segmentation process. 
Moreover, VISIAN supports multiple annotation layers, enabling detailed analysis and preparation for medical research and diagnostics.<\/p>\n<ol>\n<li><a href=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/visian\/#application-access\">Application Access<\/a><\/li>\n<li><a href=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/visian\/#working-with-visian\">Working with VISIAN<\/a><\/li>\n<li><a href=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/visian\/#managing-project-data\">Managing Project Data<\/a><\/li>\n<\/ol>\n<h2 id=\"accessing-visian\">\u00a0 <strong><span style=\"color: #00aeef\">Accessing VISIAN<\/span><\/strong><\/h2>\n<p>The latest release of VISIAN, integrated for the Bowel Segmentation use case, makes use of the AIR\u00b7MS backend (Azure) for storing medical images and annotations. When annotators use VISIAN, both the medical images and the created annotations are loaded and saved in storage in AIR\u00b7MS. No data is managed outside of Mount Sinai\u2019s infrastructure.<\/p>\n<p>You can access VISIAN from within the Mount Sinai network or using VPN. In order to access the application, approved annotators of the project need to be assigned a specific role. For this purpose, please send us a request via email using our\u00a0<a href=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/contact\">contact<\/a>\u00a0information. Once this role is assigned to an annotator, the principal investigator of the project can add them to the project, using their email address. Afterwards, annotators can authenticate using Mount Sinai\u2019s single sign-on.<\/p>\n<p>URL:\u00a0<a href=\"https:\/\/airms.mssm.edu\/visian\/\">https:\/\/airms.mssm.edu\/visian\/<\/a><\/p>\n<h2 id=\"working-with-visian\"><strong><span style=\"color: #00aeef\">Working with VISIAN<\/span><\/strong><\/h2>\n<p>When you open VISIAN, you see a list of projects where you are collaborating. 
To open a project, click its title.<\/p>\n<p>\u00a0<\/p>\n<div class=\"image-popover\">\n<figure><img decoding=\"async\" src=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/visian\/visian-proj-1.png\" alt=\"VISIAN\u2019s projects screen\" \/><\/figure>\n<\/div>\n<p>\u00a0<\/p>\n<p>Once within a project, the image studies of that project are shown. Projects in VISIAN have\u00a0<em>Principal Investigators<\/em>\u00a0(PIs) and\u00a0<em>Annotators<\/em>. Each of these have different views. Principal investigators can see all the studies and the annotations created by the Annotators of that project. In contrast, Annotators can only see the studies assigned to them.<\/p>\n<p>Image studies have a status:<\/p>\n<ul>\n<li><strong>No Annotation<\/strong><\/li>\n<li><strong>In Progress<\/strong><\/li>\n<li><strong>Completed<\/strong><\/li>\n<\/ul>\n<p>\u00a0<\/p>\n<div class=\"image-popover\">\n<figure><img decoding=\"async\" src=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/visian\/visian-proj-2.png\" alt=\"VISIAN\u2019s study list\" \/><\/figure>\n<\/div>\n<p>\u00a0<\/p>\n<p>Upon choosing a study, the study is loaded in the browser, and Annotators can create their annotation:<\/p>\n<p>\u00a0<\/p>\n<div class=\"image-popover\">\n<figure><img decoding=\"async\" src=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/visian\/visian-proj-3.png\" alt=\"VISIAN\u2019s image loading progress bar\" \/><\/figure>\n<\/div>\n<p>\u00a0<\/p>\n<p>Annotators can save their work by clicking the\u00a0<strong>Save<\/strong>\u00a0button.<\/p>\n<p>\u00a0<\/p>\n<div class=\"image-popover\">\n<figure><img decoding=\"async\" src=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/visian\/visian-proj-4.png\" alt=\"VISIAN creation of an annotation and saving\" \/><\/figure>\n<\/div>\n<p>\u00a0<\/p>\n<p>An annotation can be saved as\u00a0<strong>In Progress<\/strong>\u00a0or\u00a0<strong>Completed<\/strong>. 
Principal investigators can see the status of all annotations in their projects.<\/p>\n<p>\u00a0<\/p>\n<div class=\"image-popover\">\n<figure><img decoding=\"async\" src=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/visian\/visian-proj-5.png\" alt=\"VISIAN\u2019s save statuses\" \/><\/figure>\n<\/div>\n<p>\u00a0<\/p>\n<p>To open any supplementary image series while adding an annotation, click the clip icon.<\/p>\n<p>\u00a0<\/p>\n<div class=\"image-popover\">\n<figure><img decoding=\"async\" src=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/visian\/visian-proj-6.png\" alt=\"VISIAN\u2019s supplementary annotation\" \/><\/figure>\n<\/div>\n<p>\u00a0<\/p>\n<h2 id=\"managing-project-data\"><strong><span style=\"color: #00aeef\">Managing Project Data<\/span><\/strong><\/h2>\n<p>PIs have special file management privileges to upload image studies and to retrieve the created annotations. For this purpose,\u00a0<a href=\"https:\/\/azure.microsoft.com\/en-us\/products\/storage\/storage-explorer\">Azure Storage Explorer<\/a>\u00a0(ASE) must be used. 
In order to upload image studies to the project, go through the following steps.<\/p>\n<ol>\n<li>\n<p>After installing Azure Storage Explorer, open it and click\u00a0<strong>Sign in with Azure<\/strong>:<\/p>\n<div class=\"image-popover\">\n<figure><img decoding=\"async\" src=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/visian\/ase-1.png\" alt=\"VISIAN\u2019s data management by the PI\" \/><\/figure>\n<\/div>\n<p>\u00a0<\/p>\n<\/li>\n<li>\n<p>Select\u00a0<strong>Azure<\/strong>\u00a0as environment to sign in:<\/p>\n<div class=\"image-popover\">\n<figure><img decoding=\"async\" src=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/visian\/ase-2.png\" alt=\"VISIAN\u2019s data management by the PI\" \/><\/figure>\n<\/div>\n<p>\u00a0<\/p>\n<\/li>\n<li>\n<p>You are redirected to the browser:<\/p>\n<div class=\"image-popover\">\n<figure><img decoding=\"async\" src=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/visian\/ase-3.png\" alt=\"VISIAN\u2019s data management by the PI\" \/><\/figure>\n<\/div>\n<p>\u00a0<\/p>\n<\/li>\n<li>\n<p>In the browser, enter your Mount Sinai email address (<code>@mssm.edu<\/code>\u00a0or\u00a0<code>@mountsinai.org<\/code>):<\/p>\n<div class=\"image-popover\">\n<figure><img decoding=\"async\" src=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/visian\/ase-4.png\" alt=\"VISIAN\u2019s data management by the PI\" \/><\/figure>\n<\/div>\n<p>\u00a0<\/p>\n<\/li>\n<li>\n<p>Back in ASE, click the subscription where the Bowel Segmentation project is stored:<\/p>\n<div class=\"image-popover\">\n<figure><img decoding=\"async\" src=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/visian\/ase-5.png\" alt=\"VISIAN\u2019s data management by the PI\" \/><\/figure>\n<\/div>\n<p>\u00a0<\/p>\n<\/li>\n<li>\n<p>In the panel on the left, navigate to the storage container called\u00a0<strong>bowelseg<\/strong>, by clicking\u00a0<strong>Storage Accounts &gt; imgannostorage &gt; Blob 
Containers &gt; bowelseg<\/strong>:<\/p>\n<\/li>\n<\/ol>\n<p>\u00a0<\/p>\n<div class=\"image-popover\">\n<figure><img decoding=\"async\" src=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/visian\/ase-6.png\" alt=\"VISIAN\u2019s data management by the PI\" \/><\/figure>\n<\/div>\n<p>\u00a0<\/p>\n<ol start=\"7\">\n<li>\n<p>Within this storage container, the system has two subfolders: an\u00a0<strong>input<\/strong>\u00a0and an\u00a0<strong>output<\/strong>\u00a0subfolder. As the names indicate, these are for placing the input image studies, and where the annotations for these studies will be found later (respectively):<\/p>\n<div class=\"image-popover\">\n<figure><img decoding=\"async\" src=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/visian\/ase-7.png\" alt=\"VISIAN\u2019s data management by the PI\" \/><\/figure>\n<\/div>\n<p>\u00a0<\/p>\n<\/li>\n<li>\n<p>Within the\u00a0<strong>input<\/strong>\u00a0folder, subfolders for each of the image studies can be created. 
Below, image studies from a few subjects are shown:<\/p>\n<\/li>\n<\/ol>\n<p>\u00a0<\/p>\n<div class=\"image-popover\">\n<figure><img decoding=\"async\" src=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/visian\/ase-8.png\" alt=\"VISIAN\u2019s data management by the PI\" \/><\/figure>\n<\/div>\n<p>\u00a0<\/p>\n<ol start=\"9\">\n<li>Within each subject\u2019s folder, the system expects exactly two subfolders: one with the image series to be annotated, called\u00a0<strong><code>ABD-PELVIS_AX HASTE T2 Long TE_COMPOSED<\/code><\/strong>, and another one with the image series that is supplementary, called\u00a0<strong><code>ABD-PELVIS_COR T2 HASTE_ MBH_COMP_AD<\/code><\/strong>:<\/li>\n<\/ol>\n<p>\u00a0<\/p>\n<div class=\"image-popover\">\n<figure><img decoding=\"async\" src=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/visian\/ase-9.png\" alt=\"VISIAN\u2019s data management by the PI\" \/><\/figure>\n<\/div>\n<p>\u00a0<\/p>\n<ol start=\"10\">\n<li>The actual\u00a0<strong>.dcm<\/strong>\u00a0files should be copied into each of those two subfolders (e.g. through drag-and-drop from your Finder, on macOS):\n<div class=\"image-popover\">\n<figure><img decoding=\"async\" src=\"https:\/\/github.mountsinai.org\/pages\/AIRMS\/documentation\/visian\/ase-10.png\" alt=\"VISIAN\u2019s data management by the PI\" \/><\/figure>\n<\/div>\n<\/li>\n<\/ol>\n<p>\u00a0<\/p>\n<p>[\/et_pb_text][\/et_pb_column][\/et_pb_row][\/et_pb_section]<\/p>\n","protected":false},"excerpt":{"rendered":"<p>Scientific Computing and Data \/ AIR\u00b7MS (AI Ready Mount Sinai) \/ Documentation &nbsp; Application TierThe application tier is a foundation that facilitates the execution of applications within the AIR\u00b7MS environment. 
It streamlines software development by providing infrastructure components that are commonly needed by microservice-based applications:scalable compute infrastructure for executing application code,a database for storing application [&hellip;]<\/p>\n","protected":false},"author":700,"featured_media":0,"parent":0,"menu_order":0,"comment_status":"closed","ping_status":"closed","template":"","meta":{"_et_pb_use_builder":"on","_et_pb_old_content":"<p>[et_pb_section bb_built=\"1\" inner_width=\"auto\" inner_max_width=\"1080px\"][et_pb_row _builder_version=\"4.27.4\"][\/et_pb_row][\/et_pb_section]<\/p>","_et_gb_content_width":"","footnotes":""},"class_list":["post-12353","page","type-page","status-publish","hentry"],"aioseo_notices":[],"_links":{"self":[{"href":"https:\/\/labs.icahn.mssm.edu\/minervalab\/wp-json\/wp\/v2\/pages\/12353","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/labs.icahn.mssm.edu\/minervalab\/wp-json\/wp\/v2\/pages"}],"about":[{"href":"https:\/\/labs.icahn.mssm.edu\/minervalab\/wp-json\/wp\/v2\/types\/page"}],"author":[{"embeddable":true,"href":"https:\/\/labs.icahn.mssm.edu\/minervalab\/wp-json\/wp\/v2\/users\/700"}],"replies":[{"embeddable":true,"href":"https:\/\/labs.icahn.mssm.edu\/minervalab\/wp-json\/wp\/v2\/comments?post=12353"}],"version-history":[{"count":12,"href":"https:\/\/labs.icahn.mssm.edu\/minervalab\/wp-json\/wp\/v2\/pages\/12353\/revisions"}],"predecessor-version":[{"id":12423,"href":"https:\/\/labs.icahn.mssm.edu\/minervalab\/wp-json\/wp\/v2\/pages\/12353\/revisions\/12423"}],"wp:attachment":[{"href":"https:\/\/labs.icahn.mssm.edu\/minervalab\/wp-json\/wp\/v2\/media?parent=12353"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}