diff --git a/instructions/convert-jpa-to-spring-data-cosmos.instructions.md b/instructions/convert-jpa-to-spring-data-cosmos.instructions.md index ef7ca56..7481e08 100644 --- a/instructions/convert-jpa-to-spring-data-cosmos.instructions.md +++ b/instructions/convert-jpa-to-spring-data-cosmos.instructions.md @@ -912,38 +912,39 @@ public Set getRelatedEntities() { ### **Top Runtime Issues to Check** 1. **Repository Collection Casting**: - ```java - // Fix any repository methods that return collections: - default List customFindMethod() { - return StreamSupport.stream(this.findAll().spliterator(), false) - .collect(Collectors.toList()); - } - -2. **BigDecimal Compatibility (Java 17+)**: - - ```java + ```java + // Fix any repository methods that return collections: + default List customFindMethod() { + return StreamSupport.stream(this.findAll().spliterator(), false) + .collect(Collectors.toList()); + } + + 2. **BigDecimal Compatibility (Java 17+)**: + + ```java // Replace BigDecimal fields with alternatives: private Double amount; // Or String for high precision - ``` - -3. **Health Check Configuration**: - ```yaml + ``` + + 3. **Health Check Configuration**: + ```yaml # Remove database dependencies from health checks: management: health: readiness: include: 'ping,diskSpace' - ``` - -### **Authentication Conversion Patterns** - -- **Remove `@JsonIgnore` from fields that need Cosmos DB persistence** -- **Store complex objects as simple types** (e.g., authorities as `Set`) -- **Convert between simple and complex types** in service/repository layers - -### **Template/UI Compatibility Patterns** - -- **Add transient properties** with `@JsonIgnore` for UI access to related data -- **Use service layer** to populate transient relationships before rendering -- **Never return repository results directly** to templates without relationship population + ``` + + ### **Authentication Conversion Patterns** + + - **Remove `@JsonIgnore` from fields that need Cosmos DB persistence** + - **Store complex objects as simple types** (e.g., authorities as `Set`) + - **Convert between simple and complex types** in service/repository layers + + ### **Template/UI Compatibility Patterns** + + - **Add transient properties** with `@JsonIgnore` for UI access to related data + - **Use service layer** to populate transient relationships before rendering + - **Never return repository results directly** to templates without relationship population + \ No newline at end of file diff --git a/instructions/devbox-image-definition.instructions.md b/instructions/devbox-image-definition.instructions.md index d1ee13f..4e5b90a 100644 --- a/instructions/devbox-image-definition.instructions.md +++ b/instructions/devbox-image-definition.instructions.md @@ -193,20 +193,20 @@ Run this command in Terminal to apply the customizations on the Dev Box to aid i Run this command in Terminal to list the customization tasks that are available for use with the customization file. This returns a blob of JSON which includes a description of what a task is for and examples of how to use it in the yaml file. Example: -```devbox customizations list-tasks``` - -> [!IMPORTANT] -> [Keeping track of the available customization tasks for use during prompting](#keeping-track-of-the-available-customization-tasks-for-use-during-prompting) and then referring to the contents of the local file can reduce the need to prompt the user to execute this command. 
-
-### Installing WinGet locally for package discovery
-
-**Recommendation**: Having WinGet CLI on your the Dev Box you're using to author the image definition file can aid in finding correct package IDs for software installations. This is especially helpful when the MCP WinGet task generator requires you to search for package names. This would typically be the case but may depend on the base image used.
-
-#### How to install WinGet
-
-Option 1: PowerShell
-
-```powershell
+    ```devbox customizations list-tasks```
+    
+    > [!IMPORTANT]
+    > [Keeping track of the available customization tasks for use during prompting](#keeping-track-of-the-available-customization-tasks-for-use-during-prompting) and then referring to the contents of the local file can reduce the need to prompt the user to execute this command.
+    
+    ### Installing WinGet locally for package discovery
+    
+    **Recommendation**: Having WinGet CLI on the Dev Box you're using to author the image definition file can aid in finding correct package IDs for software installations. This is especially helpful when the MCP WinGet task generator requires you to search for package names. This would typically be the case but may depend on the base image used.
+    
+    #### How to install WinGet
+    
+    Option 1: PowerShell
+    
+    ```powershell
 # Install WinGet via PowerShell
 $progressPreference = 'silentlyContinue'
 Invoke-WebRequest -Uri https://aka.ms/getwinget -OutFile Microsoft.DesktopAppInstaller_8wekyb3d8bbwe.msixbundle
diff --git a/instructions/dotnet-upgrade.instructions.md b/instructions/dotnet-upgrade.instructions.md
index e384dc3..37bd7a1 100644
--- a/instructions/dotnet-upgrade.instructions.md
+++ b/instructions/dotnet-upgrade.instructions.md
@@ -131,13 +131,13 @@ BlobServiceClient client = new BlobServiceClient(connectionString);
 2. Update NuGet packages to versions compatible with the target framework.
 3. After upgrading and restoring the latest DLLs, review code for any required changes.
 4. Rebuild the project:
-   ```bash
-   dotnet build .csproj
-   ```
+    ```bash
+    dotnet build .csproj
+    ```
 5. Run unit tests if any:
-   ```bash
-   dotnet test
-   ```
+    ```bash
+    dotnet test
+    ```
 6. Fix build or runtime issues before proceeding.
 
@@ -168,120 +168,121 @@ After all projects are upgraded:
 ## 7. Tools & Automation
 - **.NET Upgrade Assistant**(Optional):
-  ```bash
-  dotnet tool install -g upgrade-assistant
-  upgrade-assistant upgrade .sln```
-
-- **Upgrade CI/CD Pipelines**:
-  When upgrading .NET projects, remember that build pipelines must also reference the correct SDK, NuGet versions, and tasks.
-  a. Locate pipeline YAML files
-   - Check common folders such as:
-     - .azuredevops/
-     - .pipelines/
-     - Deployment/
-     - Root of the repo (*.yml)
-
-b. Scan for .NET SDK installation tasks
-   Look for tasks like:
-   - task: UseDotNet@2
-     inputs:
-       version:
-
-   or
-   displayName: Use .NET Core sdk
-
-c. Update SDK version to match the upgraded framework
-   Replace the old version with the new target version.
-   Example:
-   - task: UseDotNet@2
-     displayName: Use .NET SDK
-     inputs:
-       version:
-       includePreviewVersions: true # optional, if upgrading to a preview release
-
-d. Update NuGet Tool version if required
-   Ensure the NuGet installer task matches the upgraded framework’s needs.
-   Example:
-   - task: NuGetToolInstaller@0
-     displayName: Use NuGet
-     inputs:
-       versionSpec:
-       checkLatest: true
-
-e. Validate the pipeline after updates
-   - Commit changes to a feature branch.
-   - Trigger a CI build to confirm:
-     - The YAML is valid.
-     - The SDK is installed successfully.
- - Projects restore, build, and test with the upgraded framework. - ---- - -## 8. Commit Plan -- Always work on the specified branch or branch provided in context, if no branch specified create a new branch (`upgradeNetFramework`). -- Commit after each successful project upgrade. -- If a project fails, rollback to the previous commit and fix incrementally. - - ---- - -## 9. Final Deliverable -- Fully upgraded solution targeting the desired framework version. -- Updated documentation of upgraded dependencies. -- Test results confirming successful build & execution. - ---- - - -## 10. Upgrade Checklist (Per Project) - -Use this table as a sample to track the progress of the upgrade across all projects in the solution and add this in the PullRequest - -| Project Name | Target Framework | Dependencies Updated | Builds Successfully | Tests Passing | Deployment Verified | Notes | -|--------------|------------------|-----------------------|---------------------|---------------|---------------------|-------| -| Project A | ☐ net8.0 | ☐ | ☐ | ☐ | ☐ | | -| Project B | ☐ net8.0 | ☐ | ☐ | ☐ | ☐ | | -| Project C | ☐ net8.0 | ☐ | ☐ | ☐ | ☐ | | - -> ✅ Mark each column as you complete the step for every project. - -## 11. Commit & PR Guidelines - -- Use a **single PR per repository**: - - Title: `Upgrade to .NET [VERSION]` - - Include: - - Updated target frameworks. - - NuGet upgrade summary. - - Provide test results as summarized above. -- Tag with `breaking-change` if APIs were replaced. - -## 12. Multi-Repo Execution (Optional) - -For organizations with multiple repositories: -1. Store this `instructions.md` in a central upgrade template repo. -2. Provide SWE Agent / Cursor with: - ``` + ```bash + dotnet tool install -g upgrade-assistant + upgrade-assistant upgrade .sln``` + + - **Upgrade CI/CD Pipelines**: + When upgrading .NET projects, remember that build pipelines must also reference the correct SDK, NuGet versions, and tasks. + a. Locate pipeline YAML files + - Check common folders such as: + - .azuredevops/ + - .pipelines/ + - Deployment/ + - Root of the repo (*.yml) + + b. Scan for .NET SDK installation tasks + Look for tasks like: + - task: UseDotNet@2 + inputs: + version: + + or + displayName: Use .NET Core sdk + + c. Update SDK version to match the upgraded framework + Replace the old version with the new target version. + Example: + - task: UseDotNet@2 + displayName: Use .NET SDK + inputs: + version: + includePreviewVersions: true # optional, if upgrading to a preview release + + d. Update NuGet Tool version if required + Ensure the NuGet installer task matches the upgraded framework’s needs. + Example: + - task: NuGetToolInstaller@0 + displayName: Use NuGet + inputs: + versionSpec: + checkLatest: true + + e. Validate the pipeline after updates + - Commit changes to a feature branch. + - Trigger a CI build to confirm: + - The YAML is valid. + - The SDK is installed successfully. + - Projects restore, build, and test with the upgraded framework. + + --- + + ## 8. Commit Plan + - Always work on the specified branch or branch provided in context, if no branch specified create a new branch (`upgradeNetFramework`). + - Commit after each successful project upgrade. + - If a project fails, rollback to the previous commit and fix incrementally. + + + --- + + ## 9. Final Deliverable + - Fully upgraded solution targeting the desired framework version. + - Updated documentation of upgraded dependencies. + - Test results confirming successful build & execution. + + --- + + + ## 10. 
Upgrade Checklist (Per Project) + + Use this table as a sample to track the progress of the upgrade across all projects in the solution and add this in the PullRequest + + | Project Name | Target Framework | Dependencies Updated | Builds Successfully | Tests Passing | Deployment Verified | Notes | + |--------------|------------------|-----------------------|---------------------|---------------|---------------------|-------| + | Project A | ☐ net8.0 | ☐ | ☐ | ☐ | ☐ | | + | Project B | ☐ net8.0 | ☐ | ☐ | ☐ | ☐ | | + | Project C | ☐ net8.0 | ☐ | ☐ | ☐ | ☐ | | + + > ✅ Mark each column as you complete the step for every project. + + ## 11. Commit & PR Guidelines + + - Use a **single PR per repository**: + - Title: `Upgrade to .NET [VERSION]` + - Include: + - Updated target frameworks. + - NuGet upgrade summary. + - Provide test results as summarized above. + - Tag with `breaking-change` if APIs were replaced. + + ## 12. Multi-Repo Execution (Optional) + + For organizations with multiple repositories: + 1. Store this `instructions.md` in a central upgrade template repo. + 2. Provide SWE Agent / Cursor with: + ``` Upgrade all repositories to latest supported .NET versions following instructions.md - ``` -3. Agent should: - - Detect project type per repo. - - Apply the appropriate upgrade path. - - Open PRs for each repo. - - -## 🔑 Notes & Best Practices - -- **Prefer Migration to Modern .NET** - If on .NET Framework or .NET Standard, evaluate moving to .NET 6/8 for long-term support. -- **Automate Tests Early** - CI/CD should block merges if tests fail. -- **Incremental Upgrades** - Large solutions may require upgrading one project at a time. - - ### ✅ Example Agent Prompt - - > Upgrade this repository to the latest supported .NET version following the steps in `dotnet-upgrade-instructions.md`. - > Detect project type (.NET Core, Standard, or Framework) and apply the correct migration path. - > Ensure all tests pass and CI/CD workflows are updated. - ---- + ``` + 3. Agent should: + - Detect project type per repo. + - Apply the appropriate upgrade path. + - Open PRs for each repo. + + + ## 🔑 Notes & Best Practices + + - **Prefer Migration to Modern .NET** + If on .NET Framework or .NET Standard, evaluate moving to .NET 6/8 for long-term support. + - **Automate Tests Early** + CI/CD should block merges if tests fail. + - **Incremental Upgrades** + Large solutions may require upgrading one project at a time. + + ### ✅ Example Agent Prompt + + > Upgrade this repository to the latest supported .NET version following the steps in `dotnet-upgrade-instructions.md`. + > Detect project type (.NET Core, Standard, or Framework) and apply the correct migration path. + > Ensure all tests pass and CI/CD workflows are updated. + + --- + \ No newline at end of file diff --git a/instructions/object-calisthenics.instructions.md b/instructions/object-calisthenics.instructions.md index dcc1ff9..ea76205 100644 --- a/instructions/object-calisthenics.instructions.md +++ b/instructions/object-calisthenics.instructions.md @@ -148,12 +148,12 @@ First Class Collections: a class that contains an array as an attribute should n .Count(); } } - ``` - -5. **One Dot per Line**: - - Limit the number of method calls in a single line to improve readability and maintainability. - - ```csharp + ``` + + 5. **One Dot per Line**: + - Limit the number of method calls in a single line to improve readability and maintainability. 
+ + ```csharp // Bad Example - Multiple dots in a single line public void ProcessOrder(Order order) { var userEmail = order.User.GetEmail().ToUpper().Trim(); @@ -166,13 +166,13 @@ First Class Collections: a class that contains an array as an attribute should n var userEmail = email.ToUpper().Trim(); // Do something with userEmail } - ``` - -6. **Don't abbreviate**: - - Use meaningful names for classes, methods, and variables. - - Avoid abbreviations that can lead to confusion. - - ```csharp + ``` + + 6. **Don't abbreviate**: + - Use meaningful names for classes, methods, and variables. + - Avoid abbreviations that can lead to confusion. + + ```csharp // Bad Example - Abbreviated names public class U { public string N { get; set; } @@ -181,18 +181,18 @@ First Class Collections: a class that contains an array as an attribute should n public class User { public string Name { get; set; } } - ``` - -7. **Keep entities small (Class, method, namespace or package)**: - - Limit the size of classes and methods to improve code readability and maintainability. - - Each class should have a single responsibility and be as small as possible. - - Constraints: - - Maximum 10 methods per class - - Maximum 50 lines per class - - Maximum 10 classes per package or namespace - - ```csharp + ``` + + 7. **Keep entities small (Class, method, namespace or package)**: + - Limit the size of classes and methods to improve code readability and maintainability. + - Each class should have a single responsibility and be as small as possible. + + Constraints: + - Maximum 10 methods per class + - Maximum 50 lines per class + - Maximum 10 classes per package or namespace + + ```csharp // Bad Example - Large class with multiple responsibilities public class UserManager { public void CreateUser(string name) { /*...*/ } @@ -211,15 +211,15 @@ First Class Collections: a class that contains an array as an attribute should n public class UserUpdater { public void UpdateUser(int id, string name) { /*...*/ } } - ``` - - -8. **No Classes with More Than Two Instance Variables**: - - Encourage classes to have a single responsibility by limiting the number of instance variables. - - Limit the number of instance variables to two to maintain simplicity. - - Do not count ILogger or any other logger as instance variable. - - ```csharp + ``` + + + 8. **No Classes with More Than Two Instance Variables**: + - Encourage classes to have a single responsibility by limiting the number of instance variables. + - Limit the number of instance variables to two to maintain simplicity. + - Do not count ILogger or any other logger as instance variable. + + ```csharp // Bad Example - Class with multiple instance variables public class UserCreateCommandHandler { // Bad: Too many instance variables @@ -248,14 +248,14 @@ First Class Collections: a class that contains an array as an attribute should n this.logger = logger; } } - ``` - -9. **No Getters/Setters in Domain Classes**: - - Avoid exposing setters for properties in domain classes. - - Use private constructors and static factory methods for object creation. - - **Note**: This rule applies primarily to domain classes, not DTOs or data transfer objects. - - ```csharp + ``` + + 9. **No Getters/Setters in Domain Classes**: + - Avoid exposing setters for properties in domain classes. + - Use private constructors and static factory methods for object creation. + - **Note**: This rule applies primarily to domain classes, not DTOs or data transfer objects. 
+ + ```csharp // Bad Example - Domain class with public setters public class User { // Domain class public string Name { get; set; } // Avoid this in domain classes @@ -272,31 +272,32 @@ First Class Collections: a class that contains an array as an attribute should n public class UserDto { // DTO - exemption applies public string Name { get; set; } // Acceptable for DTOs } - ``` - -## Implementation Guidelines -- **Domain Classes**: - - Use private constructors and static factory methods for creating instances. - - Avoid exposing setters for properties. - - Apply all 9 rules strictly for business domain code. - -- **Application Layer**: - - Apply these rules to use case handlers and application services. - - Focus on maintaining single responsibility and clean abstractions. - -- **DTOs and Data Objects**: - - Rules 3 (wrapping primitives), 8 (two instance variables), and 9 (no getters/setters) may be relaxed for DTOs. - - Public properties with getters/setters are acceptable for data transfer objects. - -- **Testing**: - - Ensure tests validate the behavior of objects rather than their state. - - Test classes may have relaxed rules for readability and maintainability. - -- **Code Reviews**: - - Enforce these rules during code reviews for domain and application code. - - Be pragmatic about infrastructure and DTO code. - -## References -- [Object Calisthenics - Original 9 Rules by Jeff Bay](https://www.cs.helsinki.fi/u/luontola/tdd-2009/ext/ObjectCalisthenics.pdf) -- [ThoughtWorks - Object Calisthenics](https://www.thoughtworks.com/insights/blog/object-calisthenics) -- [Clean Code: A Handbook of Agile Software Craftsmanship - Robert C. Martin](https://www.oreilly.com/library/view/clean-code-a/9780136083238/) + ``` + + ## Implementation Guidelines + - **Domain Classes**: + - Use private constructors and static factory methods for creating instances. + - Avoid exposing setters for properties. + - Apply all 9 rules strictly for business domain code. + + - **Application Layer**: + - Apply these rules to use case handlers and application services. + - Focus on maintaining single responsibility and clean abstractions. + + - **DTOs and Data Objects**: + - Rules 3 (wrapping primitives), 8 (two instance variables), and 9 (no getters/setters) may be relaxed for DTOs. + - Public properties with getters/setters are acceptable for data transfer objects. + + - **Testing**: + - Ensure tests validate the behavior of objects rather than their state. + - Test classes may have relaxed rules for readability and maintainability. + + - **Code Reviews**: + - Enforce these rules during code reviews for domain and application code. + - Be pragmatic about infrastructure and DTO code. + + ## References + - [Object Calisthenics - Original 9 Rules by Jeff Bay](https://www.cs.helsinki.fi/u/luontola/tdd-2009/ext/ObjectCalisthenics.pdf) + - [ThoughtWorks - Object Calisthenics](https://www.thoughtworks.com/insights/blog/object-calisthenics) + - [Clean Code: A Handbook of Agile Software Craftsmanship - Robert C. 
Martin](https://www.oreilly.com/library/view/clean-code-a/9780136083238/) + \ No newline at end of file diff --git a/prompts/az-cost-optimize.prompt.md b/prompts/az-cost-optimize.prompt.md index c780f5f..6c37cad 100644 --- a/prompts/az-cost-optimize.prompt.md +++ b/prompts/az-cost-optimize.prompt.md @@ -198,16 +198,16 @@ This workflow analyzes Infrastructure-as-Code (IaC) files and Azure resources to **IaC Files Detected**: [Yes/No - based on file_search results] - ```bash - # If IaC files found: Show IaC modifications + deployment - # File: infrastructure/bicep/modules/app-service.bicep - # Change: sku.name: 'S3' → 'B2' - az deployment group create --resource-group [rg] --template-file infrastructure/bicep/main.bicep - - # If no IaC files: Direct Azure CLI commands + warning - # ⚠️ No IaC files found. If they exist elsewhere, modify those instead. - az appservice plan update --name [plan] --sku B2 - ``` + ```bash + # If IaC files found: Show IaC modifications + deployment + # File: infrastructure/bicep/modules/app-service.bicep + # Change: sku.name: 'S3' → 'B2' + az deployment group create --resource-group [rg] --template-file infrastructure/bicep/main.bicep + + # If no IaC files: Direct Azure CLI commands + warning + # ⚠️ No IaC files found. If they exist elsewhere, modify those instead. + az appservice plan update --name [plan] --sku B2 + ``` ### 📊 Evidence - Current Configuration: [details] @@ -251,12 +251,12 @@ This workflow analyzes Infrastructure-as-Code (IaC) files and Azure resources to ## 🏗️ Current Architecture Overview - ```mermaid - graph TB - subgraph "Resource Group: [name]" - [Generated architecture diagram showing current resources and costs] - end - ``` + ```mermaid + graph TB + subgraph "Resource Group: [name]" + [Generated architecture diagram showing current resources and costs] + end + ``` ## 📋 Implementation Tracking diff --git a/prompts/azure-resource-health-diagnose.prompt.md b/prompts/azure-resource-health-diagnose.prompt.md index d663f4b..30b5926 100644 --- a/prompts/azure-resource-health-diagnose.prompt.md +++ b/prompts/azure-resource-health-diagnose.prompt.md @@ -237,22 +237,22 @@ This workflow analyzes a specific Azure resource to assess its health status, di ## 🛠️ Remediation Plan ### Phase 1: Immediate Actions (0-2 hours) - ```bash - # Critical fixes to restore service - [Azure CLI commands with explanations] - ``` + ```bash + # Critical fixes to restore service + [Azure CLI commands with explanations] + ``` ### Phase 2: Short-term Fixes (2-24 hours) - ```bash - # Performance and reliability improvements - [Azure CLI commands with explanations] - ``` + ```bash + # Performance and reliability improvements + [Azure CLI commands with explanations] + ``` ### Phase 3: Long-term Improvements (1-4 weeks) - ```bash - # Architectural and preventive measures - [Azure CLI commands and configuration changes] - ``` + ```bash + # Architectural and preventive measures + [Azure CLI commands and configuration changes] + ``` ## 📈 Monitoring Recommendations - **Alerts to Configure**: [List of recommended alerts] diff --git a/prompts/cosmosdb-datamodeling.prompt.md b/prompts/cosmosdb-datamodeling.prompt.md index aa0b757..61dab6b 100644 --- a/prompts/cosmosdb-datamodeling.prompt.md +++ b/prompts/cosmosdb-datamodeling.prompt.md @@ -18,6 +18,7 @@ You are an AI pair programming with a USER. Your goal is to help the USER create 🔴 **CRITICAL**: You MUST limit the number of questions you ask at any given time, try to limit it to one question, or AT MOST: three related questions. 
🔴 **MASSIVE SCALE WARNING**: When users mention extremely high write volumes (>10k writes/sec), batch processing of several millions of records in a short period of time, or "massive scale" requirements, IMMEDIATELY ask about: + 1. **Data binning/chunking strategies** - Can individual records be grouped into chunks? 2. **Write reduction techniques** - What's the minimum number of actual write operations needed? Do all writes need to be individually processed or can they be batched? 3. **Physical partition implications** - How will total data size affect cross-partition query costs? @@ -143,16 +144,19 @@ For each pair of related containers, ask: When entities have 30-70% access correlation, choose between: **Multi-Document Container (Same Container, Different Document Types):** + - ✅ Use when: Frequent joint queries, related entities, acceptable operational coupling - ✅ Benefits: Single query retrieval, reduced latency, cost savings, transactional consistency - ❌ Drawbacks: Shared throughput, operational coupling, complex indexing **Separate Containers:** + - ✅ Use when: Independent scaling needs, different operational requirements - ✅ Benefits: Clean separation, independent throughput, specialized optimization - ❌ Drawbacks: Cross-partition queries, higher latency, increased cost **Enhanced Decision Criteria:** + - **>70% correlation + bounded size + related operations** → Multi-Document Container - **50-70% correlation** → Analyze operational coupling: - Same backup/restore needs? → Multi-Document Container @@ -187,24 +191,24 @@ Purpose: Step-by-step reasoned final design with complete justifications A JSON representation showing 5-10 representative documents for the container -```json -[ - { - "id": "user_123", - "partitionKey": "user_123", - "type": "user", - "name": "John Doe", - "email": "john@example.com" - }, - { - "id": "order_456", - "partitionKey": "user_123", - "type": "order", - "userId": "user_123", - "amount": 99.99 - } -] -``` + ```json + [ + { + "id": "user_123", + "partitionKey": "user_123", + "type": "user", + "name": "John Doe", + "email": "john@example.com" + }, + { + "id": "order_456", + "partitionKey": "user_123", + "type": "order", + "userId": "user_123", + "amount": 99.99 + } + ] + ``` - **Purpose**: [what this container stores and why this design was chosen] - **Aggregate Boundary**: [what data is grouped together in this container and why] @@ -216,24 +220,28 @@ A JSON representation showing 5-10 representative documents for the container - **Consistency Level**: [Session/Eventual/Strong - with justification] ### Indexing Strategy + - **Indexing Policy**: [Automatic/Manual - with justification] - **Included Paths**: [specific paths that need indexing for query performance] - **Excluded Paths**: [paths excluded to reduce RU consumption and storage] - **Composite Indexes**: [multi-property indexes for ORDER BY and complex filters] - ```json - { - "compositeIndexes": [ - [ - { "path": "/userId", "order": "ascending" }, - { "path": "/timestamp", "order": "descending" } + + ```json + { + "compositeIndexes": [ + [ + { "path": "/userId", "order": "ascending" }, + { "path": "/timestamp", "order": "descending" } + ] ] - ] - } - ``` + } + ``` + - **Access Patterns Served**: [Pattern #2, #5 - specific pattern references] - **RU Impact**: [expected RU consumption and optimization reasoning] ## Access Pattern Mapping + ### Solved Patterns 🔴 CRITICAL: List both writes and reads solved. 
@@ -246,6 +254,7 @@ A JSON representation showing 5-10 representative documents for the container |---------|-----------|---------------|-------------------|---------------------| ## Hot Partition Analysis + - **MainContainer**: Pattern #1 at 500 RPS distributed across ~10K users = 0.05 RPS per partition ✅ - **Container-2**: Pattern #4 filtering by status could concentrate on "ACTIVE" status - **Mitigation**: Add random suffix to partition key @@ -278,6 +287,7 @@ A JSON representation showing 5-10 representative documents for the container - [ ] Trade-offs explicitly documented and justified ✅ - [ ] Global distribution strategy detailed ✅ - [ ] Cross-referenced against `cosmosdb_requirements.md` for accuracy ✅ + ``` ## Communication Guidelines @@ -587,18 +597,20 @@ Index overhead increases RU costs and storage. It occurs when documents have man When making aggregate design decisions: • Calculate read cost = frequency × RUs per operation -• Calculate write cost = frequency × RUs per operation +• Calculate write cost = frequency × RUs per operation • Total cost = Σ(read costs) + Σ(write costs) • Choose the design with lower total cost Example cost analysis: Option 1 - Denormalized Order+Customer: + - Read cost: 1000 RPS × 1 RU = 1000 RU/s - Write cost: 50 order updates × 5 RU + 10 customer updates × 50 orders × 5 RU = 2750 RU/s - Total: 3750 RU/s Option 2 - Normalized with separate query: + - Read cost: 1000 RPS × (1 RU + 3 RU) = 4000 RU/s - Write cost: 50 order updates × 5 RU + 10 customer updates × 5 RU = 300 RU/s - Total: 4300 RU/s @@ -620,6 +632,7 @@ When facing massive write volumes, **data binning/chunking** can reduce write op **Result**: 90M records → 900k documents (95.7% reduction) **Implementation**: + ```json { "id": "chunk_001", @@ -635,17 +648,20 @@ When facing massive write volumes, **data binning/chunking** can reduce write op ``` **When to Use**: + - Write volumes >10k operations/sec - Individual records are small (<2KB each) - Records are often accessed in groups - Batch processing scenarios **Query Patterns**: + - Single chunk: Point read (1 RU for 100 records) - Multiple chunks: `SELECT * FROM c WHERE STARTSWITH(c.partitionKey, "account_test_")` - RU efficiency: 43 RU per 150KB chunk vs 500 RU for 100 individual reads **Cost Benefits**: + - 95%+ write RU reduction - Massive reduction in physical operations - Better partition distribution @@ -656,6 +672,7 @@ When facing massive write volumes, **data binning/chunking** can reduce write op When multiple entity types are frequently accessed together, group them in the same container using different document types: **User + Recent Orders Example:** + ```json [ { @@ -676,23 +693,27 @@ When multiple entity types are frequently accessed together, group them in the s ``` **Query Patterns:** + - Get user only: Point read with id="user_123", partitionKey="user_123" - Get user + recent orders: `SELECT * FROM c WHERE c.partitionKey = "user_123"` - Get specific order: Point read with id="order_456", partitionKey="user_123" **When to Use:** + - 40-80% access correlation between entities - Entities have natural parent-child relationship - Acceptable operational coupling (throughput, indexing, change feed) - Combined entity queries stay under reasonable RU costs **Benefits:** + - Single query retrieval for related data - Reduced latency and RU cost for joint access patterns - Transactional consistency within partition - Maintains entity normalization (no data duplication) **Trade-offs:** + - Mixed entity types in change feed require 
filtering - Shared container throughput affects all entity types - Complex indexing policies for different document types @@ -727,6 +748,7 @@ When cost analysis shows: Example analysis: Product + Reviews Aggregate Analysis: + - Access pattern: View product details (no reviews) - 70% - Access pattern: View product with reviews - 30% - Update frequency: Products daily, Reviews hourly @@ -777,6 +799,7 @@ Example: ProductReview container Composite partition keys are useful when data has a natural hierarchy and you need to query it at multiple levels. For example, in a learning management system, common queries are to get all courses for a student, all lessons in a student's course, or a specific lesson. StudentCourseLessons container: + - Partition Key: student_id - Document types with hierarchical IDs: @@ -804,6 +827,7 @@ StudentCourseLessons container: ``` This enables: + - Get all data: `SELECT * FROM c WHERE c.partitionKey = "student_123"` - Get course: `SELECT * FROM c WHERE c.partitionKey = "student_123" AND c.courseId = "course_456"` - Get lesson: Point read with partitionKey="student_123" AND id="lesson_789" @@ -813,6 +837,7 @@ This enables: Composite partition keys are useful to model natural query boundaries. TenantData container: + - Partition Key: tenant_id + "_" + customer_id ```json @@ -831,12 +856,14 @@ Natural because queries are always tenant-scoped and users never query across te Cosmos DB supports rich date/time operations in SQL queries. You can store temporal data using ISO 8601 strings or Unix timestamps. Choose based on query patterns, precision needs, and human readability requirements. Use ISO 8601 strings for: + - Human-readable timestamps - Natural chronological sorting with ORDER BY - Business applications where readability matters - Built-in date functions like DATEPART, DATEDIFF Use numeric timestamps for: + - Compact storage - Mathematical operations on time values - High precision requirements @@ -918,6 +945,7 @@ This pattern ensures uniqueness constraints while maintaining performance within Hierarchical Partition Keys provide natural query boundaries using multiple fields as partition key levels, eliminating synthetic key complexity while optimizing query performance. 
**Standard Partition Key**: + ```json { "partitionKey": "account_123_test_456_chunk_001" // Synthetic composite @@ -925,6 +953,7 @@ Hierarchical Partition Keys provide natural query boundaries using multiple fiel ``` **Hierarchical Partition Key**: + ```json { "partitionKey": { @@ -936,17 +965,20 @@ Hierarchical Partition Keys provide natural query boundaries using multiple fiel ``` **Query Benefits**: + - Single partition queries: `WHERE accountId = "123" AND testId = "456"` - Prefix queries: `WHERE accountId = "123"` (efficient cross-partition) - Natural hierarchy eliminates synthetic key logic **When to Consider HPK**: + - Data has natural hierarchy (tenant → user → document) - Frequent prefix-based queries - Want to eliminate synthetic partition key complexity -- Apply only for Cosmos NoSQL API +- Apply only for Cosmos NoSQL API **Trade-offs**: + - Requires dedicated tier (not available on serverless) - Newer feature with less production history - Query patterns must align with hierarchy levels @@ -1006,10 +1038,12 @@ Example: Order Processing System • Update pattern: Individual item status updates (100 RPS) Option 1 - Combined aggregate (single document): + - Read cost: 1000 RPS × 1 RU = 1000 RU/s - Write cost: 100 RPS × 10 RU (rewrite entire order) = 1000 RU/s Option 2 - Separate items (multi-document): + - Read cost: 1000 RPS × 5 RU (query multiple items) = 5000 RU/s - Write cost: 100 RPS × 10 RU (update single item) = 1000 RU/s @@ -1036,6 +1070,7 @@ Example: Session tokens with 24-hour expiration ``` Container-level TTL configuration: + ```json { "defaultTtl": -1, // Enable TTL, no default expiration diff --git a/prompts/create-github-action-workflow-specification.prompt.md b/prompts/create-github-action-workflow-specification.prompt.md index f473afa..7621e1f 100644 --- a/prompts/create-github-action-workflow-specification.prompt.md +++ b/prompts/create-github-action-workflow-specification.prompt.md @@ -39,19 +39,19 @@ tags: [process, cicd, github-actions, automation, [domain-specific-tags]] ## Execution Flow Diagram -```mermaid -graph TD - A[Trigger Event] --> B[Job 1] - B --> C[Job 2] - C --> D[Job 3] - D --> E[End] - - B --> F[Parallel Job] - F --> D - - style A fill:#e1f5fe - style E fill:#e8f5e8 -``` + ```mermaid + graph TD + A[Trigger Event] --> B[Job 1] + B --> C[Job 2] + C --> D[Job 3] + D --> E[End] + + B --> F[Parallel Job] + F --> D + + style A fill:#e1f5fe + style E fill:#e8f5e8 + ``` ## Jobs & Dependencies @@ -82,23 +82,23 @@ graph TD ### Inputs -```yaml -# Environment Variables -ENV_VAR_1: string # Purpose: [description] -ENV_VAR_2: secret # Purpose: [description] - -# Repository Triggers -paths: [list of path filters] -branches: [list of branch patterns] -``` + ```yaml + # Environment Variables + ENV_VAR_1: string # Purpose: [description] + ENV_VAR_2: secret # Purpose: [description] + + # Repository Triggers + paths: [list of path filters] + branches: [list of branch patterns] + ``` ### Outputs -```yaml -# Job Outputs -job_1_output: string # Description: [purpose] -build_artifact: file # Description: [content type] -``` + ```yaml + # Job Outputs + job_1_output: string # Description: [purpose] + build_artifact: file # Description: [content type] + ``` ### Secrets & Variables diff --git a/prompts/create-oo-component-documentation.prompt.md b/prompts/create-oo-component-documentation.prompt.md index 8795134..70b107c 100644 --- a/prompts/create-oo-component-documentation.prompt.md +++ b/prompts/create-oo-component-documentation.prompt.md @@ -94,44 +94,44 @@ Include a 
comprehensive mermaid diagram that shows: - **Data flow** - Direction of dependencies and interactions - **Inheritance/composition** - Class hierarchies and composition relationships -```mermaid -graph TD - subgraph "Component System" - A[Main Component] --> B[Internal Service] - A --> C[Internal Repository] - B --> D[Business Logic] - C --> E[Data Access Layer] - end - - subgraph "External Dependencies" - F[External API] - G[Database] - H[Third-party Library] - I[Configuration Service] - end - - A --> F - E --> G - B --> H - A --> I - - classDiagram - class MainComponent { - +property: Type - +method(): ReturnType - +asyncMethod(): Promise~Type~ - } - class InternalService { - +businessOperation(): Result - } - class ExternalAPI { - <> - +apiCall(): Data - } - - MainComponent --> InternalService - MainComponent --> ExternalAPI -``` + ```mermaid + graph TD + subgraph "Component System" + A[Main Component] --> B[Internal Service] + A --> C[Internal Repository] + B --> D[Business Logic] + C --> E[Data Access Layer] + end + + subgraph "External Dependencies" + F[External API] + G[Database] + H[Third-party Library] + I[Configuration Service] + end + + A --> F + E --> G + B --> H + A --> I + + classDiagram + class MainComponent { + +property: Type + +method(): ReturnType + +asyncMethod(): Promise~Type~ + } + class InternalService { + +businessOperation(): Result + } + class ExternalAPI { + <> + +apiCall(): Data + } + + MainComponent --> InternalService + MainComponent --> ExternalAPI + ``` ## 3. Interface Documentation @@ -154,20 +154,20 @@ graph TD ### Basic Usage -```csharp -// Basic usage example -var component = new ComponentName(); -component.DoSomething(); -``` + ```csharp + // Basic usage example + var component = new ComponentName(); + component.DoSomething(); + ``` ### Advanced Usage -```csharp -// Advanced configuration patterns -var options = new ComponentOptions(); -var component = ComponentFactory.Create(options); -await component.ProcessAsync(data); -``` + ```csharp + // Advanced configuration patterns + var options = new ComponentOptions(); + var component = ComponentFactory.Create(options); + await component.ProcessAsync(data); + ``` - USE-001: Provide basic usage examples - USE-002: Show advanced configuration patterns diff --git a/prompts/java-mcp-server-generator.prompt.md b/prompts/java-mcp-server-generator.prompt.md index 0d2b68d..6449384 100644 --- a/prompts/java-mcp-server-generator.prompt.md +++ b/prompts/java-mcp-server-generator.prompt.md @@ -689,53 +689,53 @@ A Model Context Protocol server built with Java and the official MCP Java SDK. 
## Build ### Maven -```bash -mvn clean package -``` + ```bash + mvn clean package + ``` ### Gradle -```bash -./gradlew build -``` + ```bash + ./gradlew build + ``` ## Run ### Maven -```bash -java -jar target/my-mcp-server-1.0.0.jar -``` + ```bash + java -jar target/my-mcp-server-1.0.0.jar + ``` ### Gradle -```bash -./gradlew run -``` + ```bash + ./gradlew run + ``` ## Testing ### Maven -```bash -mvn test -``` + ```bash + mvn test + ``` ### Gradle -```bash -./gradlew test -``` + ```bash + ./gradlew test + ``` ## Integration with Claude Desktop Add to `claude_desktop_config.json`: -```json -{ - "mcpServers": { - "my-mcp-server": { - "command": "java", - "args": ["-jar", "/path/to/my-mcp-server-1.0.0.jar"] + ```json + { + "mcpServers": { + "my-mcp-server": { + "command": "java", + "args": ["-jar", "/path/to/my-mcp-server-1.0.0.jar"] + } + } } - } -} -``` + ``` ## License diff --git a/prompts/php-mcp-server-generator.prompt.md b/prompts/php-mcp-server-generator.prompt.md index 29bcbc7..fe8f748 100644 --- a/prompts/php-mcp-server-generator.prompt.md +++ b/prompts/php-mcp-server-generator.prompt.md @@ -97,36 +97,36 @@ phpstan.neon ## Installation -```bash -composer install -``` + ```bash + composer install + ``` ## Usage ### Start Server (Stdio) -```bash -php server.php -``` + ```bash + php server.php + ``` ### Configure in Claude Desktop -```json -{ - "mcpServers": { - "{project-name}": { - "command": "php", - "args": ["/absolute/path/to/server.php"] + ```json + { + "mcpServers": { + "{project-name}": { + "command": "php", + "args": ["/absolute/path/to/server.php"] + } + } } - } -} -``` + ``` ## Testing -```bash -vendor/bin/phpunit -``` + ```bash + vendor/bin/phpunit + ``` ## Tools @@ -136,9 +136,9 @@ vendor/bin/phpunit Test with MCP Inspector: -```bash -npx @modelcontextprotocol/inspector php server.php -``` + ```bash + npx @modelcontextprotocol/inspector php server.php + ``` ``` ### server.php diff --git a/prompts/postgresql-optimization.prompt.md b/prompts/postgresql-optimization.prompt.md index 4f6341f..d387b38 100644 --- a/prompts/postgresql-optimization.prompt.md +++ b/prompts/postgresql-optimization.prompt.md @@ -365,9 +365,9 @@ SELECT * FROM users WHERE data @> '{"role": "admin"}'; [Improved SQL with explanations] **Recommended Indexes**: -```sql -CREATE INDEX idx_table_column ON table(column); -``` + ```sql + CREATE INDEX idx_table_column ON table(column); + ``` **Performance Impact**: Expected 80% improvement in execution time ``` diff --git a/prompts/ruby-mcp-server-generator.prompt.md b/prompts/ruby-mcp-server-generator.prompt.md index 0dee38d..b4a6e5a 100644 --- a/prompts/ruby-mcp-server-generator.prompt.md +++ b/prompts/ruby-mcp-server-generator.prompt.md @@ -547,9 +547,9 @@ A Model Context Protocol server built with Ruby and the official MCP Ruby SDK. 
## Installation -```bash -bundle install -``` + ```bash + bundle install + ``` ## Usage @@ -557,68 +557,68 @@ bundle install Run the server: -```bash -bundle exec bin/mcp-server -``` + ```bash + bundle exec bin/mcp-server + ``` Then send JSON-RPC requests: -```bash -{"jsonrpc":"2.0","id":"1","method":"ping"} -{"jsonrpc":"2.0","id":"2","method":"tools/list"} -{"jsonrpc":"2.0","id":"3","method":"tools/call","params":{"name":"greet","arguments":{"name":"Ruby"}}} -``` + ```bash + {"jsonrpc":"2.0","id":"1","method":"ping"} + {"jsonrpc":"2.0","id":"2","method":"tools/list"} + {"jsonrpc":"2.0","id":"3","method":"tools/call","params":{"name":"greet","arguments":{"name":"Ruby"}}} + ``` ### Rails Integration Add to your Rails controller: -```ruby -class McpController < ApplicationController - def index - server = MyMcpServer::Server.new( - server_context: { user_id: current_user.id } - ) - render json: server.handle_json(request.body.read) - end -end -``` + ```ruby + class McpController < ApplicationController + def index + server = MyMcpServer::Server.new( + server_context: { user_id: current_user.id } + ) + render json: server.handle_json(request.body.read) + end + end + ``` ## Testing Run tests: -```bash -bundle exec rake test -``` + ```bash + bundle exec rake test + ``` Run linter: -```bash -bundle exec rake rubocop -``` + ```bash + bundle exec rake rubocop + ``` Run all checks: -```bash -bundle exec rake -``` + ```bash + bundle exec rake + ``` ## Integration with Claude Desktop Add to `claude_desktop_config.json`: -```json -{ - "mcpServers": { - "my-mcp-server": { - "command": "bundle", - "args": ["exec", "bin/mcp-server"], - "cwd": "/path/to/my-mcp-server" + ```json + { + "mcpServers": { + "my-mcp-server": { + "command": "bundle", + "args": ["exec", "bin/mcp-server"], + "cwd": "/path/to/my-mcp-server" + } + } } - } -} -``` + ``` ## Project Structure diff --git a/prompts/rust-mcp-server-generator.prompt.md b/prompts/rust-mcp-server-generator.prompt.md index c19bc3d..b28ddf4 100644 --- a/prompts/rust-mcp-server-generator.prompt.md +++ b/prompts/rust-mcp-server-generator.prompt.md @@ -101,44 +101,44 @@ Cargo.lock ## Installation -```bash -cargo build --release -``` + ```bash + cargo build --release + ``` ## Usage ### Stdio Transport -```bash -cargo run -``` + ```bash + cargo run + ``` ### SSE Transport -```bash -cargo run --features http -- --transport sse -``` + ```bash + cargo run --features http -- --transport sse + ``` ### HTTP Transport -```bash -cargo run --features http -- --transport http -``` + ```bash + cargo run --features http -- --transport http + ``` ## Configuration Configure in your MCP client (e.g., Claude Desktop): -```json -{ - "mcpServers": { - "{project-name}": { - "command": "path/to/target/release/{project-name}", - "args": [] + ```json + { + "mcpServers": { + "{project-name}": { + "command": "path/to/target/release/{project-name}", + "args": [] + } + } } - } -} -``` + ``` ## Tools @@ -148,15 +148,15 @@ Configure in your MCP client (e.g., Claude Desktop): Run tests: -```bash -cargo test -``` + ```bash + cargo test + ``` Run with logging: -```bash -RUST_LOG=debug cargo run -``` + ```bash + RUST_LOG=debug cargo run + ``` ``` ### src/main.rs diff --git a/prompts/sql-code-review.prompt.md b/prompts/sql-code-review.prompt.md index c35432f..aed4c31 100644 --- a/prompts/sql-code-review.prompt.md +++ b/prompts/sql-code-review.prompt.md @@ -277,14 +277,14 @@ WHERE order_date >= '2024-01-01' **Recommendation**: [Specific fix with code example] **Before**: -```sql --- Problematic SQL 
-``` + ```sql + -- Problematic SQL + ``` **After**: -```sql --- Improved SQL -``` + ```sql + -- Improved SQL + ``` **Expected Improvement**: [Performance gain, security benefit] ``` diff --git a/prompts/swift-mcp-server-generator.prompt.md b/prompts/swift-mcp-server-generator.prompt.md index b148785..a98b233 100644 --- a/prompts/swift-mcp-server-generator.prompt.md +++ b/prompts/swift-mcp-server-generator.prompt.md @@ -576,29 +576,29 @@ A Model Context Protocol server built with Swift. ## Installation -```bash -swift build -c release -``` + ```bash + swift build -c release + ``` ## Usage Run the server: -```bash -swift run -``` + ```bash + swift run + ``` Or with logging: -```bash -LOG_LEVEL=debug swift run -``` + ```bash + LOG_LEVEL=debug swift run + ``` ## Testing -```bash -swift test -``` + ```bash + swift test + ``` ## Development diff --git a/prompts/update-oo-component-documentation.prompt.md b/prompts/update-oo-component-documentation.prompt.md index 0274dda..74df098 100644 --- a/prompts/update-oo-component-documentation.prompt.md +++ b/prompts/update-oo-component-documentation.prompt.md @@ -103,9 +103,9 @@ Update the mermaid diagram to show current: - **Data flow** - Current direction of dependencies and interactions - **Inheritance/composition** - Current class hierarchies and composition relationships -```mermaid -[Update diagram to reflect current architecture] -``` + ```mermaid + [Update diagram to reflect current architecture] + ``` ## 3. Interface Documentation @@ -128,15 +128,15 @@ Update the mermaid diagram to show current: ### Basic Usage -```csharp -// Update basic usage example to current API -``` + ```csharp + // Update basic usage example to current API + ``` ### Advanced Usage -```csharp -// Update advanced configuration patterns to current implementation -``` + ```csharp + // Update advanced configuration patterns to current implementation + ``` - USE-001: Update basic usage examples - USE-002: Refresh advanced configuration patterns diff --git a/prompts/update-specification.prompt.md b/prompts/update-specification.prompt.md index a623be5..a0e6475 100644 --- a/prompts/update-specification.prompt.md +++ b/prompts/update-specification.prompt.md @@ -111,9 +111,9 @@ tags: [Optional: List of relevant tags or categories, e.g., `infrastructure`, `p ## 9. Examples & Edge Cases -```code -// Code snippet or data example demonstrating the correct application of the guidelines, including edge cases -``` + ```code + // Code snippet or data example demonstrating the correct application of the guidelines, including edge cases + ``` ## 10. Validation Criteria diff --git a/scripts/indent-nested-md-code.js b/scripts/indent-nested-md-code.js new file mode 100644 index 0000000..8dbb239 --- /dev/null +++ b/scripts/indent-nested-md-code.js @@ -0,0 +1,156 @@ +#!/usr/bin/env node +/** + * Indent nested Markdown code fences (``` ... ```) that appear inside other fenced code blocks + * to ensure proper rendering on GitHub. Only modifies .md/.prompt.md/.instructions.md files + * under the specified folders (prompts/, instructions/, collections/). + * + * Strategy: + * - Parse each file line-by-line + * - Detect outer fenced code blocks (up to 3 leading spaces + backticks >= 3) + * - Within an outer block, find any inner lines that also start with a fence marker (```...) 
+ *   that are not the true closing line of the outer block (same tick length and no language info),
+ *   and treat them as the start of a nested block
+ * - Indent the inner block from its opening fence line through its next fence line (closing)
+ *   by prefixing each of those lines with four spaces
+ * - Repeat for multiple nested "inner blocks" within the same outer block
+ *
+ * Notes:
+ * - We only consider backtick fences (```). Tilde fences (~~~) are uncommon in this repo and not targeted
+ * - We preserve existing content and whitespace beyond the added indentation for nested fences
+ */
+
+const fs = require('fs');
+const path = require('path');
+
+const ROOT = process.cwd();
+const TARGET_DIRS = ['prompts', 'instructions', 'collections'];
+const VALID_EXTS = new Set(['.md', '.prompt.md', '.instructions.md']);
+
+function walk(dir) {
+  const results = [];
+  const stack = [dir];
+  while (stack.length) {
+    const current = stack.pop();
+    let entries = [];
+    try {
+      entries = fs.readdirSync(current, { withFileTypes: true });
+    } catch {
+      continue;
+    }
+    for (const ent of entries) {
+      const full = path.join(current, ent.name);
+      if (ent.isDirectory()) {
+        stack.push(full);
+      } else if (ent.isFile()) {
+        const ext = getEffectiveExt(ent.name);
+        if (VALID_EXTS.has(ext)) {
+          results.push(full);
+        }
+      }
+    }
+  }
+  return results;
+}
+
+function getEffectiveExt(filename) {
+  if (filename.endsWith('.prompt.md')) return '.prompt.md';
+  if (filename.endsWith('.instructions.md')) return '.instructions.md';
+  return path.extname(filename).toLowerCase();
+}
+
+// Regex helpers
+const fenceLineRe = /^(?<indent> {0,3})(?<ticks>`{3,})(?<rest>.*)$/; // up to 3 spaces + ``` + anything
+
+function processFile(filePath) {
+  const original = fs.readFileSync(filePath, 'utf8');
+  const lines = original.split(/\r?\n/);
+
+  let inOuter = false;
+  let outerIndent = '';
+  let outerTicksLen = 0;
+  let i = 0;
+  let changed = false;
+
+  while (i < lines.length) {
+    const line = lines[i];
+    const m = line.match(fenceLineRe);
+
+    if (!inOuter) {
+      // Look for start of an outer fence
+      if (m) {
+        inOuter = true;
+        outerIndent = m.groups.indent || '';
+        outerTicksLen = m.groups.ticks.length;
+      }
+      i++;
+      continue;
+    }
+
+    // We're inside an outer fence
+    if (m) {
+      // Is this the true closing fence for the current outer block?
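+      // A true closer is indented no deeper than the opening fence, uses the
+      // same number of backticks, and carries no info string (a line such as
+      // ```js opens a nested block rather than closing the outer one).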
+ const indentLen = (m.groups.indent || '').length; + const ticksLen = m.groups.ticks.length; + const restTrim = (m.groups.rest || '').trim(); + const isOuterCloser = indentLen <= outerIndent.length && ticksLen === outerTicksLen && restTrim === ''; + if (isOuterCloser) { + // End of outer block + inOuter = false; + outerIndent = ''; + outerTicksLen = 0; + i++; + continue; + } + + // Otherwise, treat as nested inner fence start; indent until the matching inner fence (inclusive) + changed = true; + const innerTicksLen = ticksLen; + lines[i] = ' ' + lines[i]; + i++; + // Indent lines until we find a fence line with the same tick length (closing the inner block) + while (i < lines.length) { + const innerLine = lines[i]; + const m2 = innerLine.match(fenceLineRe); + lines[i] = ' ' + innerLine; + i++; + if (m2 && m2.groups.ticks.length === innerTicksLen) break; // we've indented the closing inner fence; stop + } + continue; + } + + // Regular line inside outer block + i++; + } + + if (changed) { + fs.writeFileSync(filePath, lines.join('\n')); + return true; + } + return false; +} + +function main() { + const roots = TARGET_DIRS.map(d => path.join(ROOT, d)); + let files = []; + for (const d of roots) { + if (fs.existsSync(d) && fs.statSync(d).isDirectory()) { + files = files.concat(walk(d)); + } + } + + let modified = 0; + for (const f of files) { + try { + if (processFile(f)) modified++; + } catch (err) { + // Log and continue + console.error(`Error processing ${f}:`, err.message); + } + } + + console.log(`Processed ${files.length} files. Modified ${modified} file(s).`); +} + +if (require.main === module) { + main(); +}
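+
+// Illustrative sketch of the transformation (assumes Node.js is on PATH; run
+// from the repo root):
+//
+//   node scripts/indent-nested-md-code.js
+//
+// Given a markdown file containing a fence nested inside another fence:
+//
+//   ```markdown
+//   ```bash
+//   echo hello
+//   ```
+//   ```
+//
+// the three inner lines gain four leading spaces, so GitHub keeps rendering
+// the outer block as a single piece:
+//
+//   ```markdown
+//       ```bash
+//       echo hello
+//       ```
+//   ```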