diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md
new file mode 100644
index 0000000..77bc1f0
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug-report.md
@@ -0,0 +1,35 @@
+---
+name: Bug Report
+about: Create a report to help us improve Google Workspace MCP
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**Startup Logs**
+Include the startup output, from the Active Configuration section through "Uvicorn running".
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Platform (please complete the following information):**
+ - OS: [e.g. macOS, Ubuntu, Windows]
+ - Container: [if applicable, e.g. Docker]
+ - Version: [e.g. v1.2.0]
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000..bbcbbe7
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/.gitignore b/.gitignore
index 59278b6..c73adf2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,8 @@
 __pycache__/
 *.py[cod]
 *.so
+.mcp.json
+claude.md
 
 # ---- Packaging ---------------------------------------------------------
 *.egg-info/
@@ -22,4 +24,8 @@ venv/
 client_secret.json
 
 # ---- Logs --------------------------------------------------------------
-mcp_server_debug.log
\ No newline at end of file
+mcp_server_debug.log
+
+# ---- Local development files -------------------------------------------
+/.credentials
+/.claude
diff --git a/Dockerfile b/Dockerfile
index 9ea191d..ea9ef4e 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -10,24 +10,14 @@ RUN apt-get update && apt-get install -y \
 # Install uv for faster dependency management
 RUN pip install --no-cache-dir uv
 
-# Copy dependency files
-COPY pyproject.toml uv.lock ./
+# Copy all application code first (needed for uv sync to work with local package)
+COPY . .
 
-# Install Python dependencies from pyproject.toml
-RUN uv pip install --system --no-cache \
-    fastapi>=0.115.12 \
-    fastmcp>=2.3.3 \
-    google-api-python-client>=2.168.0 \
-    google-auth-httplib2>=0.2.0 \
-    google-auth-oauthlib>=1.2.2 \
-    httpx>=0.28.1 \
-    "mcp[cli]>=1.6.0" \
-    sse-starlette>=2.3.3 \
-    uvicorn>=0.34.2 \
-    pyjwt>=2.10.1
+# Install Python dependencies using uv sync
+RUN uv sync --frozen --no-dev
 
-# Copy application code
-COPY . .
+# Activate virtual environment for all subsequent commands
+ENV PATH="/app/.venv/bin:$PATH"
 
 # Create placeholder client_secrets.json for lazy loading capability
 RUN echo '{"installed":{"client_id":"placeholder","client_secret":"placeholder","auth_uri":"https://accounts.google.com/o/oauth2/auth","token_uri":"https://oauth2.googleapis.com/token","redirect_uris":["http://localhost:8000/oauth2callback"]}}' > /app/client_secrets.json
@@ -68,5 +58,5 @@ HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \
 RUN echo "=== Debug: Final startup test ===" && \
     python -c "print('Testing main.py import...'); import main; print('Main.py import successful')"
 
-# Command to run the application
-CMD ["python", "main.py", "--transport", "streamable-http"]
+# Command to run the application (use uv run to ensure virtual environment)
+CMD ["uv", "run", "main.py", "--transport", "streamable-http"]
diff --git a/README.md b/README.md
index ac9c56a..5d4b08c 100644
--- a/README.md
+++ b/README.md
@@ -7,13 +7,12 @@
 [![PyPI](https://img.shields.io/pypi/v/workspace-mcp.svg)](https://pypi.org/project/workspace-mcp/)
 [![PyPI Downloads](https://static.pepy.tech/badge/workspace-mcp/month)](https://pepy.tech/projects/workspace-mcp)
 [![Website](https://img.shields.io/badge/Website-workspacemcp.com-green.svg)](https://workspacemcp.com)
-[![Verified on MseeP](https://mseep.ai/badge.svg)](https://mseep.ai/app/eebbc4a6-0f8c-41b2-ace8-038e5516dba0)
 
-**The most feature-complete Google Workspace MCP server**, now with multi-user support and 1-click Claude installation
+**The most feature-complete Google Workspace MCP server**, now with Remote OAuth2.1 multi-user support and 1-click Claude installation.
 
 *Full natural language control over Google Calendar, Drive, Gmail, Docs, Sheets, Slides, Forms, Tasks, and Chat through all MCP clients, AI assistants and developer tools.*
 
-###### Support for all free Google accounts (Gmail, Docs, Drive etc) & Google Workspace plans (Starter, Standard, Plus, Enterprise, Non Profit etc) with their expanded app options like Chat & Spaces.
+###### Support for all free Google accounts (Gmail, Docs, Drive etc) & Google Workspace plans (Starter, Standard, Plus, Enterprise, Non Profit etc) with expanded app options like Chat & Spaces.
 
 </div>
 
@@ -42,22 +41,24 @@
 
 **This README was written with AI assistance, and here's why that matters**
 >
-> As a solo dev building open source tools that many never see outside use, comprehensive documentation often wouldn't happen without AI help. Using agentic dev tools like **Roo** & **Claude Code** that understand the entire codebase, AI doesn't just regurgitate generic content - it extracts real implementation details and creates accurate, specific documentation.
+> As a solo dev building open source tools, comprehensive documentation often wouldn't happen without AI help. Using agentic dev tools like **Roo** & **Claude Code** that understand the entire codebase, AI doesn't just regurgitate generic content - it extracts real implementation details and creates accurate, specific documentation.
 >
-> In this case, Sonnet 4 took a pass & a human (me) verified them 7/10/25.
+> In this case, Sonnet 4 took a pass & a human (me) verified them 8/9/25.
 </details>
 
 ## Overview
 
 A production-ready MCP server that integrates all major Google Workspace services with AI assistants. It supports both single-user operation and multi-user authentication via OAuth 2.1, making it a powerful backend for custom applications. Built with FastMCP for optimal performance, featuring advanced authentication handling, service caching, and streamlined development patterns.
 
+**Simplified Setup**: Now uses Google Desktop OAuth clients - no redirect URIs or port configuration needed!
+
 ## Features
 
 - **🔐 Advanced OAuth 2.0 & OAuth 2.1**: Secure authentication with automatic token refresh, transport-aware callback handling, session management, centralized scope management, and OAuth 2.1 bearer token support for multi-user environments with innovative CORS proxy architecture
 - **📅 Google Calendar**: Full calendar management with event CRUD operations
 - **📁 Google Drive**: File operations with native Microsoft Office format support (.docx, .xlsx)
 - **📧 Gmail**: Complete email management with search, send, and draft capabilities
-- **📄 Google Docs**: Document operations including content extraction, creation, and comment management
+- **📄 Google Docs**: Complete document management including content extraction, creation, full editing capabilities, and comment management
 - **📊 Google Sheets**: Comprehensive spreadsheet management with flexible cell operations and comment management
 - **🖼️ Google Slides**: Presentation management with slide creation, updates, content manipulation, and comment management
 - **📝 Google Forms**: Form creation, retrieval, publish settings, and response management
@@ -66,7 +67,7 @@ A production-ready MCP server that integrates all major Google Workspace service
 - **🔍 Google Custom Search**: Programmable Search Engine (PSE) integration for custom web searches
 - **🔄 All Transports**: Stdio, Streamable HTTP & SSE, OpenAPI compatibility via `mcpo`
 - **⚡ High Performance**: Service caching, thread-safe sessions, FastMCP integration
-- **🧩 Developer Friendly**: Minimal boilerplate, automatic service injection, centralized configuration
+- **🧩 Developer Friendly**: Minimal boilerplate, automatic service injection, centralized configuration
 
 ---
 
@@ -114,12 +115,12 @@ Claude Desktop stores these securely in the OS keychain; set them once in the ex
 ### Configuration
 
 1. **Google Cloud Setup**:
-   - Create OAuth 2.0 credentials (web application) in [Google Cloud Console](https://console.cloud.google.com/)
+   - Create OAuth 2.0 credentials in [Google Cloud Console](https://console.cloud.google.com/)
    - Create a new project (or use an existing one) for your MCP server.
    - Navigate to APIs & Services → Credentials.
    - Click Create Credentials → OAuth Client ID.
-   - Choose Web Application as the application type.
-   - Add redirect URI: `http://localhost:8000/oauth2callback`
+   - **Choose Desktop Application as the application type** (simpler setup, no redirect URIs needed!)
+   - Download your credentials and note the Client ID and Client Secret
 
    - **Enable APIs**:
    - In the Google Cloud Console, go to APIs & Services → Library.
@@ -149,7 +150,7 @@ Claude Desktop stores these securely in the OS keychain; set them once in the ex
      ```bash
      export GOOGLE_OAUTH_CLIENT_ID="your-client-id.apps.googleusercontent.com"
      export GOOGLE_OAUTH_CLIENT_SECRET="your-client-secret"
-     export GOOGLE_OAUTH_REDIRECT_URI="http://localhost:8000/oauth2callback"  # Optional
+     export GOOGLE_OAUTH_REDIRECT_URI="http://localhost:8000/oauth2callback"  # Optional - see Reverse Proxy Setup below
      ```
 
      **Option B: File-based (Traditional)**
@@ -188,10 +189,10 @@ Claude Desktop stores these securely in the OS keychain; set them once in the ex
 
 3. **Server Configuration**:
    The server's base URL and port can be customized using environment variables:
-   - `WORKSPACE_MCP_BASE_URI`: Sets the base URI for the server (default: http://localhost). This affects the `server_url` used to construct the default `OAUTH_REDIRECT_URI` if `GOOGLE_OAUTH_REDIRECT_URI` is not set.
-   - `WORKSPACE_MCP_PORT`: Sets the port the server listens on (default: 8000). This affects the server_url, port, and OAUTH_REDIRECT_URI.
+   - `WORKSPACE_MCP_BASE_URI`: Sets the base URI for the server (default: http://localhost). Note: do not include a port in `WORKSPACE_MCP_BASE_URI` - set that with the variable below.
+   - `WORKSPACE_MCP_PORT`: Sets the port the server listens on (default: 8000).
+   - `GOOGLE_OAUTH_REDIRECT_URI`: Override the OAuth redirect URI (useful for reverse proxy setups - see below).
    - `USER_GOOGLE_EMAIL`: Optional default email for authentication flows. If set, the LLM won't need to specify your email when calling `start_google_auth`.
-   - `GOOGLE_OAUTH_REDIRECT_URI`: Sets an override for OAuth redirect specifically, must include a full address (i.e. include port if necessary). Use this if you want to run your OAuth redirect separately from the MCP. This is not recommended outside of very specific cases
 
 ### Google Custom Search Setup
 
@@ -278,6 +279,24 @@ This architecture enables any OAuth 2.1 compliant client to authenticate users t
 
 </details>
 
+**MCP Inspector**: No additional configuration needed with desktop OAuth client.
+
+**Claude Code Inspector**: No additional configuration needed with desktop OAuth client.
+
+### VS Code MCP Client Support
+**VS Code mcp.json Configuration Example**:
+
+```json
+{
+    "servers": {
+        "google-workspace": {
+            "url": "http://localhost:8000/mcp/",
+            "type": "http"
+        }
+    }
+}
+```
+
 ### Connect to Claude Desktop
 
 The server supports two transport modes:
@@ -334,14 +353,36 @@ uvx workspace-mcp --tools gmail drive calendar
 
 > Run instantly without manual installation - you must configure OAuth credentials when using uvx. You can use either environment variables (recommended for production) or set the `GOOGLE_CLIENT_SECRET_PATH` (or legacy `GOOGLE_CLIENT_SECRETS`) environment variable to point to your `client_secret.json` file.
 
+#### Reverse Proxy Setup
+
+If you're running the MCP server behind a reverse proxy (nginx, Apache, Cloudflare, etc.), you'll need to configure `GOOGLE_OAUTH_REDIRECT_URI` to match your external URL:
+
+**Problem**: When behind a reverse proxy, the server constructs redirect URIs using internal ports (e.g., `http://localhost:8000/oauth2callback`) but Google expects the external URL (e.g., `https://your-domain.com/oauth2callback`).
+
+You can also optionally set:
+- `OAUTH_CUSTOM_REDIRECT_URIS` *(optional)*: Comma-separated list of additional redirect URIs
+- `OAUTH_ALLOWED_ORIGINS` *(optional)*: Comma-separated list of additional CORS origins
+
+**Solution**: Set `GOOGLE_OAUTH_REDIRECT_URI` to your external URL:
+
+```bash
+# External URL without port (nginx/Apache handling HTTPS)
+export GOOGLE_OAUTH_REDIRECT_URI="https://your-domain.com/oauth2callback"
+
+# Or with custom port if needed
+export GOOGLE_OAUTH_REDIRECT_URI="https://your-domain.com:8443/oauth2callback"
+```
+
+**Important**:
+- The redirect URI must exactly match what's configured in your Google Cloud Console
+- The server will use this value for all OAuth flows instead of constructing it from `WORKSPACE_MCP_BASE_URI` and `WORKSPACE_MCP_PORT`
+- Your reverse proxy must forward `/oauth2callback` requests to the MCP server
+
 ```bash
 # Set OAuth credentials via environment variables (recommended)
 export GOOGLE_OAUTH_CLIENT_ID="your-client-id.apps.googleusercontent.com"
 export GOOGLE_OAUTH_CLIENT_SECRET="your-client-secret"
 
-# Start the server with all Google Workspace tools
-uvx workspace-mcp
-
 # Start with specific tools only
 uvx workspace-mcp --tools gmail drive calendar tasks
 
@@ -401,17 +442,18 @@ If you need to use HTTP mode with Claude Desktop:
 
 ### First-Time Authentication
 
-The server features **transport-aware OAuth callback handling**:
+The server uses **Google Desktop OAuth** for simplified authentication:
 
-- **Stdio Mode**: Automatically starts a minimal HTTP server on port 8000 for OAuth callbacks
-- **HTTP Mode**: Uses the existing FastAPI server for OAuth callbacks
-- **Same OAuth Flow**: Both modes use `http://localhost:8000/oauth2callback` for consistency
+- **No redirect URIs needed**: Desktop OAuth clients handle authentication without complex callback URLs
+- **Automatic flow**: The server manages the entire OAuth process transparently
+- **Transport-agnostic**: Works seamlessly in both stdio and HTTP modes
 
 When calling a tool:
 1. Server returns authorization URL
 2. Open URL in browser and authorize
-3. Server handles OAuth callback automatically (on port 8000 in both modes)
-4. Retry the original request
+3. Google provides an authorization code
+4. Paste the code when prompted (or it's handled automatically)
+5. Server completes authentication and retries your request
 
 ---
 
@@ -426,8 +468,8 @@ When calling a tool:
 | `list_calendars` | List accessible calendars |
 | `get_events` | Retrieve events with time range filtering |
 | `get_event` | Fetch detailed information of a single event by ID |
-| `create_event` | Create events (all-day or timed) with optional Drive file attachments |
-| `modify_event` | Update existing events |
+| `create_event` | Create events (all-day or timed) with optional Drive file attachments and custom reminders |
+| `modify_event` | Update existing events with intelligent reminder handling |
 | `delete_event` | Remove events |
 
 ### 📁 Google Drive ([`drive_tools.py`](gdrive/drive_tools.py))
@@ -456,6 +498,13 @@ When calling a tool:
 | `get_doc_content` | Extract document text |
 | `list_docs_in_folder` | List docs in folder |
 | `create_doc` | Create new documents |
+| `update_doc_text` | Insert or replace text at specific positions |
+| `find_and_replace_doc` | Find and replace text throughout document |
+| `format_doc_text` | Apply text formatting (bold, italic, underline, fonts) |
+| `insert_doc_elements` | Add tables, lists, or page breaks |
+| `insert_doc_image` | Insert images from Drive or URLs |
+| `update_doc_headers_footers` | Modify document headers and footers |
+| `batch_update_doc` | Execute multiple document operations atomically |
 | `read_doc_comments` | Read all comments and replies |
 | `create_doc_comment` | Create new comments |
 | `reply_to_comment` | Reply to existing comments |
@@ -574,10 +623,10 @@ async def your_new_tool(service, param1: str, param2: int = 10):
 
 ## 🔒 Security
 
-- **Credentials**: Never commit `client_secret.json` or `.credentials/` directory
+- **Credentials**: Never commit `.env`, `client_secret.json` or the `.credentials/` directory to source control!
 - **OAuth Callback**: Uses `http://localhost:8000/oauth2callback` for development (requires `OAUTHLIB_INSECURE_TRANSPORT=1`)
 - **Transport-Aware Callbacks**: Stdio mode starts a minimal HTTP server only for OAuth, ensuring callbacks work in all modes
-- **Production**: Use HTTPS for callback URIs and configure accordingly
+- **Production**: Use HTTPS & OAuth 2.1 and configure accordingly
 - **Network Exposure**: Consider authentication when using `mcpo` over networks
 - **Scope Minimization**: Tools request only necessary permissions
 
@@ -637,6 +686,11 @@ MIT License - see `LICENSE` file for details.
 
 ---
 
+Validations:
+[![MCP Badge](https://lobehub.com/badge/mcp/taylorwilsdon-google_workspace_mcp)](https://lobehub.com/mcp/taylorwilsdon-google_workspace_mcp)
+[![Verified on MseeP](https://mseep.ai/badge.svg)](https://mseep.ai/app/eebbc4a6-0f8c-41b2-ace8-038e5516dba0)
+
+
 <div align="center">
 <img width="810" alt="Gmail Integration" src="https://github.com/user-attachments/assets/656cea40-1f66-40c1-b94c-5a2c900c969d" />
 <img width="810" alt="Calendar Management" src="https://github.com/user-attachments/assets/d3c2a834-fcca-4dc5-8990-6d6dc1d96048" />
diff --git a/auth/google_remote_auth_provider.py b/auth/google_remote_auth_provider.py
index b5fd6db..3731474 100644
--- a/auth/google_remote_auth_provider.py
+++ b/auth/google_remote_auth_provider.py
@@ -24,6 +24,7 @@ from pydantic import AnyHttpUrl
 try:
     from fastmcp.server.auth import RemoteAuthProvider
     from fastmcp.server.auth.providers.jwt import JWTVerifier
+
     REMOTEAUTHPROVIDER_AVAILABLE = True
 except ImportError:
     REMOTEAUTHPROVIDER_AVAILABLE = False
@@ -35,9 +36,10 @@ except ImportError:
 from auth.oauth_common_handlers import (
     handle_oauth_authorize,
     handle_proxy_token_exchange,
+    handle_oauth_protected_resource,
     handle_oauth_authorization_server,
     handle_oauth_client_config,
-    handle_oauth_register
+    handle_oauth_register,
 )
 
 logger = logging.getLogger(__name__)
@@ -45,114 +47,163 @@ logger = logging.getLogger(__name__)
 
 class GoogleRemoteAuthProvider(RemoteAuthProvider):
     """
-    RemoteAuthProvider implementation for Google Workspace using FastMCP v2.11.1+.
-    
+    RemoteAuthProvider implementation for Google Workspace.
+
     This provider extends RemoteAuthProvider to add:
     - OAuth proxy endpoints for CORS workaround
     - Dynamic client registration support
-    - Enhanced session management with issuer tracking
+    - Session management with issuer tracking
     """
-    
+
     def __init__(self):
         """Initialize the Google RemoteAuthProvider."""
         if not REMOTEAUTHPROVIDER_AVAILABLE:
             raise ImportError("FastMCP v2.11.1+ required for RemoteAuthProvider")
-        
+
         # Get configuration from environment
         self.client_id = os.getenv("GOOGLE_OAUTH_CLIENT_ID")
         self.client_secret = os.getenv("GOOGLE_OAUTH_CLIENT_SECRET")
         self.base_url = os.getenv("WORKSPACE_MCP_BASE_URI", "http://localhost")
         self.port = int(os.getenv("PORT", os.getenv("WORKSPACE_MCP_PORT", 8000)))
-        
+
         if not self.client_id:
-            logger.error("GOOGLE_OAUTH_CLIENT_ID not set - OAuth 2.1 authentication will not work")
-            raise ValueError("GOOGLE_OAUTH_CLIENT_ID environment variable is required for OAuth 2.1 authentication")
-        
+            logger.error(
+                "GOOGLE_OAUTH_CLIENT_ID not set - OAuth 2.1 authentication will not work"
+            )
+            raise ValueError(
+                "GOOGLE_OAUTH_CLIENT_ID environment variable is required for OAuth 2.1 authentication"
+            )
+
         # Configure JWT verifier for Google tokens
         token_verifier = JWTVerifier(
             jwks_uri="https://www.googleapis.com/oauth2/v3/certs",
             issuer="https://accounts.google.com",
             audience=self.client_id,  # Always use actual client_id
-            algorithm="RS256"
+            algorithm="RS256",
         )
-        
-        # Initialize RemoteAuthProvider with local server as the authorization server
-        # This ensures OAuth discovery points to our proxy endpoints instead of Google directly
+
+        # Initialize RemoteAuthProvider with base URL (no /mcp/ suffix)
+        # The /mcp/ resource URL is handled in the protected resource metadata endpoint
         super().__init__(
             token_verifier=token_verifier,
             authorization_servers=[AnyHttpUrl(f"{self.base_url}:{self.port}")],
-            resource_server_url=f"{self.base_url}:{self.port}"
+            resource_server_url=f"{self.base_url}:{self.port}",
         )
-        
-        logger.debug("GoogleRemoteAuthProvider initialized")
-    
+
+        logger.debug("GoogleRemoteAuthProvider initialized")
+
     def get_routes(self) -> List[Route]:
         """
-        Add custom OAuth proxy endpoints to the standard protected resource routes.
-        
-        These endpoints work around Google's CORS restrictions and provide
-        dynamic client registration support.
+        Add OAuth routes at canonical locations.
         """
         # Get the standard OAuth protected resource routes from RemoteAuthProvider
-        routes = super().get_routes()
-        
-        # Log what routes we're getting from the parent
-        logger.debug(f"Registered {len(routes)} OAuth routes from parent")
-        
-        # Add our custom proxy endpoints using common handlers
-        routes.append(Route("/oauth2/authorize", handle_oauth_authorize, methods=["GET", "OPTIONS"]))
-        
-        routes.append(Route("/oauth2/token", handle_proxy_token_exchange, methods=["POST", "OPTIONS"]))
-        
-        routes.append(Route("/oauth2/register", handle_oauth_register, methods=["POST", "OPTIONS"]))
-        
-        routes.append(Route("/.well-known/oauth-authorization-server", handle_oauth_authorization_server, methods=["GET", "OPTIONS"]))
-        
-        routes.append(Route("/.well-known/oauth-client", handle_oauth_client_config, methods=["GET", "OPTIONS"]))
-        
+        parent_routes = super().get_routes()
+
+        # Filter out the parent's oauth-protected-resource route since we're replacing it
+        routes = [
+            r
+            for r in parent_routes
+            if r.path != "/.well-known/oauth-protected-resource"
+        ]
+
+        # Add our custom OAuth discovery endpoint that returns /mcp/ as the resource
+        routes.append(
+            Route(
+                "/.well-known/oauth-protected-resource",
+                handle_oauth_protected_resource,
+                methods=["GET", "OPTIONS"],
+            )
+        )
+
+        routes.append(
+            Route(
+                "/.well-known/oauth-authorization-server",
+                handle_oauth_authorization_server,
+                methods=["GET", "OPTIONS"],
+            )
+        )
+
+        routes.append(
+            Route(
+                "/.well-known/oauth-client",
+                handle_oauth_client_config,
+                methods=["GET", "OPTIONS"],
+            )
+        )
+
+        # Add OAuth flow endpoints
+        routes.append(
+            Route(
+                "/oauth2/authorize", handle_oauth_authorize, methods=["GET", "OPTIONS"]
+            )
+        )
+        routes.append(
+            Route(
+                "/oauth2/token",
+                handle_proxy_token_exchange,
+                methods=["POST", "OPTIONS"],
+            )
+        )
+        routes.append(
+            Route(
+                "/oauth2/register", handle_oauth_register, methods=["POST", "OPTIONS"]
+            )
+        )
+
+        logger.info(f"Registered {len(routes)} OAuth routes")
         return routes
-    
+
     async def verify_token(self, token: str) -> Optional[object]:
         """
         Override verify_token to handle Google OAuth access tokens.
-        
+
         Google OAuth access tokens (ya29.*) are opaque tokens that need to be
         verified using the tokeninfo endpoint, not JWT verification.
         """
         # Check if this is a Google OAuth access token (starts with ya29.)
         if token.startswith("ya29."):
-            logger.debug("Detected Google OAuth access token, using tokeninfo verification")
-            
+            logger.debug(
+                "Detected Google OAuth access token, using tokeninfo verification"
+            )
+
             try:
                 # Verify the access token using Google's tokeninfo endpoint
                 async with aiohttp.ClientSession() as session:
-                    url = f"https://oauth2.googleapis.com/tokeninfo?access_token={token}"
+                    url = (
+                        f"https://oauth2.googleapis.com/tokeninfo?access_token={token}"
+                    )
                     async with session.get(url) as response:
                         if response.status != 200:
-                            logger.error(f"Token verification failed: {response.status}")
+                            logger.error(
+                                f"Token verification failed: {response.status}"
+                            )
                             return None
-                        
+
                         token_info = await response.json()
-                        
+
                         # Verify the token is for our client
                         if token_info.get("aud") != self.client_id:
-                            logger.error(f"Token audience mismatch: expected {self.client_id}, got {token_info.get('aud')}")
+                            logger.error(
+                                f"Token audience mismatch: expected {self.client_id}, got {token_info.get('aud')}"
+                            )
                             return None
-                        
+
                         # Check if token is expired
                         expires_in = token_info.get("expires_in", 0)
                         if int(expires_in) <= 0:
                             logger.error("Token is expired")
                             return None
-                        
+
                         # Create an access token object that matches the expected interface
                         from types import SimpleNamespace
                         import time
-                        
+
                         # Calculate expires_at timestamp
                         expires_in = int(token_info.get("expires_in", 0))
-                        expires_at = int(time.time()) + expires_in if expires_in > 0 else 0
-                        
+                        expires_at = (
+                            int(time.time()) + expires_in if expires_in > 0 else 0
+                        )
+
                         access_token = SimpleNamespace(
                             claims={
                                 "email": token_info.get("email"),
@@ -166,26 +217,32 @@ class GoogleRemoteAuthProvider(RemoteAuthProvider):
                             client_id=self.client_id,  # Add client_id at top level
                             # Add other required fields
                             sub=token_info.get("sub", ""),
-                            email=token_info.get("email", "")
+                            email=token_info.get("email", ""),
                         )
-                        
+
                         user_email = token_info.get("email")
                         if user_email:
-                            from auth.oauth21_session_store import get_oauth21_session_store
+                            from auth.oauth21_session_store import (
+                                get_oauth21_session_store,
+                            )
+
                             store = get_oauth21_session_store()
                             session_id = f"google_{token_info.get('sub', 'unknown')}"
-                            
+
                             # Try to get FastMCP session ID for binding
                             mcp_session_id = None
                             try:
                                 from fastmcp.server.dependencies import get_context
+
                                 ctx = get_context()
-                                if ctx and hasattr(ctx, 'session_id'):
+                                if ctx and hasattr(ctx, "session_id"):
                                     mcp_session_id = ctx.session_id
-                                    logger.debug(f"Binding MCP session {mcp_session_id} to user {user_email}")
+                                    logger.debug(
+                                        f"Binding MCP session {mcp_session_id} to user {user_email}"
+                                    )
                             except Exception:
                                 pass
-                            
+
                             # Store session with issuer information
                             store.store_session(
                                 user_email=user_email,
@@ -193,39 +250,42 @@ class GoogleRemoteAuthProvider(RemoteAuthProvider):
                                 scopes=access_token.scopes,
                                 session_id=session_id,
                                 mcp_session_id=mcp_session_id,
-                                issuer="https://accounts.google.com"
+                                issuer="https://accounts.google.com",
                             )
-                            
+
                             logger.info(f"Verified OAuth token: {user_email}")
-                        
+
                         return access_token
-                        
+
             except Exception as e:
                 logger.error(f"Error verifying Google OAuth token: {e}")
                 return None
-        
+
         else:
             # For JWT tokens, use parent's JWT verification
             logger.debug("Using JWT verification for non-OAuth token")
             access_token = await super().verify_token(token)
-            
+
             if access_token and self.client_id:
                 # Extract user information from token claims
                 user_email = access_token.claims.get("email")
                 if user_email:
                     from auth.oauth21_session_store import get_oauth21_session_store
+
                     store = get_oauth21_session_store()
                     session_id = f"google_{access_token.claims.get('sub', 'unknown')}"
-                    
+
                     # Store session with issuer information
                     store.store_session(
                         user_email=user_email,
                         access_token=token,
                         scopes=access_token.scopes or [],
                         session_id=session_id,
-                        issuer="https://accounts.google.com"
+                        issuer="https://accounts.google.com",
                     )
-                    
-                    logger.debug(f"Successfully verified JWT token for user: {user_email}")
-            
-            return access_token
\ No newline at end of file
+
+                    logger.debug(
+                        f"Successfully verified JWT token for user: {user_email}"
+                    )
+
+            return access_token
diff --git a/auth/oauth21_integration.py b/auth/oauth21_integration.py
deleted file mode 100644
index 29243c6..0000000
--- a/auth/oauth21_integration.py
+++ /dev/null
@@ -1,246 +0,0 @@
-"""
-OAuth 2.1 Integration for Google Services
-
-This module provides integration between FastMCP OAuth sessions and Google services,
-allowing authenticated sessions to be passed through to Google API calls.
-"""
-
-import asyncio
-import logging
-from typing import Optional, Tuple, Any, Dict
-
-from googleapiclient.discovery import build
-
-from auth.google_auth import (
-    GoogleAuthenticationError,
-)
-
-logger = logging.getLogger(__name__)
-
-
-class OAuth21GoogleServiceBuilder:
-    """Builds Google services using FastMCP OAuth authenticated sessions."""
-
-    def __init__(self):
-        """
-        Initialize the service builder.
-        """
-        self._service_cache: Dict[str, Tuple[Any, str]] = {}
-
-    def extract_session_from_context(self, context: Optional[Dict[str, Any]] = None) -> Optional[str]:
-        """
-        Extract session ID from various context sources.
-
-        Args:
-            context: Context dictionary that may contain session information
-
-        Returns:
-            Session ID if found, None otherwise
-        """
-        if not context:
-            return None
-
-        # Try to extract from OAuth 2.1 auth context
-        if "auth_context" in context and hasattr(context["auth_context"], "session_id"):
-            return context["auth_context"].session_id
-
-        # Try direct session_id
-        if "session_id" in context:
-            return context["session_id"]
-
-        # Try from request state
-        if "request" in context:
-            request = context["request"]
-            if hasattr(request, "state") and hasattr(request.state, "auth"):
-                auth_ctx = request.state.auth
-                if hasattr(auth_ctx, "session_id"):
-                    return auth_ctx.session_id
-
-        return None
-
-    async def get_authenticated_service_with_session(
-        self,
-        service_name: str,
-        version: str,
-        tool_name: str,
-        user_google_email: str,
-        required_scopes: list[str],
-        session_id: Optional[str] = None,
-        auth_context: Optional[Any] = None,
-    ) -> Tuple[Any, str]:
-        """
-        Get authenticated Google service using OAuth 2.1 session if available.
-
-        Args:
-            service_name: Google service name (e.g., "gmail", "drive")
-            version: API version (e.g., "v1", "v3")
-            tool_name: Name of the tool for logging
-            user_google_email: User's Google email
-            required_scopes: Required OAuth scopes
-            session_id: OAuth 2.1 session ID
-            auth_context: OAuth 2.1 authentication context
-
-        Returns:
-            Tuple of (service instance, actual user email)
-
-        Raises:
-            GoogleAuthenticationError: If authentication fails
-        """
-        cache_key = f"{user_google_email}:{service_name}:{version}:{':'.join(sorted(required_scopes))}"
-
-        # Check cache first
-        if cache_key in self._service_cache:
-            logger.debug(f"[{tool_name}] Using cached service for {user_google_email}")
-            return self._service_cache[cache_key]
-
-        try:
-            # First check the global OAuth 2.1 session store
-            from auth.oauth21_session_store import get_oauth21_session_store
-            store = get_oauth21_session_store()
-            credentials = store.get_credentials(user_google_email)
-
-            if credentials and credentials.valid:
-                logger.info(f"[{tool_name}] Found OAuth 2.1 credentials in global store for {user_google_email}")
-
-                # Build the service
-                service = await asyncio.to_thread(
-                    build, service_name, version, credentials=credentials
-                )
-
-                # Cache the service
-                self._service_cache[cache_key] = (service, user_google_email)
-
-                return service, user_google_email
-
-            # If OAuth 2.1 is not enabled, fall back to legacy authentication
-            if not is_oauth21_enabled():
-                logger.debug(f"[{tool_name}] OAuth 2.1 is not enabled. Falling back to legacy authentication for {user_google_email}")
-                return await get_legacy_auth_service(
-                    service_name=service_name,
-                    version=version,
-                    tool_name=tool_name,
-                    user_google_email=user_google_email,
-                    required_scopes=required_scopes,
-                )
-
-            # If we are here, it means OAuth 2.1 is enabled but credentials are not found
-            logger.error(f"[{tool_name}] OAuth 2.1 is enabled, but no valid credentials found for {user_google_email}")
-            raise GoogleAuthenticationError(
-                f"OAuth 2.1 is enabled, but no valid credentials found for {user_google_email}"
-            )
-
-        except Exception as e:
-            logger.error(f"[{tool_name}] Authentication failed for {user_google_email}: {e}")
-            raise GoogleAuthenticationError(
-                f"Failed to authenticate for {service_name}: {str(e)}"
-            )
-
-    def clear_cache(self):
-        """Clear the service cache."""
-        self._service_cache.clear()
-        logger.debug("Cleared OAuth 2.1 service cache")
-
-
-# Global instance
-_global_service_builder: Optional[OAuth21GoogleServiceBuilder] = None
-
-
-def get_oauth21_service_builder() -> OAuth21GoogleServiceBuilder:
-    """Get the global OAuth 2.1 service builder instance."""
-    global _global_service_builder
-    if _global_service_builder is None:
-        _global_service_builder = OAuth21GoogleServiceBuilder()
-    return _global_service_builder
-
-
-def set_auth_layer(auth_layer):
-    """
-    Legacy compatibility function - no longer needed with FastMCP auth.
-    """
-    logger.info("set_auth_layer called - OAuth is now handled by FastMCP")
-
-
-_oauth21_enabled = False
-
-def is_oauth21_enabled() -> bool:
-    """
-    Check if the OAuth 2.1 authentication layer is active.
-    """
-    global _oauth21_enabled
-    return _oauth21_enabled
-
-
-def enable_oauth21():
-    """
-    Enable the OAuth 2.1 authentication layer.
-    """
-    global _oauth21_enabled
-    _oauth21_enabled = True
-    logger.debug("OAuth 2.1 authentication enabled")
-
-
-async def get_legacy_auth_service(
-    service_name: str,
-    version: str,
-    tool_name: str,
-    user_google_email: str,
-    required_scopes: list[str],
-) -> Tuple[Any, str]:
-    """
-    Get authenticated Google service using legacy authentication.
-    """
-    from auth.google_auth import get_authenticated_google_service as legacy_get_service
-
-    return await legacy_get_service(
-        service_name=service_name,
-        version=version,
-        tool_name=tool_name,
-        user_google_email=user_google_email,
-        required_scopes=required_scopes,
-    )
-
-
-async def get_authenticated_google_service_oauth21(
-    service_name: str,
-    version: str,
-    tool_name: str,
-    user_google_email: str,
-    required_scopes: list[str],
-    context: Optional[Dict[str, Any]] = None,
-) -> Tuple[Any, str]:
-    """
-    Enhanced version of get_authenticated_google_service that supports OAuth 2.1.
-
-    This function checks for OAuth 2.1 session context and uses it if available,
-    otherwise falls back to legacy authentication.
-
-    Args:
-        service_name: Google service name
-        version: API version
-        tool_name: Tool name for logging
-        user_google_email: User's Google email
-        required_scopes: Required OAuth scopes
-        context: Optional context containing session information
-
-    Returns:
-        Tuple of (service instance, actual user email)
-    """
-    builder = get_oauth21_service_builder()
-
-    # FastMCP handles context now - extract any session info
-    session_id = None
-    auth_context = None
-
-    if context:
-        session_id = builder.extract_session_from_context(context)
-        auth_context = context.get("auth_context")
-
-    return await builder.get_authenticated_service_with_session(
-        service_name=service_name,
-        version=version,
-        tool_name=tool_name,
-        user_google_email=user_google_email,
-        required_scopes=required_scopes,
-        session_id=session_id,
-        auth_context=auth_context,
-    )
\ No newline at end of file
diff --git a/auth/oauth_callback_server.py b/auth/oauth_callback_server.py
index 32d48c9..9693340 100644
--- a/auth/oauth_callback_server.py
+++ b/auth/oauth_callback_server.py
@@ -5,7 +5,6 @@ In streamable-http mode: Uses the existing FastAPI server
 In stdio mode: Starts a minimal HTTP server just for OAuth callbacks
 """
 
-import os
 import asyncio
 import logging
 import threading
@@ -17,9 +16,10 @@ from fastapi import FastAPI, Request
 from typing import Optional
 from urllib.parse import urlparse
 
-# Import moved inside functions to avoid circular import
 from auth.scopes import SCOPES
 from auth.oauth_responses import create_error_response, create_success_response, create_server_error_response
+from auth.google_auth import handle_auth_callback, check_client_secrets
+from auth.oauth_config import get_oauth_redirect_uri
 
 logger = logging.getLogger(__name__)
 
@@ -62,7 +62,6 @@ class MinimalOAuthServer:
 
             try:
                 # Check if we have credentials available (environment variables or file)
-                from auth.google_auth import check_client_secrets
                 error_message = check_client_secrets()
                 if error_message:
                     return create_server_error_response(error_message)
@@ -72,8 +71,7 @@ class MinimalOAuthServer:
                 # Session ID tracking removed - not needed
 
                 # Exchange code for credentials
-                from auth.google_auth import handle_auth_callback
-                redirect_uri = get_oauth_redirect_uri(port=self.port, base_uri=self.base_uri)
+                redirect_uri = get_oauth_redirect_uri()
                 verified_user_id, credentials = handle_auth_callback(
                     scopes=SCOPES,
                     authorization_response=str(request.url),
@@ -181,32 +179,6 @@ class MinimalOAuthServer:
 # Global instance for stdio mode
 _minimal_oauth_server: Optional[MinimalOAuthServer] = None
 
-def get_oauth_redirect_uri(port: int = 8000, base_uri: str = "http://localhost") -> str:
-    """
-    Get the appropriate OAuth redirect URI.
-
-    Priority:
-    1. GOOGLE_OAUTH_REDIRECT_URI environment variable
-    2. Constructed from port and base URI
-
-    Args:
-        port: Port number (default 8000)
-        base_uri: Base URI (default "http://localhost")
-
-    Returns:
-        OAuth redirect URI
-    """
-    # Highest priority: Use the environment variable if it's set
-    env_redirect_uri = os.getenv("GOOGLE_OAUTH_REDIRECT_URI")
-    if env_redirect_uri:
-        logger.info(f"Using redirect URI from GOOGLE_OAUTH_REDIRECT_URI: {env_redirect_uri}")
-        return env_redirect_uri
-
-    # Fallback to constructing the URI based on server settings
-    constructed_uri = f"{base_uri}:{port}/oauth2callback"
-    logger.info(f"Constructed redirect URI: {constructed_uri}")
-    return constructed_uri
-
 def ensure_oauth_callback_available(transport_mode: str = "stdio", port: int = 8000, base_uri: str = "http://localhost") -> tuple[bool, str]:
     """
     Ensure OAuth callback endpoint is available for the given transport mode.
diff --git a/auth/oauth_common_handlers.py b/auth/oauth_common_handlers.py
index 3c1be41..e532b64 100644
--- a/auth/oauth_common_handlers.py
+++ b/auth/oauth_common_handlers.py
@@ -16,22 +16,24 @@ from google.oauth2.credentials import Credentials
 from auth.oauth21_session_store import store_token_session
 from auth.google_auth import save_credentials_to_file
 from auth.scopes import get_current_scopes
-from core.config import WORKSPACE_MCP_BASE_URI, WORKSPACE_MCP_PORT
+from auth.oauth_config import get_oauth_config
+from auth.oauth_error_handling import (
+    OAuthError, OAuthValidationError, OAuthConfigurationError,
+    create_oauth_error_response, validate_token_request,
+    validate_registration_request, get_development_cors_headers,
+    log_security_event
+)
 
 logger = logging.getLogger(__name__)
 
 
 async def handle_oauth_authorize(request: Request):
     """Common handler for OAuth authorization proxy."""
+    origin = request.headers.get("origin")
+    
     if request.method == "OPTIONS":
-        return JSONResponse(
-            content={},
-            headers={
-                "Access-Control-Allow-Origin": "*",
-                "Access-Control-Allow-Methods": "GET, OPTIONS",
-                "Access-Control-Allow-Headers": "Content-Type"
-            }
-        )
+        cors_headers = get_development_cors_headers(origin)
+        return JSONResponse(content={}, headers=cors_headers)
 
     # Get query parameters
     params = dict(request.query_params)
@@ -55,50 +57,55 @@ async def handle_oauth_authorize(request: Request):
     # Build Google authorization URL
     google_auth_url = "https://accounts.google.com/o/oauth2/v2/auth?" + urlencode(params)
 
-    # Return redirect
+    # Return redirect with development CORS headers if needed
+    cors_headers = get_development_cors_headers(origin)
     return RedirectResponse(
         url=google_auth_url,
         status_code=302,
-        headers={
-            "Access-Control-Allow-Origin": "*"
-        }
+        headers=cors_headers
     )
 
 
 async def handle_proxy_token_exchange(request: Request):
-    """Common handler for OAuth token exchange proxy."""
+    """Common handler for OAuth token exchange proxy with comprehensive error handling."""
+    origin = request.headers.get("origin")
+    
     if request.method == "OPTIONS":
-        return JSONResponse(
-            content={},
-            headers={
-                "Access-Control-Allow-Origin": "*",
-                "Access-Control-Allow-Methods": "POST, OPTIONS",
-                "Access-Control-Allow-Headers": "Content-Type, Authorization"
-            }
-        )
+        cors_headers = get_development_cors_headers(origin)
+        return JSONResponse(content={}, headers=cors_headers)
     try:
-        # Get form data
-        body = await request.body()
-        content_type = request.headers.get("content-type", "application/x-www-form-urlencoded")
-        
-        # Parse form data to add missing client credentials
+        # Get form data with validation
+        try:
+            body = await request.body()
+            content_type = request.headers.get("content-type", "application/x-www-form-urlencoded")
+        except Exception as e:
+            raise OAuthValidationError(f"Failed to read request body: {e}")
+
+        # Parse and validate form data
         if content_type and "application/x-www-form-urlencoded" in content_type:
-            form_data = parse_qs(body.decode('utf-8'))
-            
+            try:
+                form_data = parse_qs(body.decode('utf-8'))
+            except Exception as e:
+                raise OAuthValidationError(f"Invalid form data: {e}")
+
+            # Convert to single values and validate
+            request_data = {k: v[0] if v else '' for k, v in form_data.items()}
+            validate_token_request(request_data)
+
             # Check if client_id is missing (public client)
             if 'client_id' not in form_data or not form_data['client_id'][0]:
                 client_id = os.getenv("GOOGLE_OAUTH_CLIENT_ID")
                 if client_id:
                     form_data['client_id'] = [client_id]
                     logger.debug("Added missing client_id to token request")
-            
+
             # Check if client_secret is missing (public client using PKCE)
             if 'client_secret' not in form_data:
                 client_secret = os.getenv("GOOGLE_OAUTH_CLIENT_SECRET")
                 if client_secret:
                     form_data['client_secret'] = [client_secret]
                     logger.debug("Added missing client_secret to token request")
-            
+
             # Reconstruct body with added credentials
             body = urlencode(form_data, doseq=True).encode('utf-8')
 
@@ -138,7 +145,7 @@ async def handle_proxy_token_exchange(request: Request):
                                     )
                                     user_email = id_token_claims.get("email")
                                     email_verified = id_token_claims.get("email_verified")
-                                    
+
                                     if not email_verified:
                                         logger.error(f"Email address for user {user_email} is not verified by Google. Aborting session creation.")
                                         return JSONResponse(content={"error": "Email address not verified"}, status_code=403)
@@ -152,7 +159,7 @@ async def handle_proxy_token_exchange(request: Request):
                                                 logger.info(f"Found MCP session ID for binding: {mcp_session_id}")
                                         except Exception as e:
                                             logger.debug(f"Could not get MCP session ID: {e}")
-                                        
+
                                         # Store the token session with MCP session binding
                                         session_id = store_token_session(response_data, user_email, mcp_session_id)
                                         logger.info(f"Stored OAuth session for {user_email} (session: {session_id}, mcp: {mcp_session_id})")
@@ -186,219 +193,201 @@ async def handle_proxy_token_exchange(request: Request):
                         except Exception as e:
                             logger.error(f"Failed to store OAuth session: {e}")
 
+                # Add development CORS headers
+                cors_headers = get_development_cors_headers(origin)
+                response_headers = {
+                    "Content-Type": "application/json",
+                    "Cache-Control": "no-store"
+                }
+                response_headers.update(cors_headers)
+                
                 return JSONResponse(
                     status_code=response.status,
                     content=response_data,
-                    headers={
-                        "Content-Type": "application/json",
-                        "Access-Control-Allow-Origin": "*",
-                        "Cache-Control": "no-store"
-                    }
+                    headers=response_headers
                 )
 
+    except OAuthError as e:
+        log_security_event("oauth_token_exchange_error", {
+            "error_code": e.error_code,
+            "description": e.description
+        }, request)
+        return create_oauth_error_response(e, origin)
     except Exception as e:
-        logger.error(f"Error in token proxy: {e}")
-        return JSONResponse(
-            status_code=500,
-            content={"error": "server_error", "error_description": str(e)},
-            headers={"Access-Control-Allow-Origin": "*"}
-        )
+        logger.error(f"Unexpected error in token proxy: {e}", exc_info=True)
+        log_security_event("oauth_token_exchange_unexpected_error", {
+            "error": str(e)
+        }, request)
+        error = OAuthConfigurationError("Internal server error")
+        return create_oauth_error_response(error, origin)
 
 
 async def handle_oauth_protected_resource(request: Request):
-    """Common handler for OAuth protected resource metadata."""
+    """
+    Handle OAuth protected resource metadata requests.
+    """
+    origin = request.headers.get("origin")
+    
+    # Handle preflight
     if request.method == "OPTIONS":
-        return JSONResponse(
-            content={},
-            headers={
-                "Access-Control-Allow-Origin": "*",
-                "Access-Control-Allow-Methods": "GET, OPTIONS",
-                "Access-Control-Allow-Headers": "Content-Type"
-            }
-        )
+        cors_headers = get_development_cors_headers(origin)
+        return JSONResponse(content={}, headers=cors_headers)
+
+    config = get_oauth_config()
+    base_url = config.get_oauth_base_url()
+
+    # For streamable-http transport, the MCP server runs at /mcp
+    # This is the actual resource being protected
+    resource_url = f"{base_url}/mcp"
 
+    # Build metadata response per RFC 9449
     metadata = {
-        "resource": f"{WORKSPACE_MCP_BASE_URI}:{WORKSPACE_MCP_PORT}",
-        "authorization_servers": [
-            f"{WORKSPACE_MCP_BASE_URI}:{WORKSPACE_MCP_PORT}"
-        ],
+        "resource": resource_url,  # The MCP server endpoint that needs protection
+        "authorization_servers": [base_url],  # Our proxy acts as the auth server
         "bearer_methods_supported": ["header"],
         "scopes_supported": get_current_scopes(),
         "resource_documentation": "https://developers.google.com/workspace",
         "client_registration_required": True,
-        "client_configuration_endpoint": f"{WORKSPACE_MCP_BASE_URI}:{WORKSPACE_MCP_PORT}/.well-known/oauth-client",
+        "client_configuration_endpoint": f"{base_url}/.well-known/oauth-client",
     }
 
+    # Log the response for debugging
+    logger.debug(f"Returning protected resource metadata: {metadata}")
+
+    # Add development CORS headers
+    cors_headers = get_development_cors_headers(origin)
+    response_headers = {
+        "Content-Type": "application/json; charset=utf-8",
+        "Cache-Control": "public, max-age=3600"
+    }
+    response_headers.update(cors_headers)
+    
     return JSONResponse(
         content=metadata,
-        headers={
-            "Content-Type": "application/json",
-            "Access-Control-Allow-Origin": "*"
-        }
+        headers=response_headers
     )
 
 
 async def handle_oauth_authorization_server(request: Request):
-    """Common handler for OAuth authorization server metadata."""
+    """
+    Handle OAuth authorization server metadata.
+    """
+    origin = request.headers.get("origin")
+    
     if request.method == "OPTIONS":
-        return JSONResponse(
-            content={},
-            headers={
-                "Access-Control-Allow-Origin": "*",
-                "Access-Control-Allow-Methods": "GET, OPTIONS",
-                "Access-Control-Allow-Headers": "Content-Type"
-            }
-        )
-
-    try:
-        # Fetch metadata from Google
-        async with aiohttp.ClientSession() as session:
-            url = "https://accounts.google.com/.well-known/openid-configuration"
-            async with session.get(url) as response:
-                if response.status == 200:
-                    metadata = await response.json()
-
-                    # Add OAuth 2.1 required fields
-                    metadata.setdefault("code_challenge_methods_supported", ["S256"])
-                    metadata.setdefault("pkce_required", True)
-
-                    # Override endpoints to use our proxies
-                    metadata["token_endpoint"] = f"{WORKSPACE_MCP_BASE_URI}:{WORKSPACE_MCP_PORT}/oauth2/token"
-                    metadata["authorization_endpoint"] = f"{WORKSPACE_MCP_BASE_URI}:{WORKSPACE_MCP_PORT}/oauth2/authorize"
-                    metadata["enable_dynamic_registration"] = True
-                    metadata["registration_endpoint"] = f"{WORKSPACE_MCP_BASE_URI}:{WORKSPACE_MCP_PORT}/oauth2/register"
-                    return JSONResponse(
-                        content=metadata,
-                        headers={
-                            "Content-Type": "application/json",
-                            "Access-Control-Allow-Origin": "*"
-                        }
-                    )
-
-        # Fallback metadata
-        return JSONResponse(
-            content={
-                "issuer": "https://accounts.google.com",
-                "authorization_endpoint": f"{WORKSPACE_MCP_BASE_URI}:{WORKSPACE_MCP_PORT}/oauth2/authorize",
-                "token_endpoint": f"{WORKSPACE_MCP_BASE_URI}:{WORKSPACE_MCP_PORT}/oauth2/token",
-                "userinfo_endpoint": "https://www.googleapis.com/oauth2/v2/userinfo",
-                "revocation_endpoint": "https://oauth2.googleapis.com/revoke",
-                "jwks_uri": "https://www.googleapis.com/oauth2/v3/certs",
-                "response_types_supported": ["code"],
-                "code_challenge_methods_supported": ["S256"],
-                "pkce_required": True,
-                "grant_types_supported": ["authorization_code", "refresh_token"],
-                "scopes_supported": get_current_scopes(),
-                "token_endpoint_auth_methods_supported": ["client_secret_basic", "client_secret_post"]
-            },
-            headers={
-                "Content-Type": "application/json",
-                "Access-Control-Allow-Origin": "*"
-            }
-        )
-
-    except Exception as e:
-        logger.error(f"Error fetching auth server metadata: {e}")
-        return JSONResponse(
-            status_code=500,
-            content={"error": "Failed to fetch authorization server metadata"},
-            headers={"Access-Control-Allow-Origin": "*"}
-        )
+        cors_headers = get_development_cors_headers(origin)
+        return JSONResponse(content={}, headers=cors_headers)
+
+    config = get_oauth_config()
+    
+    # Get authorization server metadata from centralized config
+    # Pass scopes directly to keep all metadata generation in one place
+    metadata = config.get_authorization_server_metadata(scopes=get_current_scopes())
+
+    logger.debug(f"Returning authorization server metadata: {metadata}")
+
+    # Add development CORS headers
+    cors_headers = get_development_cors_headers(origin)
+    response_headers = {
+        "Content-Type": "application/json; charset=utf-8",
+        "Cache-Control": "public, max-age=3600"
+    }
+    response_headers.update(cors_headers)
+    
+    return JSONResponse(
+        content=metadata,
+        headers=response_headers
+    )
 
 
 async def handle_oauth_client_config(request: Request):
     """Common handler for OAuth client configuration."""
+    origin = request.headers.get("origin")
+    
     if request.method == "OPTIONS":
-        return JSONResponse(
-            content={},
-            headers={
-                "Access-Control-Allow-Origin": "*",
-                "Access-Control-Allow-Methods": "GET, OPTIONS",
-                "Access-Control-Allow-Headers": "Content-Type"
-            }
-        )
+        cors_headers = get_development_cors_headers(origin)
+        return JSONResponse(content={}, headers=cors_headers)
 
     client_id = os.getenv("GOOGLE_OAUTH_CLIENT_ID")
     if not client_id:
+        cors_headers = get_development_cors_headers(origin)
         return JSONResponse(
             status_code=404,
             content={"error": "OAuth not configured"},
-            headers={"Access-Control-Allow-Origin": "*"}
+            headers=cors_headers
         )
 
+    # Get OAuth configuration
+    config = get_oauth_config()
+
     return JSONResponse(
         content={
             "client_id": client_id,
             "client_name": "Google Workspace MCP Server",
-            "client_uri": f"{WORKSPACE_MCP_BASE_URI}:{WORKSPACE_MCP_PORT}",
+            "client_uri": config.base_url,
             "redirect_uris": [
-                f"{WORKSPACE_MCP_BASE_URI}:{WORKSPACE_MCP_PORT}/oauth2callback",
+                f"{config.base_url}/oauth2callback",
                 "http://localhost:5173/auth/callback"
             ],
             "grant_types": ["authorization_code", "refresh_token"],
             "response_types": ["code"],
             "scope": " ".join(get_current_scopes()),
             "token_endpoint_auth_method": "client_secret_basic",
-            "code_challenge_methods": ["S256"]
+            "code_challenge_methods": config.supported_code_challenge_methods[:1]  # Primary method only
         },
         headers={
-            "Content-Type": "application/json",
-            "Access-Control-Allow-Origin": "*"
+            "Content-Type": "application/json; charset=utf-8",
+            "Cache-Control": "public, max-age=3600",
+            **get_development_cors_headers(origin)
         }
     )
 
 
 async def handle_oauth_register(request: Request):
-    """Common handler for OAuth dynamic client registration."""
+    """Common handler for OAuth dynamic client registration with comprehensive error handling."""
+    origin = request.headers.get("origin")
+    
     if request.method == "OPTIONS":
-        return JSONResponse(
-            content={},
-            headers={
-                "Access-Control-Allow-Origin": "*",
-                "Access-Control-Allow-Methods": "POST, OPTIONS",
-                "Access-Control-Allow-Headers": "Content-Type, Authorization"
-            }
-        )
+        cors_headers = get_development_cors_headers(origin)
+        return JSONResponse(content={}, headers=cors_headers)
 
-    client_id = os.getenv("GOOGLE_OAUTH_CLIENT_ID")
-    client_secret = os.getenv("GOOGLE_OAUTH_CLIENT_SECRET")
+    config = get_oauth_config()
 
-    if not client_id or not client_secret:
-        return JSONResponse(
-            status_code=400,
-            content={"error": "invalid_request", "error_description": "OAuth not configured"},
-            headers={"Access-Control-Allow-Origin": "*"}
-        )
+    if not config.is_configured():
+        error = OAuthConfigurationError("OAuth client credentials not configured")
+        return create_oauth_error_response(error, origin)
 
     try:
-        # Parse the registration request
-        body = await request.json()
-        logger.info(f"Dynamic client registration request received: {body}")
+        # Parse and validate the registration request
+        try:
+            body = await request.json()
+        except Exception as e:
+            raise OAuthValidationError(f"Invalid JSON in registration request: {e}")
+
+        validate_registration_request(body)
+        logger.info("Dynamic client registration request received")
 
         # Extract redirect URIs from the request or use defaults
         redirect_uris = body.get("redirect_uris", [])
         if not redirect_uris:
-            redirect_uris = [
-                f"{WORKSPACE_MCP_BASE_URI}:{WORKSPACE_MCP_PORT}/oauth2callback",
-                "http://localhost:5173/auth/callback"
-            ]
+            redirect_uris = config.get_redirect_uris()
 
         # Build the registration response with our pre-configured credentials
         response_data = {
-            "client_id": client_id,
-            "client_secret": client_secret,
+            "client_id": config.client_id,
+            "client_secret": config.client_secret,
             "client_name": body.get("client_name", "Google Workspace MCP Server"),
-            "client_uri": body.get("client_uri", f"{WORKSPACE_MCP_BASE_URI}:{WORKSPACE_MCP_PORT}"),
+            "client_uri": body.get("client_uri", config.base_url),
             "redirect_uris": redirect_uris,
             "grant_types": body.get("grant_types", ["authorization_code", "refresh_token"]),
             "response_types": body.get("response_types", ["code"]),
             "scope": body.get("scope", " ".join(get_current_scopes())),
             "token_endpoint_auth_method": body.get("token_endpoint_auth_method", "client_secret_basic"),
-            "code_challenge_methods": ["S256"],
+            "code_challenge_methods": config.supported_code_challenge_methods,
             # Additional OAuth 2.1 fields
             "client_id_issued_at": int(time.time()),
             "registration_access_token": "not-required",  # We don't implement client management
-            "registration_client_uri": f"{WORKSPACE_MCP_BASE_URI}:{WORKSPACE_MCP_PORT}/oauth2/register/{client_id}"
+            "registration_client_uri": f"{config.get_oauth_base_url()}/oauth2/register/{config.client_id}"
         }
 
         logger.info("Dynamic client registration successful - returning pre-configured Google credentials")
@@ -408,15 +397,21 @@ async def handle_oauth_register(request: Request):
             content=response_data,
             headers={
                 "Content-Type": "application/json",
-                "Access-Control-Allow-Origin": "*",
-                "Cache-Control": "no-store"
+                "Cache-Control": "no-store",
+                **get_development_cors_headers(origin)
             }
         )
 
+    except OAuthError as e:
+        log_security_event("oauth_registration_error", {
+            "error_code": e.error_code,
+            "description": e.description
+        }, request)
+        return create_oauth_error_response(e, origin)
     except Exception as e:
-        logger.error(f"Error in dynamic client registration: {e}")
-        return JSONResponse(
-            status_code=400,
-            content={"error": "invalid_request", "error_description": str(e)},
-            headers={"Access-Control-Allow-Origin": "*"}
-        )
\ No newline at end of file
+        logger.error(f"Unexpected error in client registration: {e}", exc_info=True)
+        log_security_event("oauth_registration_unexpected_error", {
+            "error": str(e)
+        }, request)
+        error = OAuthConfigurationError("Internal server error")
+        return create_oauth_error_response(error, origin)
\ No newline at end of file
diff --git a/auth/oauth_config.py b/auth/oauth_config.py
new file mode 100644
index 0000000..8a19fe6
--- /dev/null
+++ b/auth/oauth_config.py
@@ -0,0 +1,319 @@
+"""
+OAuth Configuration Management
+
+This module centralizes OAuth-related configuration to eliminate hardcoded values
+scattered throughout the codebase. It provides environment variable support and
+sensible defaults for all OAuth-related settings.
+
+Supports both OAuth 2.0 and OAuth 2.1 with automatic client capability detection.
+"""
+
+import os
+from typing import List, Optional, Dict, Any
+
+
+class OAuthConfig:
+    """
+    Centralized OAuth configuration management.
+
+    This class eliminates the hardcoded configuration anti-pattern identified
+    in the challenge review by providing a single source of truth for all
+    OAuth-related configuration values.
+    """
+
+    def __init__(self):
+        # Base server configuration
+        self.base_uri = os.getenv("WORKSPACE_MCP_BASE_URI", "http://localhost")
+        # `or`-chaining treats empty-string env vars (common in container deploys) as unset,
+        # where dict-style getenv defaults would pass "" to int() and raise ValueError.
+        self.port = int(os.getenv("PORT") or os.getenv("WORKSPACE_MCP_PORT") or "8000")
+        self.base_url = f"{self.base_uri}:{self.port}"
+
+        # OAuth client configuration
+        self.client_id = os.getenv("GOOGLE_OAUTH_CLIENT_ID")
+        self.client_secret = os.getenv("GOOGLE_OAUTH_CLIENT_SECRET")
+
+        # OAuth 2.1 configuration
+        self.oauth21_enabled = os.getenv("MCP_ENABLE_OAUTH21", "false").lower() == "true"
+        self.pkce_required = self.oauth21_enabled  # PKCE is mandatory in OAuth 2.1
+        self.supported_code_challenge_methods = ["S256"] if self.oauth21_enabled else ["S256", "plain"]
+
+        # Transport mode (will be set at runtime)
+        self._transport_mode = "stdio"  # Default
+
+        # Redirect URI configuration
+        self.redirect_uri = self._get_redirect_uri()
+
+    def _get_redirect_uri(self) -> str:
+        """
+        Get the OAuth redirect URI, supporting reverse proxy configurations.
+
+        Returns:
+            The configured redirect URI
+        """
+        explicit_uri = os.getenv("GOOGLE_OAUTH_REDIRECT_URI")
+        if explicit_uri:
+            return explicit_uri
+        return f"{self.base_url}/oauth2callback"
+
+    def get_redirect_uris(self) -> List[str]:
+        """
+        Get all valid OAuth redirect URIs.
+
+        Returns:
+            List of all supported redirect URIs
+        """
+        uris = []
+
+        # Primary redirect URI
+        uris.append(self.redirect_uri)
+
+        # Custom redirect URIs from environment; drop empty entries so a
+        # trailing comma in OAUTH_CUSTOM_REDIRECT_URIS cannot inject "".
+        custom_uris = os.getenv("OAUTH_CUSTOM_REDIRECT_URIS")
+        if custom_uris:
+            uris.extend([uri.strip() for uri in custom_uris.split(",") if uri.strip()])
+
+        # Remove duplicates while preserving order
+        return list(dict.fromkeys(uris))
+
+    def get_allowed_origins(self) -> List[str]:
+        """
+        Get allowed CORS origins for OAuth endpoints.
+
+        Returns:
+            List of allowed origins for CORS
+        """
+        origins = []
+
+        # Server's own origin
+        origins.append(self.base_url)
+
+        # VS Code and development origins
+        origins.extend([
+            "vscode-webview://",
+            "https://vscode.dev",
+            "https://github.dev",
+        ])
+
+        # Custom origins from environment; drop empty entries so a trailing
+        # comma in OAUTH_ALLOWED_ORIGINS cannot whitelist the empty origin.
+        custom_origins = os.getenv("OAUTH_ALLOWED_ORIGINS")
+        if custom_origins:
+            origins.extend([origin.strip() for origin in custom_origins.split(",") if origin.strip()])
+
+        return list(dict.fromkeys(origins))
+
+    def is_configured(self) -> bool:
+        """
+        Check if OAuth is properly configured.
+
+        Returns:
+            True if OAuth client credentials are available
+        """
+        return bool(self.client_id and self.client_secret)
+
+    def get_oauth_base_url(self) -> str:
+        """
+        Get OAuth base URL for constructing OAuth endpoints.
+
+        Returns:
+            Base URL for OAuth endpoints
+        """
+        return self.base_url
+
+    def validate_redirect_uri(self, uri: str) -> bool:
+        """
+        Validate if a redirect URI is allowed.
+
+        Args:
+            uri: The redirect URI to validate
+
+        Returns:
+            True if the URI is allowed, False otherwise
+        """
+        allowed_uris = self.get_redirect_uris()
+        return uri in allowed_uris
+
+    def get_environment_summary(self) -> dict:
+        """
+        Get a summary of the current OAuth configuration.
+
+        Returns:
+            Dictionary with configuration summary (excluding secrets)
+        """
+        return {
+            "base_url": self.base_url,
+            "redirect_uri": self.redirect_uri,
+            "client_configured": bool(self.client_id),
+            "oauth21_enabled": self.oauth21_enabled,
+            "pkce_required": self.pkce_required,
+            "transport_mode": self._transport_mode,
+            "total_redirect_uris": len(self.get_redirect_uris()),
+            "total_allowed_origins": len(self.get_allowed_origins()),
+        }
+
+    def set_transport_mode(self, mode: str) -> None:
+        """
+        Set the current transport mode for OAuth callback handling.
+
+        Args:
+            mode: Transport mode ("stdio", "streamable-http", etc.)
+        """
+        self._transport_mode = mode
+
+    def get_transport_mode(self) -> str:
+        """
+        Get the current transport mode.
+
+        Returns:
+            Current transport mode
+        """
+        return self._transport_mode
+
+    def is_oauth21_enabled(self) -> bool:
+        """
+        Check if OAuth 2.1 mode is enabled.
+
+        Returns:
+            True if OAuth 2.1 is enabled
+        """
+        return self.oauth21_enabled
+
+    def detect_oauth_version(self, request_params: Dict[str, Any]) -> str:
+        """
+        Detect OAuth version based on request parameters.
+
+        This method implements a conservative detection strategy:
+        - Only returns "oauth21" when we have clear indicators
+        - Defaults to "oauth20" for backward compatibility
+        - Respects the global oauth21_enabled flag
+
+        Args:
+            request_params: Request parameters from authorization or token request
+
+        Returns:
+            "oauth21" or "oauth20" based on detection
+        """
+        # If OAuth 2.1 is not enabled globally, always return OAuth 2.0
+        if not self.oauth21_enabled:
+            return "oauth20"
+
+        # Use the structured type for cleaner detection logic
+        from auth.oauth_types import OAuthVersionDetectionParams
+        params = OAuthVersionDetectionParams.from_request(request_params)
+
+        # Clear OAuth 2.1 indicator: PKCE is present
+        if params.has_pkce:
+            return "oauth21"
+
+        # For public clients in OAuth 2.1 mode, we require PKCE
+        # But since they didn't send PKCE, fall back to OAuth 2.0
+        # This ensures backward compatibility
+
+        # Default to OAuth 2.0 for maximum compatibility
+        return "oauth20"
+
+    def get_authorization_server_metadata(self, scopes: Optional[List[str]] = None) -> Dict[str, Any]:
+        """
+        Get OAuth authorization server metadata per RFC 8414.
+
+        Args:
+            scopes: Optional list of supported scopes to include in metadata
+
+        Returns:
+            Authorization server metadata dictionary
+        """
+        metadata = {
+            "issuer": self.base_url,
+            "authorization_endpoint": f"{self.base_url}/oauth2/authorize",
+            "token_endpoint": f"{self.base_url}/oauth2/token",
+            "registration_endpoint": f"{self.base_url}/oauth2/register",
+            "jwks_uri": "https://www.googleapis.com/oauth2/v3/certs",
+            "response_types_supported": ["code"],  # no "implicit" grant type is offered, so "token" must not be advertised
+            "grant_types_supported": ["authorization_code", "refresh_token"],
+            "token_endpoint_auth_methods_supported": ["client_secret_post", "client_secret_basic"],
+            "code_challenge_methods_supported": self.supported_code_challenge_methods,
+        }
+
+        # Include scopes if provided
+        if scopes is not None:
+            metadata["scopes_supported"] = scopes
+
+        # Add OAuth 2.1 specific metadata
+        if self.oauth21_enabled:
+            metadata["pkce_required"] = True
+            # OAuth 2.1 deprecates the implicit flow; "code" is already the
+            # only advertised response type above, so no override is needed.
+            # OAuth 2.1 requires exact redirect URI matching
+            metadata["require_exact_redirect_uri"] = True
+
+        return metadata
+
+
+# Global configuration instance
+_oauth_config = None
+
+
+def get_oauth_config() -> OAuthConfig:
+    """
+    Get the global OAuth configuration instance.
+
+    Returns:
+        The singleton OAuth configuration instance
+    """
+    global _oauth_config
+    if _oauth_config is None:
+        _oauth_config = OAuthConfig()
+    return _oauth_config
+
+
+def reload_oauth_config() -> OAuthConfig:
+    """
+    Reload the OAuth configuration from environment variables.
+
+    This is useful for testing or when environment variables change.
+
+    Returns:
+        The reloaded OAuth configuration instance
+    """
+    global _oauth_config
+    _oauth_config = OAuthConfig()
+    return _oauth_config
+
+
+# Convenience functions for backward compatibility
+def get_oauth_base_url() -> str:
+    """Get OAuth base URL."""
+    return get_oauth_config().get_oauth_base_url()
+
+
+def get_redirect_uris() -> List[str]:
+    """Get all valid OAuth redirect URIs."""
+    return get_oauth_config().get_redirect_uris()
+
+
+def get_allowed_origins() -> List[str]:
+    """Get allowed CORS origins."""
+    return get_oauth_config().get_allowed_origins()
+
+
+def is_oauth_configured() -> bool:
+    """Check if OAuth is properly configured."""
+    return get_oauth_config().is_configured()
+
+
+def set_transport_mode(mode: str) -> None:
+    """Set the current transport mode."""
+    get_oauth_config().set_transport_mode(mode)
+
+
+def get_transport_mode() -> str:
+    """Get the current transport mode."""
+    return get_oauth_config().get_transport_mode()
+
+
+def is_oauth21_enabled() -> bool:
+    """Check if OAuth 2.1 is enabled."""
+    return get_oauth_config().is_oauth21_enabled()
+
+
+def get_oauth_redirect_uri() -> str:
+    """Get the primary OAuth redirect URI."""
+    return get_oauth_config().redirect_uri
\ No newline at end of file
diff --git a/auth/oauth_error_handling.py b/auth/oauth_error_handling.py
new file mode 100644
index 0000000..befd103
--- /dev/null
+++ b/auth/oauth_error_handling.py
@@ -0,0 +1,321 @@
+"""
+OAuth Error Handling and Validation
+
+This module provides comprehensive error handling and input validation for OAuth
+endpoints, addressing the inconsistent error handling identified in the challenge review.
+"""
+
+import logging
+from typing import Optional, Dict, Any, List
+from starlette.responses import JSONResponse
+from starlette.requests import Request
+from urllib.parse import urlparse
+import re
+
+logger = logging.getLogger(__name__)
+
+
+class OAuthError(Exception):
+    """Base exception for OAuth-related errors."""
+    
+    def __init__(self, error_code: str, description: str, status_code: int = 400):
+        self.error_code = error_code
+        self.description = description
+        self.status_code = status_code
+        super().__init__(f"{error_code}: {description}")
+
+
+class OAuthValidationError(OAuthError):
+    """Exception for OAuth validation errors."""
+    
+    def __init__(self, description: str, field: Optional[str] = None):
+        error_code = "invalid_request"
+        if field:
+            description = f"Invalid {field}: {description}"
+        super().__init__(error_code, description, 400)
+
+
+class OAuthConfigurationError(OAuthError):
+    """Exception for OAuth configuration errors."""
+    
+    def __init__(self, description: str):
+        super().__init__("server_error", description, 500)
+
+
+def create_oauth_error_response(error: OAuthError, origin: Optional[str] = None) -> JSONResponse:
+    """
+    Create a standardized OAuth error response.
+    
+    Args:
+        error: The OAuth error to convert to a response
+        origin: Optional origin for development CORS headers
+        
+    Returns:
+        JSONResponse with standardized error format
+    """
+    headers = {
+        "Content-Type": "application/json",
+        "Cache-Control": "no-store"
+    }
+    
+    # Add development CORS headers if needed
+    cors_headers = get_development_cors_headers(origin)
+    headers.update(cors_headers)
+    
+    content = {
+        "error": error.error_code,
+        "error_description": error.description
+    }
+    
+    logger.warning(f"OAuth error response: {error.error_code} - {error.description}")
+    
+    return JSONResponse(
+        status_code=error.status_code,
+        content=content,
+        headers=headers
+    )
+
+
+def validate_redirect_uri(uri: str) -> None:
+    """
+    Validate an OAuth redirect URI.
+
+    Args:
+        uri: The redirect URI to validate
+
+    Raises:
+        OAuthValidationError: If the URI is invalid
+    """
+    if not uri:
+        raise OAuthValidationError("Redirect URI is required", "redirect_uri")
+
+    try:
+        parsed = urlparse(uri)
+    except Exception:
+        raise OAuthValidationError("Malformed redirect URI", "redirect_uri")
+
+    # Basic URI validation
+    if not parsed.scheme or not parsed.netloc:
+        raise OAuthValidationError("Redirect URI must be absolute", "redirect_uri")
+
+    # Security checks
+    if parsed.scheme not in ["http", "https"]:
+        raise OAuthValidationError("Redirect URI must use HTTP or HTTPS", "redirect_uri")
+
+    # Warn on plain HTTP outside loopback (IPv4 and IPv6); urlparse lowercases
+    # the host and strips the brackets from an IPv6 literal, so "::1" matches.
+    if parsed.scheme == "http" and parsed.hostname not in ["localhost", "127.0.0.1", "::1"]:
+        logger.warning(f"Insecure redirect URI: {uri}")
+
+
+def validate_client_id(client_id: str) -> None:
+    """
+    Validate an OAuth client ID.
+    
+    Args:
+        client_id: The client ID to validate
+        
+    Raises:
+        OAuthValidationError: If the client ID is invalid
+    """
+    if not client_id:
+        raise OAuthValidationError("Client ID is required", "client_id")
+    
+    if len(client_id) < 10:
+        raise OAuthValidationError("Client ID is too short", "client_id")
+    
+    # Basic format validation for Google client IDs
+    if not re.match(r'^[a-zA-Z0-9\-_.]+$', client_id):
+        raise OAuthValidationError("Client ID contains invalid characters", "client_id")
+
+
+def validate_authorization_code(code: str) -> None:
+    """
+    Validate an OAuth authorization code.
+    
+    Args:
+        code: The authorization code to validate
+        
+    Raises:
+        OAuthValidationError: If the code is invalid
+    """
+    if not code:
+        raise OAuthValidationError("Authorization code is required", "code")
+    
+    if len(code) < 10:
+        raise OAuthValidationError("Authorization code is too short", "code")
+    
+    # Check for suspicious patterns
+    if any(char in code for char in [' ', '\n', '\t', '<', '>']):
+        raise OAuthValidationError("Authorization code contains invalid characters", "code")
+
+
+def validate_scopes(scopes: List[str]) -> None:
+    """
+    Validate OAuth scopes.
+    
+    Args:
+        scopes: List of scopes to validate
+        
+    Raises:
+        OAuthValidationError: If the scopes are invalid
+    """
+    if not scopes:
+        return  # Empty scopes list is acceptable
+    
+    for scope in scopes:
+        if not scope:
+            raise OAuthValidationError("Empty scope is not allowed", "scope")
+        
+        if len(scope) > 200:
+            raise OAuthValidationError("Scope is too long", "scope")
+        
+        # Basic scope format validation
+        if not re.match(r'^[a-zA-Z0-9\-_.:/]+$', scope):
+            raise OAuthValidationError(f"Invalid scope format: {scope}", "scope")
+
+
+def validate_token_request(request_data: Dict[str, Any]) -> None:
+    """
+    Validate an OAuth token exchange request.
+    
+    Args:
+        request_data: The token request data to validate
+        
+    Raises:
+        OAuthValidationError: If the request is invalid
+    """
+    grant_type = request_data.get("grant_type")
+    if not grant_type:
+        raise OAuthValidationError("Grant type is required", "grant_type")
+    
+    if grant_type not in ["authorization_code", "refresh_token"]:
+        raise OAuthValidationError(f"Unsupported grant type: {grant_type}", "grant_type")
+    
+    if grant_type == "authorization_code":
+        code = request_data.get("code")
+        validate_authorization_code(code)
+        
+        redirect_uri = request_data.get("redirect_uri")
+        if redirect_uri:
+            validate_redirect_uri(redirect_uri)
+    
+    client_id = request_data.get("client_id")
+    if client_id:
+        validate_client_id(client_id)
+
+
+def validate_registration_request(request_data: Dict[str, Any]) -> None:
+    """
+    Validate an OAuth client registration request.
+    
+    Args:
+        request_data: The registration request data to validate
+        
+    Raises:
+        OAuthValidationError: If the request is invalid
+    """
+    # Validate redirect URIs if provided
+    redirect_uris = request_data.get("redirect_uris", [])
+    if redirect_uris:
+        if not isinstance(redirect_uris, list):
+            raise OAuthValidationError("redirect_uris must be an array", "redirect_uris")
+        
+        for uri in redirect_uris:
+            validate_redirect_uri(uri)
+    
+    # Validate grant types if provided
+    grant_types = request_data.get("grant_types", [])
+    if grant_types:
+        if not isinstance(grant_types, list):
+            raise OAuthValidationError("grant_types must be an array", "grant_types")
+        
+        allowed_grant_types = ["authorization_code", "refresh_token"]
+        for grant_type in grant_types:
+            if grant_type not in allowed_grant_types:
+                raise OAuthValidationError(f"Unsupported grant type: {grant_type}", "grant_types")
+    
+    # Validate response types if provided
+    response_types = request_data.get("response_types", [])
+    if response_types:
+        if not isinstance(response_types, list):
+            raise OAuthValidationError("response_types must be an array", "response_types")
+        
+        allowed_response_types = ["code"]
+        for response_type in response_types:
+            if response_type not in allowed_response_types:
+                raise OAuthValidationError(f"Unsupported response type: {response_type}", "response_types")
+
+
+def sanitize_user_input(value: str, max_length: int = 1000) -> str:
+    """
+    Sanitize user input to prevent injection attacks.
+    
+    Args:
+        value: The input value to sanitize
+        max_length: Maximum allowed length
+        
+    Returns:
+        Sanitized input value
+        
+    Raises:
+        OAuthValidationError: If the input is invalid
+    """
+    if not isinstance(value, str):
+        raise OAuthValidationError("Input must be a string")
+    
+    if len(value) > max_length:
+        raise OAuthValidationError(f"Input is too long (max {max_length} characters)")
+    
+    # Remove potentially dangerous characters
+    sanitized = re.sub(r'[<>"\'\0\n\r\t]', '', value)
+    
+    return sanitized.strip()
+
+
+def log_security_event(event_type: str, details: Dict[str, Any], request: Optional[Request] = None) -> None:
+    """
+    Log security-related events for monitoring.
+    
+    Args:
+        event_type: Type of security event
+        details: Event details
+        request: Optional request object for context
+    """
+    log_data = {
+        "event_type": event_type,
+        "details": details
+    }
+    
+    if request:
+        log_data["request"] = {
+            "method": request.method,
+            "path": request.url.path,
+            "user_agent": request.headers.get("user-agent", "unknown"),
+            "origin": request.headers.get("origin", "unknown")
+        }
+    
+    logger.warning(f"Security event: {log_data}")
+
+
+def get_development_cors_headers(origin: Optional[str] = None) -> Dict[str, str]:
+    """
+    Get minimal CORS headers for development scenarios only.
+
+    Only allows loopback origins for development tools and inspectors.
+
+    Args:
+        origin: The request origin (will be validated)
+
+    Returns:
+        CORS headers for loopback origins only, empty dict otherwise
+    """
+    # Allow only loopback origins (IPv4, IPv6, and localhost) for development
+    if origin and origin.startswith(("http://localhost:", "http://127.0.0.1:", "http://[::1]:")):
+        return {
+            "Access-Control-Allow-Origin": origin,
+            "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
+            "Access-Control-Allow-Headers": "Content-Type, Authorization",
+            "Access-Control-Max-Age": "3600"
+        }
+
+    return {}
\ No newline at end of file
diff --git a/auth/oauth_types.py b/auth/oauth_types.py
new file mode 100644
index 0000000..3a2867a
--- /dev/null
+++ b/auth/oauth_types.py
@@ -0,0 +1,78 @@
+"""
+Type definitions for OAuth authentication.
+
+This module provides structured types for OAuth-related parameters,
+improving code maintainability and type safety.
+"""
+
+from dataclasses import dataclass
+from typing import Optional, List, Dict, Any
+
+
+@dataclass
+class OAuth21ServiceRequest:
+    """
+    Encapsulates parameters for OAuth 2.1 service authentication requests.
+    
+    This parameter object pattern reduces function complexity and makes
+    it easier to extend authentication parameters in the future.
+    """
+    service_name: str
+    version: str
+    tool_name: str
+    user_google_email: str
+    required_scopes: List[str]
+    session_id: Optional[str] = None
+    auth_token_email: Optional[str] = None
+    allow_recent_auth: bool = False
+    context: Optional[Dict[str, Any]] = None
+    
+    def to_legacy_params(self) -> dict:
+        """Convert to legacy parameter format for backward compatibility."""
+        return {
+            "service_name": self.service_name,
+            "version": self.version,
+            "tool_name": self.tool_name,
+            "user_google_email": self.user_google_email,
+            "required_scopes": self.required_scopes,
+        }
+
+
+@dataclass
+class OAuthVersionDetectionParams:
+    """
+    Parameters used for OAuth version detection.
+    
+    Encapsulates the various signals we use to determine
+    whether a client supports OAuth 2.1 or needs OAuth 2.0.
+    """
+    client_id: Optional[str] = None
+    client_secret: Optional[str] = None
+    code_challenge: Optional[str] = None
+    code_challenge_method: Optional[str] = None
+    code_verifier: Optional[str] = None
+    authenticated_user: Optional[str] = None
+    session_id: Optional[str] = None
+    
+    @classmethod
+    def from_request(cls, request_params: Dict[str, Any]) -> "OAuthVersionDetectionParams":
+        """Create from raw request parameters."""
+        return cls(
+            client_id=request_params.get("client_id"),
+            client_secret=request_params.get("client_secret"),
+            code_challenge=request_params.get("code_challenge"),
+            code_challenge_method=request_params.get("code_challenge_method"),
+            code_verifier=request_params.get("code_verifier"),
+            authenticated_user=request_params.get("authenticated_user"),
+            session_id=request_params.get("session_id"),
+        )
+    
+    @property
+    def has_pkce(self) -> bool:
+        """Check if PKCE parameters are present."""
+        return bool(self.code_challenge or self.code_verifier)
+    
+    @property
+    def is_public_client(self) -> bool:
+        """Check if this appears to be a public client (no secret)."""
+        return bool(self.client_id and not self.client_secret)
\ No newline at end of file
diff --git a/auth/service_decorator.py b/auth/service_decorator.py
index 24029e2..e2718cb 100644
--- a/auth/service_decorator.py
+++ b/auth/service_decorator.py
@@ -334,9 +334,31 @@ def require_google_service(
                     # Log authentication status
                     logger.debug(f"[{tool_name}] Auth: {authenticated_user or 'none'} via {auth_method or 'none'} (session: {mcp_session_id[:8] if mcp_session_id else 'none'})")
 
-                    from auth.oauth21_integration import is_oauth21_enabled
-
+                    from auth.oauth_config import is_oauth21_enabled, get_oauth_config
+                    
+                    # Smart OAuth version detection and fallback
+                    use_oauth21 = False
+                    oauth_version = "oauth20"  # Default
+                    
                     if is_oauth21_enabled():
+                        # OAuth 2.1 is enabled globally, check client capabilities
+                        # Try to detect from context if this is an OAuth 2.1 capable client
+                        config = get_oauth_config()
+                        
+                        # Build request params from context for version detection
+                        request_params = {}
+                        if authenticated_user:
+                            request_params["authenticated_user"] = authenticated_user
+                        if mcp_session_id:
+                            request_params["session_id"] = mcp_session_id
+                        
+                        # Detect OAuth version based on client capabilities
+                        oauth_version = config.detect_oauth_version(request_params)
+                        use_oauth21 = (oauth_version == "oauth21")
+                        
+                        logger.debug(f"[{tool_name}] OAuth version detected: {oauth_version}, will use OAuth 2.1: {use_oauth21}")
+
+                    if use_oauth21:
                         logger.debug(f"[{tool_name}] Using OAuth 2.1 flow")
                         # The downstream get_authenticated_google_service_oauth21 will handle
                         # whether the user's token is valid for the requested resource.
@@ -352,8 +374,8 @@ def require_google_service(
                             allow_recent_auth=False,
                         )
                     else:
-                        # If OAuth 2.1 is not enabled, always use the legacy authentication method.
-                        logger.debug(f"[{tool_name}] Using legacy OAuth flow")
+                        # Use legacy OAuth 2.0 authentication
+                        logger.debug(f"[{tool_name}] Using legacy OAuth 2.0 flow")
                         service, actual_user_email = await get_authenticated_google_service(
                             service_name=service_name,
                             version=service_version,
@@ -449,7 +471,6 @@ def require_multiple_services(service_configs: List[Dict[str, Any]]):
 
                     # SIMPLIFIED: Get authentication state from context (set by AuthInfoMiddleware)
                     authenticated_user = None
-                    auth_method = None
                     mcp_session_id = None
 
                     try:
@@ -457,14 +478,13 @@ def require_multiple_services(service_configs: List[Dict[str, Any]]):
                         ctx = get_context()
                         if ctx:
                             authenticated_user = ctx.get_state("authenticated_user_email")
-                            auth_method = ctx.get_state("authenticated_via")
                             if hasattr(ctx, 'session_id'):
                                 mcp_session_id = ctx.session_id
                     except Exception as e:
                         logger.debug(f"[{tool_name}] Could not get FastMCP context: {e}")
 
                     # Use the same logic as single service decorator
-                    from auth.oauth21_integration import is_oauth21_enabled
+                    from auth.oauth_config import is_oauth21_enabled
 
                     if is_oauth21_enabled():
                         logger.debug(f"[{tool_name}] Attempting OAuth 2.1 authentication flow for {service_type}.")
diff --git a/core/config.py b/core/config.py
index 2e3985b..d90b8a9 100644
--- a/core/config.py
+++ b/core/config.py
@@ -2,31 +2,34 @@
 Shared configuration for Google Workspace MCP server.
 This module holds configuration values that need to be shared across modules
 to avoid circular imports.
+
+NOTE: OAuth configuration has been moved to auth.oauth_config for centralization.
+This module now imports from there for backward compatibility.
 """
 
 import os
+from auth.oauth_config import (
+    get_oauth_base_url,
+    get_oauth_redirect_uri,
+    set_transport_mode,
+    get_transport_mode,
+    is_oauth21_enabled
+)
 
 # Server configuration
 WORKSPACE_MCP_PORT = int(os.getenv("PORT", os.getenv("WORKSPACE_MCP_PORT", 8000)))
 WORKSPACE_MCP_BASE_URI = os.getenv("WORKSPACE_MCP_BASE_URI", "http://localhost")
-USER_GOOGLE_EMAIL = os.getenv("USER_GOOGLE_EMAIL", None)
-
-# Transport mode (will be set by main.py)
-_current_transport_mode = "stdio"  # Default to stdio
-
-
-def set_transport_mode(mode: str):
-    """Set the current transport mode for OAuth callback handling."""
-    global _current_transport_mode
-    _current_transport_mode = mode
-
-
-def get_transport_mode() -> str:
-    """Get the current transport mode."""
-    return _current_transport_mode
-
 
-def get_oauth_redirect_uri() -> str:
-    """Get OAuth redirect URI based on current configuration."""
-    # Use the standard OAuth callback path
-    return f"{WORKSPACE_MCP_BASE_URI}:{WORKSPACE_MCP_PORT}/oauth2callback"
\ No newline at end of file
+# Disable USER_GOOGLE_EMAIL in OAuth 2.1 multi-user mode
+USER_GOOGLE_EMAIL = None if is_oauth21_enabled() else os.getenv("USER_GOOGLE_EMAIL", None)
+
+# Re-export OAuth functions for backward compatibility
+__all__ = [
+    'WORKSPACE_MCP_PORT',
+    'WORKSPACE_MCP_BASE_URI',
+    'USER_GOOGLE_EMAIL',
+    'get_oauth_base_url',
+    'get_oauth_redirect_uri',
+    'set_transport_mode',
+    'get_transport_mode'
+]
\ No newline at end of file
diff --git a/core/server.py b/core/server.py
index caa4826..9885093 100644
--- a/core/server.py
+++ b/core/server.py
@@ -7,7 +7,6 @@ from fastapi.responses import HTMLResponse, JSONResponse
 from starlette.applications import Starlette
 from starlette.requests import Request
 from starlette.middleware import Middleware
-from fastapi.middleware.cors import CORSMiddleware
 
 from fastmcp import FastMCP
 
@@ -19,8 +18,6 @@ from auth.auth_info_middleware import AuthInfoMiddleware
 from auth.fastmcp_google_auth import GoogleWorkspaceAuthProvider
 from auth.scopes import SCOPES
 from core.config import (
-    WORKSPACE_MCP_PORT,
-    WORKSPACE_MCP_BASE_URI,
     USER_GOOGLE_EMAIL,
     get_transport_mode,
     set_transport_mode as _set_transport_mode,
@@ -41,31 +38,25 @@ logger = logging.getLogger(__name__)
 _auth_provider: Optional[Union[GoogleWorkspaceAuthProvider, GoogleRemoteAuthProvider]] = None
 
 # --- Middleware Definitions ---
-cors_middleware = Middleware(
-    CORSMiddleware,
-    allow_origins=["*"],
-    allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"],
-)
 session_middleware = Middleware(MCPSessionMiddleware)
 
-# Custom FastMCP that adds CORS to streamable HTTP
-class CORSEnabledFastMCP(FastMCP):
+# Custom FastMCP that adds secure middleware stack for OAuth 2.1
+class SecureFastMCP(FastMCP):
     def streamable_http_app(self) -> "Starlette":
-        """Override to add CORS and session middleware to the app."""
+        """Override to add secure middleware stack for OAuth 2.1."""
         app = super().streamable_http_app()
-        # Add session middleware first (to set context before other middleware)
+
+        # Add middleware in order (first added = outermost layer)
+        # Session Management - extracts session info for MCP context
         app.user_middleware.insert(0, session_middleware)
-        # Add CORS as the second middleware
-        app.user_middleware.insert(1, cors_middleware)
+
         # Rebuild middleware stack
         app.middleware_stack = app.build_middleware_stack()
-        logger.info("Added session and CORS middleware to streamable HTTP app")
+        logger.info("Added middleware stack: Session Management")
         return app
 
 # --- Server Instance ---
-server = CORSEnabledFastMCP(
+server = SecureFastMCP(
     name="google_workspace",
     auth=None,
 )
@@ -86,32 +77,37 @@ def configure_server_for_http():
     This must be called BEFORE server.run().
     """
     global _auth_provider
+
     transport_mode = get_transport_mode()
 
     if transport_mode != "streamable-http":
         return
 
-    oauth21_enabled = os.getenv("MCP_ENABLE_OAUTH21", "false").lower() == "true"
+    # Use centralized OAuth configuration
+    from auth.oauth_config import get_oauth_config
+    config = get_oauth_config()
+    
+    # Check if OAuth 2.1 is enabled via centralized config
+    oauth21_enabled = config.is_oauth21_enabled()
 
     if oauth21_enabled:
-        if not os.getenv("GOOGLE_OAUTH_CLIENT_ID"):
-            logger.warning("⚠️  OAuth 2.1 enabled but GOOGLE_OAUTH_CLIENT_ID not set")
+        if not config.is_configured():
+            logger.warning("⚠️  OAuth 2.1 enabled but OAuth credentials not configured")
             return
 
         if GOOGLE_REMOTE_AUTH_AVAILABLE:
-            logger.info("🔐 OAuth 2.1 enabled")
+            logger.info("🔐 OAuth 2.1 enabled with automatic OAuth 2.0 fallback for legacy clients")
             try:
                 _auth_provider = GoogleRemoteAuthProvider()
                 server.auth = _auth_provider
                 set_auth_provider(_auth_provider)
-                from auth.oauth21_integration import enable_oauth21
-                enable_oauth21()
+                logger.debug("OAuth 2.1 authentication enabled")
             except Exception as e:
                 logger.error(f"Failed to initialize GoogleRemoteAuthProvider: {e}", exc_info=True)
         else:
             logger.error("OAuth 2.1 is enabled, but GoogleRemoteAuthProvider is not available.")
     else:
-        logger.info("OAuth 2.1 is DISABLED. Server will use legacy tool-based authentication.")
+        logger.info("OAuth 2.0 mode - Server will use legacy authentication.")
         server.auth = None
 
 def get_auth_provider() -> Optional[Union[GoogleWorkspaceAuthProvider, GoogleRemoteAuthProvider]]:
@@ -194,6 +190,19 @@ async def oauth2_callback(request: Request) -> HTMLResponse:
 # --- Tools ---
 @server.tool()
 async def start_google_auth(service_name: str, user_google_email: str = USER_GOOGLE_EMAIL) -> str:
+    """
+    Manually initiate Google OAuth authentication flow.
+    
+    NOTE: This tool should typically NOT be called directly. The authentication system 
+    automatically handles credential checks and prompts for authentication when needed.
+    Only use this tool if:
+    1. You need to re-authenticate with different credentials
+    2. You want to proactively authenticate before using other tools
+    3. The automatic authentication flow failed and you need to retry
+    
+    In most cases, simply try calling the Google Workspace tool you need - it will 
+    automatically handle authentication if required.
+    """
     if not user_google_email:
         raise ValueError("user_google_email must be provided.")
 
@@ -202,17 +211,12 @@ async def start_google_auth(service_name: str, user_google_email: str = USER_GOO
         return f"**Authentication Error:** {error_message}"
 
     try:
-        auth_url, _ = start_auth_flow(
-            scopes=SCOPES,
-            redirect_uri=get_oauth_redirect_uri_for_current_mode(),
-            login_hint=user_google_email
-        )
-        return (
-            "**Action Required: Authenticate with Google**\n\n"
-            "Please visit this URL to authenticate:\n\n"
-            f"**[Authenticate with Google]({auth_url})**\n\n"
-            "After authenticating, retry your request."
+        auth_message = await start_auth_flow(
+            user_google_email=user_google_email,
+            service_name=service_name,
+            redirect_uri=get_oauth_redirect_uri_for_current_mode()
         )
+        return auth_message
     except Exception as e:
         logger.error(f"Failed to start Google authentication flow: {e}", exc_info=True)
         return f"**Error:** An unexpected error occurred: {e}"
@@ -228,7 +232,7 @@ if os.getenv("MCP_ENABLE_OAUTH21", "false").lower() == "true" and not GOOGLE_REM
         handle_oauth_client_config,
         handle_oauth_register
     )
-    
+
     server.custom_route("/.well-known/oauth-protected-resource", methods=["GET", "OPTIONS"])(handle_oauth_protected_resource)
     server.custom_route("/.well-known/oauth-authorization-server", methods=["GET", "OPTIONS"])(handle_oauth_authorization_server)
     server.custom_route("/.well-known/oauth-client", methods=["GET", "OPTIONS"])(handle_oauth_client_config)
diff --git a/gcalendar/calendar_tools.py b/gcalendar/calendar_tools.py
index 700577f..5bca383 100644
--- a/gcalendar/calendar_tools.py
+++ b/gcalendar/calendar_tools.py
@@ -9,7 +9,8 @@ import logging
 import asyncio
 import re
 import uuid
-from typing import List, Optional, Dict, Any
+import json
+from typing import List, Optional, Dict, Any, Union
 
 from googleapiclient.errors import HttpError
 from googleapiclient.discovery import build
@@ -24,6 +25,65 @@ from core.server import server
 logger = logging.getLogger(__name__)
 
 
+def _parse_reminders_json(reminders_input: Optional[Union[str, List[Dict[str, Any]]]], function_name: str) -> List[Dict[str, Any]]:
+    """
+    Parse reminders from JSON string or list object and validate them.
+    
+    Args:
+        reminders_input: JSON string containing reminder objects or list of reminder objects
+        function_name: Name of calling function for logging
+        
+    Returns:
+        List of validated reminder objects
+    """
+    if not reminders_input:
+        return []
+    
+    # Handle both string (JSON) and list inputs
+    if isinstance(reminders_input, str):
+        try:
+            reminders = json.loads(reminders_input)
+            if not isinstance(reminders, list):
+                logger.warning(f"[{function_name}] Reminders must be a JSON array, got {type(reminders).__name__}")
+                return []
+        except json.JSONDecodeError as e:
+            logger.warning(f"[{function_name}] Invalid JSON for reminders: {e}")
+            return []
+    elif isinstance(reminders_input, list):
+        reminders = reminders_input
+    else:
+        logger.warning(f"[{function_name}] Reminders must be a JSON string or list, got {type(reminders_input).__name__}")
+        return []
+    
+    # Validate reminders
+    if len(reminders) > 5:
+        logger.warning(f"[{function_name}] More than 5 reminders provided, truncating to first 5")
+        reminders = reminders[:5]
+    
+    validated_reminders = []
+    for reminder in reminders:
+        if not isinstance(reminder, dict) or "method" not in reminder or "minutes" not in reminder:
+            logger.warning(f"[{function_name}] Invalid reminder format: {reminder}, skipping")
+            continue
+        
+        method = reminder["method"].lower() if isinstance(reminder["method"], str) else ""
+        if method not in ["popup", "email"]:
+            logger.warning(f"[{function_name}] Invalid reminder method '{method}', must be 'popup' or 'email', skipping")
+            continue
+        
+        minutes = reminder["minutes"]
+        if isinstance(minutes, bool) or not isinstance(minutes, int) or minutes < 0 or minutes > 40320:
+            logger.warning(f"[{function_name}] Invalid reminder minutes '{minutes}', must be integer 0-40320, skipping")
+            continue
+        
+        validated_reminders.append({
+            "method": method,
+            "minutes": minutes
+        })
+    
+    return validated_reminders
+
+
 def _preserve_existing_fields(event_body: Dict[str, Any], existing_event: Dict[str, Any], field_mappings: Dict[str, Any]) -> None:
     """
     Helper function to preserve existing event fields when not explicitly provided.
@@ -243,6 +303,8 @@ async def create_event(
     timezone: Optional[str] = None,
     attachments: Optional[List[str]] = None,
     add_google_meet: bool = False,
+    reminders: Optional[Union[str, List[Dict[str, Any]]]] = None,
+    use_default_reminders: bool = True,
 ) -> str:
     """
     Creates a new event.
@@ -259,6 +321,8 @@ async def create_event(
         timezone (Optional[str]): Timezone (e.g., "America/New_York").
         attachments (Optional[List[str]]): List of Google Drive file URLs or IDs to attach to the event.
         add_google_meet (bool): Whether to add a Google Meet video conference to the event. Defaults to False.
+        reminders (Optional[Union[str, List[Dict[str, Any]]]]): JSON string or list of reminder objects. Each should have 'method' ("popup" or "email") and 'minutes' (0-40320). Max 5 reminders. Example: '[{"method": "popup", "minutes": 15}]' or [{"method": "popup", "minutes": 15}]
+        use_default_reminders (bool): Whether to use calendar's default reminders. If False, uses custom reminders. Defaults to True.
 
     Returns:
         str: Confirmation message of the successful event creation with event link.
@@ -294,6 +358,24 @@ async def create_event(
     if attendees:
         event_body["attendees"] = [{"email": email} for email in attendees]
 
+    # Handle reminders
+    if reminders is not None or not use_default_reminders:
+        # If custom reminders are provided, automatically disable default reminders
+        effective_use_default = use_default_reminders and reminders is None
+        
+        reminder_data = {
+            "useDefault": effective_use_default
+        }
+        if reminders is not None:
+            validated_reminders = _parse_reminders_json(reminders, "create_event")
+            if validated_reminders:
+                reminder_data["overrides"] = validated_reminders
+                logger.info(f"[create_event] Added {len(validated_reminders)} custom reminders")
+                if use_default_reminders:
+                    logger.info("[create_event] Custom reminders provided - disabling default reminders")
+        
+        event_body["reminders"] = reminder_data
+
     if add_google_meet:
         request_id = str(uuid.uuid4())
         event_body["conferenceData"] = {
@@ -397,6 +479,8 @@ async def modify_event(
     attendees: Optional[List[str]] = None,
     timezone: Optional[str] = None,
     add_google_meet: Optional[bool] = None,
+    reminders: Optional[Union[str, List[Dict[str, Any]]]] = None,
+    use_default_reminders: Optional[bool] = None,
 ) -> str:
     """
     Modifies an existing event.
@@ -413,6 +497,8 @@ async def modify_event(
         attendees (Optional[List[str]]): New attendee email addresses.
         timezone (Optional[str]): New timezone (e.g., "America/New_York").
         add_google_meet (Optional[bool]): Whether to add or remove Google Meet video conference. If True, adds Google Meet; if False, removes it; if None, leaves unchanged.
+        reminders (Optional[Union[str, List[Dict[str, Any]]]]): JSON string or list of reminder objects to replace existing reminders. Each should have 'method' ("popup" or "email") and 'minutes' (0-40320). Max 5 reminders. Example: '[{"method": "popup", "minutes": 15}]' or [{"method": "popup", "minutes": 15}]
+        use_default_reminders (Optional[bool]): Whether to use calendar's default reminders. If specified, overrides current reminder settings.
 
     Returns:
         str: Confirmation message of the successful event modification with event link.
@@ -445,6 +531,36 @@ async def modify_event(
         event_body["location"] = location
     if attendees is not None:
         event_body["attendees"] = [{"email": email} for email in attendees]
+    
+    # Handle reminders
+    if reminders is not None or use_default_reminders is not None:
+        reminder_data = {}
+        if use_default_reminders is not None:
+            reminder_data["useDefault"] = use_default_reminders
+        else:
+            # Preserve existing event's useDefault value if not explicitly specified
+            try:
+                existing_event = service.events().get(calendarId=calendar_id, eventId=event_id).execute()
+                reminder_data["useDefault"] = existing_event.get("reminders", {}).get("useDefault", True)
+            except Exception as e:
+                logger.warning(f"[modify_event] Could not fetch existing event for reminders: {e}")
+                reminder_data["useDefault"] = True  # Fallback to True if unable to fetch
+        
+        # If custom reminders are provided, automatically disable default reminders
+        if reminders is not None:
+            if reminder_data.get("useDefault", False):
+                reminder_data["useDefault"] = False
+                logger.info("[modify_event] Custom reminders provided - disabling default reminders")
+            
+            validated_reminders = _parse_reminders_json(reminders, "modify_event")
+            if reminders and not validated_reminders:
+                logger.warning("[modify_event] Reminders provided but failed validation. No custom reminders will be set.")
+            elif validated_reminders:
+                reminder_data["overrides"] = validated_reminders
+                logger.info(f"[modify_event] Updated reminders with {len(validated_reminders)} custom reminders")
+        
+        event_body["reminders"] = reminder_data
+
     if (
         timezone is not None
         and "start" not in event_body
diff --git a/gdocs/docs_helpers.py b/gdocs/docs_helpers.py
new file mode 100644
index 0000000..a59d668
--- /dev/null
+++ b/gdocs/docs_helpers.py
@@ -0,0 +1,299 @@
+"""
+Google Docs Helper Functions
+
+This module provides utility functions for common Google Docs operations
+to simplify the implementation of document editing tools.
+"""
+import logging
+from typing import Dict, Any, Optional, Tuple
+
+logger = logging.getLogger(__name__)
+
+def build_text_style(
+    bold: Optional[bool] = None,
+    italic: Optional[bool] = None,
+    underline: Optional[bool] = None,
+    font_size: Optional[int] = None,
+    font_family: Optional[str] = None
+) -> tuple[Dict[str, Any], list[str]]:
+    """
+    Build text style object for Google Docs API requests.
+    
+    Args:
+        bold: Whether text should be bold
+        italic: Whether text should be italic
+        underline: Whether text should be underlined
+        font_size: Font size in points
+        font_family: Font family name
+    
+    Returns:
+        Tuple of (text_style_dict, list_of_field_names)
+    """
+    text_style = {}
+    fields = []
+    
+    if bold is not None:
+        text_style['bold'] = bold
+        fields.append('bold')
+    
+    if italic is not None:
+        text_style['italic'] = italic
+        fields.append('italic')
+    
+    if underline is not None:
+        text_style['underline'] = underline
+        fields.append('underline')
+    
+    if font_size is not None:
+        text_style['fontSize'] = {'magnitude': font_size, 'unit': 'PT'}
+        fields.append('fontSize')
+    
+    if font_family is not None:
+        text_style['weightedFontFamily'] = {'fontFamily': font_family}
+        fields.append('weightedFontFamily')
+    
+    return text_style, fields
+
+def create_insert_text_request(index: int, text: str) -> Dict[str, Any]:
+    """
+    Create an insertText request for Google Docs API.
+    
+    Args:
+        index: Position to insert text
+        text: Text to insert
+    
+    Returns:
+        Dictionary representing the insertText request
+    """
+    return {
+        'insertText': {
+            'location': {'index': index},
+            'text': text
+        }
+    }
+
+def create_delete_range_request(start_index: int, end_index: int) -> Dict[str, Any]:
+    """
+    Create a deleteContentRange request for Google Docs API.
+    
+    Args:
+        start_index: Start position of content to delete
+        end_index: End position of content to delete
+    
+    Returns:
+        Dictionary representing the deleteContentRange request
+    """
+    return {
+        'deleteContentRange': {
+            'range': {
+                'startIndex': start_index,
+                'endIndex': end_index
+            }
+        }
+    }
+
+def create_format_text_request(
+    start_index: int, 
+    end_index: int,
+    bold: Optional[bool] = None,
+    italic: Optional[bool] = None,
+    underline: Optional[bool] = None,
+    font_size: Optional[int] = None,
+    font_family: Optional[str] = None
+) -> Optional[Dict[str, Any]]:
+    """
+    Create an updateTextStyle request for Google Docs API.
+    
+    Args:
+        start_index: Start position of text to format
+        end_index: End position of text to format
+        bold: Whether text should be bold
+        italic: Whether text should be italic
+        underline: Whether text should be underlined
+        font_size: Font size in points
+        font_family: Font family name
+    
+    Returns:
+        Dictionary representing the updateTextStyle request, or None if no styles provided
+    """
+    text_style, fields = build_text_style(bold, italic, underline, font_size, font_family)
+    
+    if not text_style:
+        return None
+    
+    return {
+        'updateTextStyle': {
+            'range': {
+                'startIndex': start_index,
+                'endIndex': end_index
+            },
+            'textStyle': text_style,
+            'fields': ','.join(fields)
+        }
+    }
+
+def create_find_replace_request(
+    find_text: str, 
+    replace_text: str, 
+    match_case: bool = False
+) -> Dict[str, Any]:
+    """
+    Create a replaceAllText request for Google Docs API.
+    
+    Args:
+        find_text: Text to find
+        replace_text: Text to replace with
+        match_case: Whether to match case exactly
+    
+    Returns:
+        Dictionary representing the replaceAllText request
+    """
+    return {
+        'replaceAllText': {
+            'containsText': {
+                'text': find_text,
+                'matchCase': match_case
+            },
+            'replaceText': replace_text
+        }
+    }
+
+def create_insert_table_request(index: int, rows: int, columns: int) -> Dict[str, Any]:
+    """
+    Create an insertTable request for Google Docs API.
+    
+    Args:
+        index: Position to insert table
+        rows: Number of rows
+        columns: Number of columns
+    
+    Returns:
+        Dictionary representing the insertTable request
+    """
+    return {
+        'insertTable': {
+            'location': {'index': index},
+            'rows': rows,
+            'columns': columns
+        }
+    }
+
+def create_insert_page_break_request(index: int) -> Dict[str, Any]:
+    """
+    Create an insertPageBreak request for Google Docs API.
+    
+    Args:
+        index: Position to insert page break
+    
+    Returns:
+        Dictionary representing the insertPageBreak request
+    """
+    return {
+        'insertPageBreak': {
+            'location': {'index': index}
+        }
+    }
+
+def create_insert_image_request(
+    index: int, 
+    image_uri: str,
+    width: Optional[int] = None,
+    height: Optional[int] = None
+) -> Dict[str, Any]:
+    """
+    Create an insertInlineImage request for Google Docs API.
+    
+    Args:
+        index: Position to insert image
+        image_uri: URI of the image (Drive URL or public URL)
+        width: Image width in points
+        height: Image height in points
+    
+    Returns:
+        Dictionary representing the insertInlineImage request
+    """
+    request = {
+        'insertInlineImage': {
+            'location': {'index': index},
+            'uri': image_uri
+        }
+    }
+    
+    # Add size properties if specified
+    object_size = {}
+    if width is not None:
+        object_size['width'] = {'magnitude': width, 'unit': 'PT'}
+    if height is not None:
+        object_size['height'] = {'magnitude': height, 'unit': 'PT'}
+    
+    if object_size:
+        request['insertInlineImage']['objectSize'] = object_size
+    
+    return request
+
+def create_bullet_list_request(
+    start_index: int, 
+    end_index: int,
+    list_type: str = "UNORDERED"
+) -> Dict[str, Any]:
+    """
+    Create a createParagraphBullets request for Google Docs API.
+    
+    Args:
+        start_index: Start of text range to convert to list
+        end_index: End of text range to convert to list
+        list_type: Type of list ("UNORDERED" or "ORDERED")
+    
+    Returns:
+        Dictionary representing the createParagraphBullets request
+    """
+    bullet_preset = (
+        'BULLET_DISC_CIRCLE_SQUARE' 
+        if list_type == "UNORDERED" 
+        else 'NUMBERED_DECIMAL_ALPHA_ROMAN'
+    )
+    
+    return {
+        'createParagraphBullets': {
+            'range': {
+                'startIndex': start_index,
+                'endIndex': end_index
+            },
+            'bulletPreset': bullet_preset
+        }
+    }
+
+def validate_operation(operation: Dict[str, Any]) -> Tuple[bool, str]:
+    """
+    Validate a batch operation dictionary.
+    
+    Args:
+        operation: Operation dictionary to validate
+    
+    Returns:
+        Tuple of (is_valid, error_message)
+    """
+    op_type = operation.get('type')
+    if not op_type:
+        return False, "Missing 'type' field"
+    
+    # Validate required fields for each operation type
+    required_fields = {
+        'insert_text': ['index', 'text'],
+        'delete_text': ['start_index', 'end_index'],
+        'replace_text': ['start_index', 'end_index', 'text'],
+        'format_text': ['start_index', 'end_index'],
+        'insert_table': ['index', 'rows', 'columns'],
+        'insert_page_break': ['index'],
+        'find_replace': ['find_text', 'replace_text']
+    }
+    
+    if op_type not in required_fields:
+        return False, f"Unsupported operation type: {op_type or 'None'}"
+    
+    for field in required_fields[op_type]:
+        if field not in operation:
+            return False, f"Missing required field: {field}"
+    
+    return True, ""
+
diff --git a/gdocs/docs_structure.py b/gdocs/docs_structure.py
new file mode 100644
index 0000000..763160d
--- /dev/null
+++ b/gdocs/docs_structure.py
@@ -0,0 +1,340 @@
+"""
+Google Docs Document Structure Parsing and Analysis
+
+This module provides utilities for parsing and analyzing the structure
+of Google Docs documents, including finding tables, cells, and other elements.
+"""
+import logging
+from typing import Any, Optional
+
+logger = logging.getLogger(__name__)
+
+
+def parse_document_structure(doc_data: dict[str, Any]) -> dict[str, Any]:
+    """
+    Parse the full document structure into a navigable format.
+    
+    Args:
+        doc_data: Raw document data from Google Docs API
+    
+    Returns:
+        Dictionary containing parsed structure with elements and their positions
+    """
+    structure = {
+        'title': doc_data.get('title', ''),
+        'body': [],
+        'tables': [],
+        'headers': {},
+        'footers': {},
+        'total_length': 0
+    }
+    
+    body = doc_data.get('body', {})
+    content = body.get('content', [])
+    
+    for element in content:
+        element_info = _parse_element(element)
+        if element_info:
+            structure['body'].append(element_info)
+            if element_info['type'] == 'table':
+                structure['tables'].append(element_info)
+    
+    # Calculate total document length
+    if structure['body']:
+        last_element = structure['body'][-1]
+        structure['total_length'] = last_element.get('end_index', 0)
+    
+    # Parse headers and footers
+    for header_id, header_data in doc_data.get('headers', {}).items():
+        structure['headers'][header_id] = _parse_segment(header_data)
+    
+    for footer_id, footer_data in doc_data.get('footers', {}).items():
+        structure['footers'][footer_id] = _parse_segment(footer_data)
+    
+    return structure
+
+
+def _parse_element(element: dict[str, Any]) -> Optional[dict[str, Any]]:
+    """
+    Parse a single document element.
+    
+    Args:
+        element: Element data from document
+    
+    Returns:
+        Parsed element information or None
+    """
+    element_info = {
+        'start_index': element.get('startIndex', 0),
+        'end_index': element.get('endIndex', 0)
+    }
+    
+    if 'paragraph' in element:
+        paragraph = element['paragraph']
+        element_info['type'] = 'paragraph'
+        element_info['text'] = _extract_paragraph_text(paragraph)
+        element_info['style'] = paragraph.get('paragraphStyle', {})
+        
+    elif 'table' in element:
+        table = element['table']
+        element_info['type'] = 'table'
+        element_info['rows'] = len(table.get('tableRows', []))
+        element_info['columns'] = len((table.get('tableRows') or [{}])[0].get('tableCells', []))
+        element_info['cells'] = _parse_table_cells(table)
+        element_info['table_style'] = table.get('tableStyle', {})
+        
+    elif 'sectionBreak' in element:
+        element_info['type'] = 'section_break'
+        element_info['section_style'] = element['sectionBreak'].get('sectionStyle', {})
+        
+    elif 'tableOfContents' in element:
+        element_info['type'] = 'table_of_contents'
+        
+    else:
+        return None
+    
+    return element_info
+
+
+def _parse_table_cells(table: dict[str, Any]) -> list[list[dict[str, Any]]]:
+    """
+    Parse table cells with their positions and content.
+    
+    Args:
+        table: Table element data
+    
+    Returns:
+        2D list of cell information
+    """
+    cells = []
+    for row_idx, row in enumerate(table.get('tableRows', [])):
+        row_cells = []
+        for col_idx, cell in enumerate(row.get('tableCells', [])):
+            # Find the first paragraph in the cell for insertion
+            insertion_index = cell.get('startIndex', 0) + 1  # Default fallback
+            
+            # Look for the first paragraph in cell content
+            content_elements = cell.get('content', [])
+            for element in content_elements:
+                if 'paragraph' in element:
+                    paragraph = element['paragraph']
+                    # Get the first element in the paragraph
+                    para_elements = paragraph.get('elements', [])
+                    if para_elements:
+                        first_element = para_elements[0]
+                        if 'startIndex' in first_element:
+                            insertion_index = first_element['startIndex']
+                            break
+            
+            cell_info = {
+                'row': row_idx,
+                'column': col_idx,
+                'start_index': cell.get('startIndex', 0),
+                'end_index': cell.get('endIndex', 0),
+                'insertion_index': insertion_index,  # Where to insert text in this cell
+                'content': _extract_cell_text(cell),
+                'content_elements': content_elements
+            }
+            row_cells.append(cell_info)
+        cells.append(row_cells)
+    return cells
+
+
+def _extract_paragraph_text(paragraph: dict[str, Any]) -> str:
+    """Extract text from a paragraph element."""
+    text_parts = []
+    for element in paragraph.get('elements', []):
+        if 'textRun' in element:
+            text_parts.append(element['textRun'].get('content', ''))
+    return ''.join(text_parts)
+
+
+def _extract_cell_text(cell: dict[str, Any]) -> str:
+    """Extract text content from a table cell."""
+    text_parts = []
+    for element in cell.get('content', []):
+        if 'paragraph' in element:
+            text_parts.append(_extract_paragraph_text(element['paragraph']))
+    return ''.join(text_parts)
+
+
+def _parse_segment(segment_data: dict[str, Any]) -> dict[str, Any]:
+    """Parse a document segment (header/footer)."""
+    return {
+        'content': segment_data.get('content', []),
+        'start_index': segment_data.get('content', [{}])[0].get('startIndex', 0) if segment_data.get('content') else 0,
+        'end_index': segment_data.get('content', [{}])[-1].get('endIndex', 0) if segment_data.get('content') else 0
+    }
+
+
+def find_tables(doc_data: dict[str, Any]) -> list[dict[str, Any]]:
+    """
+    Find all tables in the document with their positions and dimensions.
+    
+    Args:
+        doc_data: Raw document data from Google Docs API
+    
+    Returns:
+        List of table information dictionaries
+    """
+    tables = []
+    structure = parse_document_structure(doc_data)
+    
+    for idx, table_info in enumerate(structure['tables']):
+        tables.append({
+            'index': idx,
+            'start_index': table_info['start_index'],
+            'end_index': table_info['end_index'],
+            'rows': table_info['rows'],
+            'columns': table_info['columns'],
+            'cells': table_info['cells']
+        })
+    
+    return tables
+
+
+def get_table_cell_indices(doc_data: dict[str, Any], table_index: int = 0) -> Optional[list[list[tuple[int, int]]]]:
+    """
+    Get content indices for all cells in a specific table.
+    
+    Args:
+        doc_data: Raw document data from Google Docs API
+        table_index: Index of the table (0-based)
+    
+    Returns:
+        2D list of (start_index, end_index) tuples for each cell, or None if table not found
+    """
+    tables = find_tables(doc_data)
+    
+    # NOTE(review): a negative table_index is not rejected here and would
+    # index from the end of the tables list below — confirm that is intended.
+    if table_index >= len(tables):
+        logger.warning(f"Table index {table_index} not found. Document has {len(tables)} tables.")
+        return None
+    
+    table = tables[table_index]
+    cell_indices = []
+    
+    for row in table['cells']:
+        row_indices = []
+        for cell in row:
+            # Each cell contains at least one paragraph
+            # Find the first paragraph in the cell for content insertion
+            cell_content = cell.get('content_elements', [])
+            if cell_content:
+                # Look for the first paragraph in cell content
+                first_para = None
+                for element in cell_content:
+                    if 'paragraph' in element:
+                        first_para = element['paragraph']
+                        break
+                
+                if first_para and 'elements' in first_para and first_para['elements']:
+                    # Insert at the start of the first text run in the paragraph
+                    # If the API omitted startIndex/endIndex on the run, fall
+                    # back to positions just inside the cell boundary.
+                    first_text_element = first_para['elements'][0]
+                    if 'textRun' in first_text_element:
+                        start_idx = first_text_element.get('startIndex', cell['start_index'] + 1)
+                        end_idx = first_text_element.get('endIndex', start_idx + 1)
+                        row_indices.append((start_idx, end_idx))
+                        continue
+            
+            # Fallback: use cell boundaries with safe margins
+            content_start = cell['start_index'] + 1  
+            content_end = cell['end_index'] - 1
+            row_indices.append((content_start, content_end))
+        cell_indices.append(row_indices)
+    
+    return cell_indices
+
+
+def find_element_at_index(doc_data: dict[str, Any], index: int) -> Optional[dict[str, Any]]:
+    """
+    Find what element exists at a given index in the document.
+    
+    Args:
+        doc_data: Raw document data from Google Docs API
+        index: Position in the document
+    
+    Returns:
+        Information about the element at that position, or None
+    """
+    structure = parse_document_structure(doc_data)
+    
+    for element in structure['body']:
+        if element['start_index'] <= index < element['end_index']:
+            element_copy = element.copy()
+            
+            # If it's a table, find which cell contains the index
+            if element['type'] == 'table' and 'cells' in element:
+                for row_idx, row in enumerate(element['cells']):
+                    for col_idx, cell in enumerate(row):
+                        if cell['start_index'] <= index < cell['end_index']:
+                            element_copy['containing_cell'] = {
+                                'row': row_idx,
+                                'column': col_idx,
+                                'cell_start': cell['start_index'],
+                                'cell_end': cell['end_index']
+                            }
+                            break
+            
+            return element_copy
+    
+    return None
+
+
+def get_next_paragraph_index(doc_data: dict[str, Any], after_index: int = 0) -> int:
+    """
+    Find the next safe position to insert content after a given index.
+    
+    Args:
+        doc_data: Raw document data from Google Docs API
+        after_index: Index after which to find insertion point
+    
+    Returns:
+        Safe index for insertion
+    """
+    structure = parse_document_structure(doc_data)
+    
+    # Find the first paragraph element after the given index
+    for element in structure['body']:
+        if element['type'] == 'paragraph' and element['start_index'] > after_index:
+            # Insert at the end of the previous element or start of this paragraph
+            return element['start_index']
+    
+    # If no paragraph found, return the end of document
+    return structure['total_length'] - 1 if structure['total_length'] > 0 else 1
+
+
+def analyze_document_complexity(doc_data: dict[str, Any]) -> dict[str, Any]:
+    """
+    Analyze document complexity and provide statistics.
+    
+    Args:
+        doc_data: Raw document data from Google Docs API
+    
+    Returns:
+        Dictionary with document statistics
+    """
+    structure = parse_document_structure(doc_data)
+    
+    stats = {
+        'total_elements': len(structure['body']),
+        'tables': len(structure['tables']),
+        'paragraphs': sum(1 for e in structure['body'] if e.get('type') == 'paragraph'),
+        'section_breaks': sum(1 for e in structure['body'] if e.get('type') == 'section_break'),
+        'total_length': structure['total_length'],
+        'has_headers': bool(structure['headers']),
+        'has_footers': bool(structure['footers'])
+    }
+    
+    # Add table statistics
+    if structure['tables']:
+        total_cells = sum(
+            table['rows'] * table['columns'] 
+            for table in structure['tables']
+        )
+        stats['total_table_cells'] = total_cells
+        stats['largest_table'] = max(
+            (t['rows'] * t['columns'] for t in structure['tables']),
+            default=0
+        )
+    
+    return stats
\ No newline at end of file
diff --git a/gdocs/docs_tables.py b/gdocs/docs_tables.py
new file mode 100644
index 0000000..0014121
--- /dev/null
+++ b/gdocs/docs_tables.py
@@ -0,0 +1,442 @@
+"""
+Google Docs Table Operations
+
+This module provides utilities for creating and manipulating tables
+in Google Docs, including population with data and formatting.
+"""
+import logging
+from typing import Dict, Any, List, Optional, Union, Tuple
+
+logger = logging.getLogger(__name__)
+
+
+def build_table_population_requests(
+    table_info: Dict[str, Any], 
+    data: List[List[str]],
+    bold_headers: bool = True
+) -> List[Dict[str, Any]]:
+    """
+    Build batch requests to populate a table with data.
+    
+    Args:
+        table_info: Table information from document structure including cell indices
+        data: 2D array of data to insert into table
+        bold_headers: Whether to make the first row bold
+    
+    Returns:
+        List of request dictionaries for batch update
+    """
+    requests = []
+    cells = table_info.get('cells', [])
+    
+    if not cells:
+        logger.warning("No cell information found in table_info")
+        return requests
+    
+    # Process each cell - ONLY INSERT, DON'T DELETE
+    for row_idx, row_data in enumerate(data):
+        if row_idx >= len(cells):
+            logger.warning(f"Data has more rows ({len(data)}) than table ({len(cells)})")
+            break
+            
+        for col_idx, cell_text in enumerate(row_data):
+            if col_idx >= len(cells[row_idx]):
+                logger.warning(f"Data has more columns ({len(row_data)}) than table row {row_idx} ({len(cells[row_idx])})")
+                break
+            
+            cell = cells[row_idx][col_idx]
+            
+            # For new empty tables, use the insertion index
+            # For tables with existing content, check if cell only contains newline
+            existing_content = cell.get('content', '').strip()
+            
+            # Only insert if we have text to insert
+            if cell_text:
+                # Use the specific insertion index for this cell
+                insertion_index = cell.get('insertion_index', cell['start_index'] + 1)
+                
+                # If cell only contains a newline, replace it
+                if existing_content == '' or existing_content == '\n':
+                    # Cell is empty (just newline), insert at the insertion index
+                    requests.append({
+                        'insertText': {
+                            'location': {'index': insertion_index},
+                            'text': cell_text
+                        }
+                    })
+                    
+                    # Apply bold formatting to first row if requested
+                    if bold_headers and row_idx == 0:
+                        requests.append({
+                            'updateTextStyle': {
+                                'range': {
+                                    'startIndex': insertion_index,
+                                    'endIndex': insertion_index + len(cell_text)
+                                },
+                                'textStyle': {'bold': True},
+                                'fields': 'bold'
+                            }
+                        })
+                else:
+                    # Cell has content, append after existing content
+                    # Find the end of existing content
+                    cell_end = cell['end_index'] - 1  # Don't include cell end marker
+                    requests.append({
+                        'insertText': {
+                            'location': {'index': cell_end},
+                            'text': cell_text
+                        }
+                    })
+                    
+                    # Apply bold formatting to first row if requested
+                    if bold_headers and row_idx == 0:
+                        requests.append({
+                            'updateTextStyle': {
+                                'range': {
+                                    'startIndex': cell_end,
+                                    'endIndex': cell_end + len(cell_text)
+                                },
+                                'textStyle': {'bold': True},
+                                'fields': 'bold'
+                            }
+                        })
+    
+    return requests
+
+
+def calculate_cell_positions(
+    table_start_index: int, 
+    rows: int, 
+    cols: int,
+    existing_table_data: Optional[Dict[str, Any]] = None
+) -> List[List[Dict[str, int]]]:
+    """
+    Calculate estimated positions for each cell in a table.
+    
+    Args:
+        table_start_index: Starting index of the table
+        rows: Number of rows
+        cols: Number of columns
+        existing_table_data: Optional existing table data with actual positions
+    
+    Returns:
+        2D list of cell position dictionaries
+    """
+    if existing_table_data and 'cells' in existing_table_data:
+        # Use actual positions from existing table
+        return existing_table_data['cells']
+    
+    # Estimate positions for a new table
+    # Note: These are estimates; actual positions depend on content
+    cells = []
+    current_index = table_start_index + 2  # Account for table start
+    
+    for row_idx in range(rows):
+        row_cells = []
+        for col_idx in range(cols):
+            # Each cell typically starts with a paragraph marker
+            cell_start = current_index
+            cell_end = current_index + 2  # Minimum cell size
+            
+            row_cells.append({
+                'row': row_idx,
+                'column': col_idx,
+                'start_index': cell_start,
+                'end_index': cell_end
+            })
+            
+            current_index = cell_end + 1
+        
+        cells.append(row_cells)
+    
+    return cells
+
+
+def format_table_data(raw_data: Union[List[List[str]], List[str], str]) -> List[List[str]]:
+    """
+    Normalize various data formats into a 2D array for table insertion.
+    
+    Args:
+        raw_data: Data in various formats (2D list, 1D list, or delimited string)
+    
+    Returns:
+        Normalized 2D list of strings
+    """
+    if isinstance(raw_data, str):
+        # Parse delimited string (detect delimiter)
+        lines = raw_data.strip().split('\n')
+        if '\t' in raw_data:
+            # Tab-delimited
+            return [line.split('\t') for line in lines]
+        elif ',' in raw_data:
+            # Comma-delimited (simple CSV)
+            return [line.split(',') for line in lines]
+        else:
+            # Space-delimited or single column
+            return [[cell.strip() for cell in line.split()] for line in lines]
+    
+    elif isinstance(raw_data, list):
+        if not raw_data:
+            return [[]]
+        
+        # Check if it's already a 2D list
+        if isinstance(raw_data[0], list):
+            # Ensure all cells are strings
+            return [[str(cell) for cell in row] for row in raw_data]
+        else:
+            # Convert 1D list to single-column table
+            return [[str(cell)] for cell in raw_data]
+    
+    else:
+        # Convert single value to 1x1 table
+        return [[str(raw_data)]]
+
+
+def create_table_with_data(
+    index: int,
+    data: List[List[str]],
+    headers: Optional[List[str]] = None,
+    bold_headers: bool = True
+) -> List[Dict[str, Any]]:
+    """
+    Create a table and populate it with data in one operation.
+    
+    Args:
+        index: Position to insert the table
+        data: 2D array of table data
+        headers: Optional header row (will be prepended to data)
+        bold_headers: Whether to make headers bold
+    
+    Returns:
+        List of request dictionaries for batch update
+    """
+    requests = []
+    
+    # Prepare data with headers if provided
+    if headers:
+        full_data = [headers] + data
+    else:
+        full_data = data
+    
+    # Normalize the data
+    full_data = format_table_data(full_data)
+    
+    if not full_data or not full_data[0]:
+        raise ValueError("Cannot create table with empty data")
+    
+    rows = len(full_data)
+    cols = len(full_data[0])
+    
+    # Ensure all rows have the same number of columns
+    for row in full_data:
+        while len(row) < cols:
+            row.append('')
+    
+    # Create the table
+    requests.append({
+        'insertTable': {
+            'location': {'index': index},
+            'rows': rows,
+            'columns': cols
+        }
+    })
+    
+    # We need to calculate where cells will be after table creation
+    # This is approximate - better to get actual positions after creation
+    estimated_cells = calculate_cell_positions(index, rows, cols)
+    
+    # Build text insertion requests for each cell
+    # Note: In practice, we'd need to get the actual document structure
+    # after table creation to get accurate indices
+    
+    return requests
+
+
+def build_table_style_requests(
+    table_start_index: int,
+    style_options: Dict[str, Any]
+) -> List[Dict[str, Any]]:
+    """
+    Build requests to style a table.
+    
+    Args:
+        table_start_index: Starting index of the table
+        style_options: Dictionary of style options
+            - border_width: Width of borders in points
+            - border_color: RGB color for borders
+            - background_color: RGB color for cell backgrounds
+            - header_background: RGB color for header row background
+    
+    Returns:
+        List of request dictionaries for styling
+    """
+    requests = []
+    
+    # Table cell style update
+    if any(k in style_options for k in ['border_width', 'border_color', 'background_color']):
+        table_cell_style = {}
+        fields = []
+        
+        if 'border_width' in style_options:
+            border_width = {'magnitude': style_options['border_width'], 'unit': 'PT'}
+            table_cell_style['borderTop'] = {'width': border_width}
+            table_cell_style['borderBottom'] = {'width': border_width}
+            table_cell_style['borderLeft'] = {'width': border_width}
+            table_cell_style['borderRight'] = {'width': border_width}
+            fields.extend(['borderTop', 'borderBottom', 'borderLeft', 'borderRight'])
+        
+        if 'border_color' in style_options:
+            border_color = {'color': {'rgbColor': style_options['border_color']}}
+            if 'borderTop' in table_cell_style:
+                table_cell_style['borderTop']['color'] = border_color['color']
+                table_cell_style['borderBottom']['color'] = border_color['color']
+                table_cell_style['borderLeft']['color'] = border_color['color']
+                table_cell_style['borderRight']['color'] = border_color['color']
+        
+        if 'background_color' in style_options:
+            table_cell_style['backgroundColor'] = {
+                'color': {'rgbColor': style_options['background_color']}
+            }
+            fields.append('backgroundColor')
+        
+        if table_cell_style and fields:
+            requests.append({
+                'updateTableCellStyle': {
+                    'tableStartLocation': {'index': table_start_index},
+                    'tableCellStyle': table_cell_style,
+                    'fields': ','.join(fields)
+                }
+            })
+    
+    # Header row specific styling
+    if 'header_background' in style_options:
+        requests.append({
+            'updateTableCellStyle': {
+                'tableRange': {
+                    'tableCellLocation': {
+                        'tableStartLocation': {'index': table_start_index},
+                        'rowIndex': 0,
+                        'columnIndex': 0
+                    },
+                    'rowSpan': 1,
+                    'columnSpan': 100  # Large number to cover all columns
+                },
+                'tableCellStyle': {
+                    'backgroundColor': {
+                        'color': {'rgbColor': style_options['header_background']}
+                    }
+                },
+                'fields': 'backgroundColor'
+            }
+        })
+    
+    return requests
+
+
+def extract_table_as_data(table_info: Dict[str, Any]) -> List[List[str]]:
+    """
+    Extract table content as a 2D array of strings.
+    
+    Args:
+        table_info: Table information from document structure
+    
+    Returns:
+        2D list of cell contents
+    """
+    data = []
+    cells = table_info.get('cells', [])
+    
+    for row in cells:
+        row_data = []
+        for cell in row:
+            row_data.append(cell.get('content', '').strip())
+        data.append(row_data)
+    
+    return data
+
+
+def find_table_by_content(
+    tables: List[Dict[str, Any]], 
+    search_text: str,
+    case_sensitive: bool = False
+) -> Optional[int]:
+    """
+    Find a table index by searching for content within it.
+    
+    Args:
+        tables: List of table information from document
+        search_text: Text to search for in table cells
+        case_sensitive: Whether to do case-sensitive search
+    
+    Returns:
+        Index of the first matching table, or None
+    """
+    search_text = search_text if case_sensitive else search_text.lower()
+    
+    for idx, table in enumerate(tables):
+        for row in table.get('cells', []):
+            for cell in row:
+                cell_content = cell.get('content', '')
+                if not case_sensitive:
+                    cell_content = cell_content.lower()
+                
+                if search_text in cell_content:
+                    return idx
+    
+    return None
+
+
+def validate_table_data(data: List[List[str]]) -> Tuple[bool, str]:
+    """
+    Validates table data format and provides specific error messages for LLMs.
+    
+    WHAT THIS CHECKS:
+    - Data is a 2D list (list of lists)
+    - All rows have consistent column counts
+    - Dimensions are within Google Docs limits
+    - No None or undefined values
+    
+    VALID FORMAT EXAMPLE:
+    [
+        ["Header1", "Header2"],     # Row 0 - 2 columns
+        ["Data1", "Data2"],        # Row 1 - 2 columns  
+        ["Data3", "Data4"]         # Row 2 - 2 columns
+    ]
+    
+    INVALID FORMATS:
+    - [["col1"], ["col1", "col2"]]  # Inconsistent column counts
+    - ["col1", "col2"]              # Not 2D (missing inner lists)
+    - [["col1", None]]              # Contains None values
+    - [] or [[]]                    # Empty data
+    
+    Args:
+        data: 2D array of data to validate
+    
+    Returns:
+        Tuple of (is_valid, error_message_with_examples)
+    """
+    if not data:
+        return False, "Data is empty. Use format: [['col1', 'col2'], ['row1col1', 'row1col2']]"
+    
+    if not isinstance(data, list):
+        return False, f"Data must be a list, got {type(data).__name__}. Use format: [['col1', 'col2'], ['row1col1', 'row1col2']]"
+    
+    if not all(isinstance(row, list) for row in data):
+        return False, f"Data must be a 2D list (list of lists). Each row must be a list. Check your format: {data}"
+    
+    # Check for consistent column count
+    col_counts = [len(row) for row in data]
+    if len(set(col_counts)) > 1:
+        return False, f"All rows must have same number of columns. Found: {col_counts}. Fix your data format."
+    
+    # Check for reasonable size
+    rows = len(data)
+    cols = col_counts[0] if col_counts else 0
+    
+    if rows > 1000:
+        return False, f"Too many rows ({rows}). Google Docs limit is 1000 rows."
+    
+    if cols > 20:
+        return False, f"Too many columns ({cols}). Google Docs limit is 20 columns."
+    
+    return True, f"Valid table data: {rows}x{cols} table format"
\ No newline at end of file
diff --git a/gdocs/docs_tools.py b/gdocs/docs_tools.py
index 822cf4f..4e9aef1 100644
--- a/gdocs/docs_tools.py
+++ b/gdocs/docs_tools.py
@@ -15,6 +15,43 @@ from core.utils import extract_office_xml_text, handle_http_errors
 from core.server import server
 from core.comments import create_comment_tools
 
+# Import helper functions for document operations
+from gdocs.docs_helpers import (
+    create_insert_text_request,
+    create_delete_range_request,
+    create_format_text_request,
+    create_find_replace_request,
+    create_insert_table_request,
+    create_insert_page_break_request,
+    create_insert_image_request,
+    create_bullet_list_request,
+    validate_operation
+)
+
+# Import document structure and table utilities
+from gdocs.docs_structure import (
+    parse_document_structure,
+    find_tables,
+    get_table_cell_indices,
+    find_element_at_index,
+    analyze_document_complexity
+)
+from gdocs.docs_tables import (
+    build_table_population_requests,
+    format_table_data,
+    validate_table_data,
+    extract_table_as_data,
+    find_table_by_content
+)
+
+# Import operation managers for complex business logic
+from gdocs.managers import (
+    TableOperationManager,
+    HeaderFooterManager,
+    ValidationManager,
+    BatchOperationManager
+)
+
 logger = logging.getLogger(__name__)
 
 @server.tool()
@@ -273,6 +310,751 @@ async def create_doc(
     return msg
 
 
+@server.tool()
+@handle_http_errors("modify_doc_text", service_type="docs")
+@require_google_service("docs", "docs_write")
+async def modify_doc_text(
+    service,
+    user_google_email: str,
+    document_id: str,
+    start_index: int,
+    end_index: int = None,
+    text: str = None,
+    bold: bool = None,
+    italic: bool = None,
+    underline: bool = None,
+    font_size: int = None,
+    font_family: str = None,
+) -> str:
+    """
+    Modifies text in a Google Doc - can insert/replace text and/or apply formatting in a single operation.
+
+    Args:
+        user_google_email: User's Google email address
+        document_id: ID of the document to update
+        start_index: Start position for operation (0-based)
+        end_index: End position for text replacement/formatting (if not provided with text, text is inserted)
+        text: New text to insert or replace with (optional - can format existing text without changing it)
+        bold: Whether to make text bold (True/False/None to leave unchanged)
+        italic: Whether to make text italic (True/False/None to leave unchanged) 
+        underline: Whether to underline text (True/False/None to leave unchanged)
+        font_size: Font size in points
+        font_family: Font family name (e.g., "Arial", "Times New Roman")
+
+    Returns:
+        str: Confirmation message with operation details
+    """
+    logger.info(f"[modify_doc_text] Doc={document_id}, start={start_index}, end={end_index}, text={text is not None}, formatting={any([bold, italic, underline, font_size, font_family])}")
+
+    # Input validation
+    validator = ValidationManager()
+    
+    is_valid, error_msg = validator.validate_document_id(document_id)
+    if not is_valid:
+        return f"Error: {error_msg}"
+    
+    # Validate that we have something to do
+    if text is None and not any([bold is not None, italic is not None, underline is not None, font_size, font_family]):
+        return "Error: Must provide either 'text' to insert/replace, or formatting parameters (bold, italic, underline, font_size, font_family)."
+    
+    # Validate text formatting params if provided
+    if any([bold is not None, italic is not None, underline is not None, font_size, font_family]):
+        is_valid, error_msg = validator.validate_text_formatting_params(bold, italic, underline, font_size, font_family)
+        if not is_valid:
+            return f"Error: {error_msg}"
+            
+        # For formatting, we need end_index
+        if end_index is None:
+            return "Error: 'end_index' is required when applying formatting."
+            
+        is_valid, error_msg = validator.validate_index_range(start_index, end_index)
+        if not is_valid:
+            return f"Error: {error_msg}"
+
+    # Both lists are built up below and applied/reported together:
+    # `requests` feeds a single batchUpdate; `operations` feeds the summary.
+    requests = []
+    operations = []
+
+    # Handle text insertion/replacement
+    if text is not None:
+        if end_index is not None and end_index > start_index:
+            # Text replacement
+            if start_index == 0:
+                # Special case: Cannot delete at index 0 (first section break)
+                # Instead, we insert new text at index 1 and then delete the old text
+                # (the old text shifts right by len(text) after the insert,
+                # hence the adjusted delete range below).
+                requests.append(create_insert_text_request(1, text))
+                adjusted_end = end_index + len(text)
+                requests.append(create_delete_range_request(1 + len(text), adjusted_end))
+                operations.append(f"Replaced text from index {start_index} to {end_index}")
+            else:
+                # Normal replacement: delete old text, then insert new text
+                requests.extend([
+                    create_delete_range_request(start_index, end_index),
+                    create_insert_text_request(start_index, text)
+                ])
+                operations.append(f"Replaced text from index {start_index} to {end_index}")
+        else:
+            # Text insertion
+            # Index 0 is the first section break and cannot take content,
+            # so insertions at 0 are bumped to index 1.
+            actual_index = 1 if start_index == 0 else start_index
+            requests.append(create_insert_text_request(actual_index, text))
+            operations.append(f"Inserted text at index {start_index}")
+
+    # Handle formatting
+    if any([bold is not None, italic is not None, underline is not None, font_size, font_family]):
+        # Adjust range for formatting based on text operations
+        format_start = start_index
+        format_end = end_index
+        
+        if text is not None:
+            if end_index is not None and end_index > start_index:
+                # Text was replaced - format the new text
+                format_end = start_index + len(text)
+            else:
+                # Text was inserted - format the inserted text  
+                actual_index = 1 if start_index == 0 else start_index
+                format_start = actual_index
+                format_end = actual_index + len(text)
+        
+        # Handle special case for formatting at index 0
+        if format_start == 0:
+            format_start = 1
+        if format_end is not None and format_end <= format_start:
+            format_end = format_start + 1
+            
+        requests.append(create_format_text_request(format_start, format_end, bold, italic, underline, font_size, font_family))
+        
+        format_details = []
+        if bold is not None: format_details.append(f"bold={bold}")
+        if italic is not None: format_details.append(f"italic={italic}")  
+        if underline is not None: format_details.append(f"underline={underline}")
+        if font_size: format_details.append(f"font_size={font_size}")
+        if font_family: format_details.append(f"font_family={font_family}")
+        
+        operations.append(f"Applied formatting ({', '.join(format_details)}) to range {format_start}-{format_end}")
+
+    # Apply all queued requests in one batchUpdate call.
+    await asyncio.to_thread(
+        service.documents().batchUpdate(
+            documentId=document_id,
+            body={'requests': requests}
+        ).execute
+    )
+
+    link = f"https://docs.google.com/document/d/{document_id}/edit"
+    operation_summary = "; ".join(operations)
+    text_info = f" Text length: {len(text)} characters." if text else ""
+    return f"{operation_summary} in document {document_id}.{text_info} Link: {link}"
+
+@server.tool()
+@handle_http_errors("find_and_replace_doc", service_type="docs")
+@require_google_service("docs", "docs_write")
+async def find_and_replace_doc(
+    service,
+    user_google_email: str,
+    document_id: str,
+    find_text: str,
+    replace_text: str,
+    match_case: bool = False,
+) -> str:
+    """
+    Finds and replaces text throughout a Google Doc.
+
+    Args:
+        user_google_email: User's Google email address
+        document_id: ID of the document to update
+        find_text: Text to search for
+        replace_text: Text to replace with
+        match_case: Whether to match case exactly
+
+    Returns:
+        str: Confirmation message with replacement count
+    """
+    logger.info(f"[find_and_replace_doc] Doc={document_id}, find='{find_text}', replace='{replace_text}'")
+
+    requests = [create_find_replace_request(find_text, replace_text, match_case)]
+
+    result = await asyncio.to_thread(
+        service.documents().batchUpdate(
+            documentId=document_id,
+            body={'requests': requests}
+        ).execute
+    )
+
+    # Extract number of replacements from response
+    replacements = 0
+    if 'replies' in result and result['replies']:
+        reply = result['replies'][0]
+        if 'replaceAllText' in reply:
+            replacements = reply['replaceAllText'].get('occurrencesChanged', 0)
+
+    link = f"https://docs.google.com/document/d/{document_id}/edit"
+    return f"Replaced {replacements} occurrence(s) of '{find_text}' with '{replace_text}' in document {document_id}. Link: {link}"
+
+
+@server.tool()
+@handle_http_errors("insert_doc_elements", service_type="docs")
+@require_google_service("docs", "docs_write")
+async def insert_doc_elements(
+    service,
+    user_google_email: str,
+    document_id: str,
+    element_type: str,
+    index: int,
+    rows: int = None,
+    columns: int = None,
+    list_type: str = None,
+    text: str = None,
+) -> str:
+    """
+    Inserts structural elements like tables, lists, or page breaks into a Google Doc.
+
+    Args:
+        user_google_email: User's Google email address
+        document_id: ID of the document to update
+        element_type: Type of element to insert ("table", "list", "page_break")
+        index: Position to insert element (0-based)
+        rows: Number of rows for table (required for table)
+        columns: Number of columns for table (required for table)
+        list_type: Type of list ("UNORDERED", "ORDERED") (required for list)
+        text: Initial text content for list items
+
+    Returns:
+        str: Confirmation message with insertion details
+    """
+    logger.info(f"[insert_doc_elements] Doc={document_id}, type={element_type}, index={index}")
+    
+    # Handle the special case where we can't insert at the first section break
+    # If index is 0, bump it to 1 to avoid the section break
+    if index == 0:
+        logger.debug(f"Adjusting index from 0 to 1 to avoid first section break")
+        index = 1
+
+    requests = []
+
+    if element_type == "table":
+        if not rows or not columns:
+            return "Error: 'rows' and 'columns' parameters are required for table insertion."
+
+        requests.append(create_insert_table_request(index, rows, columns))
+        description = f"table ({rows}x{columns})"
+
+    elif element_type == "list":
+        if not list_type:
+            return "Error: 'list_type' parameter is required for list insertion ('UNORDERED' or 'ORDERED')."
+
+        if not text:
+            text = "List item"
+
+        # Insert text first, then create list
+        # (the bullet range covers the inserted text, excluding its newline).
+        requests.extend([
+            create_insert_text_request(index, text + '\n'),
+            create_bullet_list_request(index, index + len(text), list_type)
+        ])
+        description = f"{list_type.lower()} list"
+
+    elif element_type == "page_break":
+        requests.append(create_insert_page_break_request(index))
+        description = "page break"
+
+    else:
+        return f"Error: Unsupported element type '{element_type}'. Supported types: 'table', 'list', 'page_break'."
+
+    # Apply the queued requests in one batchUpdate call.
+    await asyncio.to_thread(
+        service.documents().batchUpdate(
+            documentId=document_id,
+            body={'requests': requests}
+        ).execute
+    )
+
+    link = f"https://docs.google.com/document/d/{document_id}/edit"
+    return f"Inserted {description} at index {index} in document {document_id}. Link: {link}"
+
+@server.tool()
+@handle_http_errors("insert_doc_image", service_type="docs")
+@require_multiple_services([
+    {"service_type": "docs", "scopes": "docs_write", "param_name": "docs_service"},
+    {"service_type": "drive", "scopes": "drive_read", "param_name": "drive_service"}
+])
+async def insert_doc_image(
+    docs_service,
+    drive_service,
+    user_google_email: str,
+    document_id: str,
+    image_source: str,
+    index: int,
+    width: int = None,
+    height: int = None,
+) -> str:
+    """
+    Inserts an image into a Google Doc from Drive or a URL.
+
+    Args:
+        user_google_email: User's Google email address
+        document_id: ID of the document to update
+        image_source: Drive file ID or public image URL
+        index: Position to insert image (0-based)
+        width: Image width in points (optional)
+        height: Image height in points (optional)
+
+    Returns:
+        str: Confirmation message with insertion details
+    """
+    logger.info(f"[insert_doc_image] Doc={document_id}, source={image_source}, index={index}")
+    
+    # Handle the special case where we can't insert at the first section break
+    # If index is 0, bump it to 1 to avoid the section break
+    if index == 0:
+        logger.debug(f"Adjusting index from 0 to 1 to avoid first section break")
+        index = 1
+
+    # Determine if source is a Drive file ID or URL
+    is_drive_file = not (image_source.startswith('http://') or image_source.startswith('https://'))
+
+    if is_drive_file:
+        # Verify Drive file exists and get metadata
+        try:
+            file_metadata = await asyncio.to_thread(
+                drive_service.files().get(
+                    fileId=image_source,
+                    fields="id, name, mimeType"
+                ).execute
+            )
+            mime_type = file_metadata.get('mimeType', '')
+            if not mime_type.startswith('image/'):
+                return f"Error: File {image_source} is not an image (MIME type: {mime_type})."
+
+            image_uri = f"https://drive.google.com/uc?id={image_source}"
+            source_description = f"Drive file {file_metadata.get('name', image_source)}"
+        except Exception as e:
+            return f"Error: Could not access Drive file {image_source}: {str(e)}"
+    else:
+        image_uri = image_source
+        source_description = "URL image"
+
+    # Use helper to create image request
+    requests = [create_insert_image_request(index, image_uri, width, height)]
+
+    await asyncio.to_thread(
+        docs_service.documents().batchUpdate(
+            documentId=document_id,
+            body={'requests': requests}
+        ).execute
+    )
+
+    size_info = ""
+    if width or height:
+        size_info = f" (size: {width or 'auto'}x{height or 'auto'} points)"
+
+    link = f"https://docs.google.com/document/d/{document_id}/edit"
+    return f"Inserted {source_description}{size_info} at index {index} in document {document_id}. Link: {link}"
+
+@server.tool()
+@handle_http_errors("update_doc_headers_footers", service_type="docs")
+@require_google_service("docs", "docs_write")
+async def update_doc_headers_footers(
+    service,
+    user_google_email: str,
+    document_id: str,
+    section_type: str,
+    content: str,
+    header_footer_type: str = "DEFAULT",
+) -> str:
+    """
+    Updates headers or footers in a Google Doc.
+
+    Args:
+        user_google_email: User's Google email address
+        document_id: ID of the document to update
+        section_type: Type of section to update ("header" or "footer")
+        content: Text content for the header/footer
+        header_footer_type: Type of header/footer ("DEFAULT", "FIRST_PAGE_ONLY", "EVEN_PAGE")
+
+    Returns:
+        str: Confirmation message with update details
+    """
+    logger.info(f"[update_doc_headers_footers] Doc={document_id}, type={section_type}")
+    
+    # Input validation
+    validator = ValidationManager()
+    
+    is_valid, error_msg = validator.validate_document_id(document_id)
+    if not is_valid:
+        return f"Error: {error_msg}"
+    
+    is_valid, error_msg = validator.validate_header_footer_params(section_type, header_footer_type)
+    if not is_valid:
+        return f"Error: {error_msg}"
+    
+    is_valid, error_msg = validator.validate_text_content(content)
+    if not is_valid:
+        return f"Error: {error_msg}"
+
+    # Use HeaderFooterManager to handle the complex logic
+    header_footer_manager = HeaderFooterManager(service)
+    
+    success, message = await header_footer_manager.update_header_footer_content(
+        document_id, section_type, content, header_footer_type
+    )
+    
+    if success:
+        link = f"https://docs.google.com/document/d/{document_id}/edit"
+        return f"{message}. Link: {link}"
+    else:
+        return f"Error: {message}"
+
+@server.tool()
+@handle_http_errors("batch_update_doc", service_type="docs")
+@require_google_service("docs", "docs_write")
+async def batch_update_doc(
+    service,
+    user_google_email: str,
+    document_id: str,
+    operations: list,
+) -> str:
+    """
+    Executes multiple document operations in a single atomic batch update.
+
+    Args:
+        user_google_email: User's Google email address
+        document_id: ID of the document to update
+        operations: List of operation dictionaries. Each operation should contain:
+                   - type: Operation type ('insert_text', 'delete_text', 'replace_text', 'format_text', 'insert_table', 'insert_page_break')
+                   - Additional parameters specific to each operation type
+
+    Example operations:
+        [
+            {"type": "insert_text", "index": 1, "text": "Hello World"},
+            {"type": "format_text", "start_index": 1, "end_index": 12, "bold": true},
+            {"type": "insert_table", "index": 20, "rows": 2, "columns": 3}
+        ]
+
+    Returns:
+        str: Confirmation message with batch operation results
+    """
+    logger.debug(f"[batch_update_doc] Doc={document_id}, operations={len(operations)}")
+    
+    # Input validation
+    validator = ValidationManager()
+    
+    is_valid, error_msg = validator.validate_document_id(document_id)
+    if not is_valid:
+        return f"Error: {error_msg}"
+    
+    is_valid, error_msg = validator.validate_batch_operations(operations)
+    if not is_valid:
+        return f"Error: {error_msg}"
+
+    # Use BatchOperationManager to handle the complex logic
+    batch_manager = BatchOperationManager(service)
+    
+    success, message, metadata = await batch_manager.execute_batch_operations(
+        document_id, operations
+    )
+    
+    if success:
+        link = f"https://docs.google.com/document/d/{document_id}/edit"
+        replies_count = metadata.get('replies_count', 0)
+        return f"{message} on document {document_id}. API replies: {replies_count}. Link: {link}"
+    else:
+        return f"Error: {message}"
+
+@server.tool()
+@handle_http_errors("inspect_doc_structure", is_read_only=True, service_type="docs")
+@require_google_service("docs", "docs_read")
+async def inspect_doc_structure(
+    service,
+    user_google_email: str,
+    document_id: str,
+    detailed: bool = False,
+) -> str:
+    """
+    Essential tool for finding safe insertion points and understanding document structure.
+
+    USE THIS FOR:
+    - Finding the correct index for table insertion
+    - Understanding document layout before making changes
+    - Locating existing tables and their positions
+    - Getting document statistics and complexity info
+
+    CRITICAL FOR TABLE OPERATIONS:
+    ALWAYS call this BEFORE creating tables to get a safe insertion index.
+    Look for "total_length" in the output - use values less than this for insertion.
+
+    WHAT THE OUTPUT SHOWS:
+    - total_elements: Number of document elements
+    - total_length: Maximum safe index for insertion
+    - tables: Number of existing tables
+    - table_details: Position and dimensions of each table
+
+    WORKFLOW:
+    Step 1: Call this function
+    Step 2: Note the "total_length" value
+    Step 3: Use an index < total_length for table insertion
+    Step 4: Create your table
+
+    Args:
+        user_google_email: User's Google email address
+        document_id: ID of the document to inspect
+        detailed: Whether to return detailed structure information
+
+    Returns:
+        str: JSON string containing document structure and safe insertion indices
+    """
+    logger.debug(f"[inspect_doc_structure] Doc={document_id}, detailed={detailed}")
+
+    # Get the document
+    doc = await asyncio.to_thread(
+        service.documents().get(documentId=document_id).execute
+    )
+
+    if detailed:
+        # Return full parsed structure
+        structure = parse_document_structure(doc)
+
+        # Simplify for JSON serialization
+        result = {
+            'title': structure['title'],
+            'total_length': structure['total_length'],
+            'statistics': {
+                'elements': len(structure['body']),
+                'tables': len(structure['tables']),
+                'paragraphs': sum(1 for e in structure['body'] if e.get('type') == 'paragraph'),
+                'has_headers': bool(structure['headers']),
+                'has_footers': bool(structure['footers'])
+            },
+            'elements': []
+        }
+
+        # Add element summaries
+        for element in structure['body']:
+            elem_summary = {
+                'type': element['type'],
+                'start_index': element['start_index'],
+                'end_index': element['end_index']
+            }
+
+            if element['type'] == 'table':
+                elem_summary['rows'] = element['rows']
+                elem_summary['columns'] = element['columns']
+                elem_summary['cell_count'] = len(element.get('cells', []))
+            elif element['type'] == 'paragraph':
+                elem_summary['text_preview'] = element.get('text', '')[:100]
+
+            result['elements'].append(elem_summary)
+
+        # Add table details
+        if structure['tables']:
+            result['tables'] = []
+            for i, table in enumerate(structure['tables']):
+                table_data = extract_table_as_data(table)
+                result['tables'].append({
+                    'index': i,
+                    'position': {'start': table['start_index'], 'end': table['end_index']},
+                    'dimensions': {'rows': table['rows'], 'columns': table['columns']},
+                    'preview': table_data[:3] if table_data else []  # First 3 rows
+                })
+
+    else:
+        # Return basic analysis
+        result = analyze_document_complexity(doc)
+
+        # Add table information
+        tables = find_tables(doc)
+        if tables:
+            result['table_details'] = []
+            for i, table in enumerate(tables):
+                result['table_details'].append({
+                    'index': i,
+                    'rows': table['rows'],
+                    'columns': table['columns'],
+                    'start_index': table['start_index'],
+                    'end_index': table['end_index']
+                })
+
+    import json
+    link = f"https://docs.google.com/document/d/{document_id}/edit"
+    return f"Document structure analysis for {document_id}:\n\n{json.dumps(result, indent=2)}\n\nLink: {link}"
+
+@server.tool()
+@handle_http_errors("create_table_with_data", service_type="docs")
+@require_google_service("docs", "docs_write")
+async def create_table_with_data(
+    service,
+    user_google_email: str,
+    document_id: str,
+    table_data: list,
+    index: int,
+    bold_headers: bool = True,
+) -> str:
+    """
+    Creates a table and populates it with data in one reliable operation.
+
+    CRITICAL: YOU MUST CALL inspect_doc_structure FIRST TO GET THE INDEX!
+
+    MANDATORY WORKFLOW - DO THESE STEPS IN ORDER:
+
+    Step 1: ALWAYS call inspect_doc_structure first
+    Step 2: Use the 'total_length' value from inspect_doc_structure as your index
+    Step 3: Format data as 2D list: [["col1", "col2"], ["row1col1", "row1col2"]]
+    Step 4: Call this function with the correct index and data
+
+    EXAMPLE DATA FORMAT:
+    table_data = [
+        ["Header1", "Header2", "Header3"],    # Row 0 - headers
+        ["Data1", "Data2", "Data3"],          # Row 1 - first data row
+        ["Data4", "Data5", "Data6"]           # Row 2 - second data row
+    ]
+
+    CRITICAL INDEX REQUIREMENTS:
+    - NEVER use index values like 1, 2, 10 without calling inspect_doc_structure first
+    - ALWAYS get index from inspect_doc_structure 'total_length' field
+    - Index must be a valid insertion point in the document
+
+    DATA FORMAT REQUIREMENTS:
+    - Must be 2D list of strings only
+    - Each inner list = one table row
+    - All rows MUST have same number of columns
+    - Use empty strings "" for empty cells, never None
+    - Use debug_table_structure after creation to verify results
+
+    Args:
+        user_google_email: User's Google email address
+        document_id: ID of the document to update
+        table_data: 2D list of strings - EXACT format: [["col1", "col2"], ["row1col1", "row1col2"]]
+        index: Document position (MANDATORY: get from inspect_doc_structure 'total_length')
+        bold_headers: Whether to make first row bold (default: true)
+
+    Returns:
+        str: Confirmation with table details and link
+    """
+    logger.debug(f"[create_table_with_data] Doc={document_id}, index={index}")
+    
+    # Input validation
+    validator = ValidationManager()
+    
+    is_valid, error_msg = validator.validate_document_id(document_id)
+    if not is_valid:
+        return f"ERROR: {error_msg}"
+    
+    is_valid, error_msg = validator.validate_table_data(table_data)
+    if not is_valid:
+        return f"ERROR: {error_msg}"
+    
+    is_valid, error_msg = validator.validate_index(index, "Index")
+    if not is_valid:
+        return f"ERROR: {error_msg}"
+
+    # Use TableOperationManager to handle the complex logic
+    table_manager = TableOperationManager(service)
+    
+    # Try to create the table, and if it fails due to index being at document end, retry with index-1
+    success, message, metadata = await table_manager.create_and_populate_table(
+        document_id, table_data, index, bold_headers
+    )
+    
+    # If it failed due to index being at or beyond document end, retry with adjusted index
+    if not success and "must be less than the end index" in message:
+        logger.debug(f"Index {index} is at document boundary, retrying with index {index - 1}")
+        success, message, metadata = await table_manager.create_and_populate_table(
+            document_id, table_data, index - 1, bold_headers
+        )
+    
+    if success:
+        link = f"https://docs.google.com/document/d/{document_id}/edit"
+        rows = metadata.get('rows', 0)
+        columns = metadata.get('columns', 0)
+        populated_cells = metadata.get('populated_cells', 0)
+        
+        return f"SUCCESS: {message}. Table: {rows}x{columns}, Index: {index}. Link: {link}"
+    else:
+        return f"ERROR: {message}"
+
+
+@server.tool()
+@handle_http_errors("debug_table_structure", is_read_only=True, service_type="docs")
+@require_google_service("docs", "docs_read")
+async def debug_table_structure(
+    service,
+    user_google_email: str,
+    document_id: str,
+    table_index: int = 0,
+) -> str:
+    """
+    ESSENTIAL DEBUGGING TOOL - Use this whenever tables don't work as expected.
+
+    USE THIS IMMEDIATELY WHEN:
+    - Table population put data in wrong cells
+    - You get "table not found" errors
+    - Data appears concatenated in first cell
+    - Need to understand existing table structure
+    - Planning to use populate_existing_table
+
+    WHAT THIS SHOWS YOU:
+    - Exact table dimensions (rows × columns)
+    - Each cell's position coordinates (row,col)
+    - Current content in each cell
+    - Insertion indices for each cell
+    - Table boundaries and ranges
+
+    HOW TO READ THE OUTPUT:
+    - "dimensions": "2x3" = 2 rows, 3 columns
+    - "position": "(0,0)" = first row, first column
+    - "current_content": What's actually in each cell right now
+    - "insertion_index": Where new text would be inserted in that cell
+
+    WORKFLOW INTEGRATION:
+    1. After creating table → Use this to verify structure
+    2. Before populating → Use this to plan your data format
+    3. After population fails → Use this to see what went wrong
+    4. When debugging → Compare your data array to actual table structure
+
+    Args:
+        user_google_email: User's Google email address
+        document_id: ID of the document to inspect
+        table_index: Which table to debug (0 = first table, 1 = second table, etc.)
+
+    Returns:
+        str: Detailed JSON structure showing table layout, cell positions, and current content
+    """
+    logger.debug(f"[debug_table_structure] Doc={document_id}, table_index={table_index}")
+
+    # Get the document
+    doc = await asyncio.to_thread(
+        service.documents().get(documentId=document_id).execute
+    )
+
+    # Find tables
+    tables = find_tables(doc)
+    if table_index >= len(tables):
+        return f"Error: Table index {table_index} not found. Document has {len(tables)} table(s)."
+
+    table_info = tables[table_index]
+
+    import json
+
+    # Extract detailed cell information
+    debug_info = {
+        'table_index': table_index,
+        'dimensions': f"{table_info['rows']}x{table_info['columns']}",
+        'table_range': f"[{table_info['start_index']}-{table_info['end_index']}]",
+        'cells': []
+    }
+
+    for row_idx, row in enumerate(table_info['cells']):
+        row_info = []
+        for col_idx, cell in enumerate(row):
+            cell_debug = {
+                'position': f"({row_idx},{col_idx})",
+                'range': f"[{cell['start_index']}-{cell['end_index']}]",
+                'insertion_index': cell.get('insertion_index', 'N/A'),
+                'current_content': repr(cell.get('content', '')),
+                'content_elements_count': len(cell.get('content_elements', []))
+            }
+            row_info.append(cell_debug)
+        debug_info['cells'].append(row_info)
+
+    link = f"https://docs.google.com/document/d/{document_id}/edit"
+    return f"Table structure debug for table {table_index}:\n\n{json.dumps(debug_info, indent=2)}\n\nLink: {link}"
+
+
 # Create comment management tools for documents
 _comment_tools = create_comment_tools("document", "document_id")
 
diff --git a/gdocs/managers/__init__.py b/gdocs/managers/__init__.py
new file mode 100644
index 0000000..274d09c
--- /dev/null
+++ b/gdocs/managers/__init__.py
@@ -0,0 +1,18 @@
+"""
+Google Docs Operation Managers
+
+This package provides high-level manager classes for complex Google Docs operations,
+extracting business logic from the main tools module to improve maintainability.
+"""
+
+from .table_operation_manager import TableOperationManager
+from .header_footer_manager import HeaderFooterManager
+from .validation_manager import ValidationManager
+from .batch_operation_manager import BatchOperationManager
+
+__all__ = [
+    'TableOperationManager',
+    'HeaderFooterManager', 
+    'ValidationManager',
+    'BatchOperationManager'
+]
\ No newline at end of file
diff --git a/gdocs/managers/batch_operation_manager.py b/gdocs/managers/batch_operation_manager.py
new file mode 100644
index 0000000..f06dfbb
--- /dev/null
+++ b/gdocs/managers/batch_operation_manager.py
@@ -0,0 +1,303 @@
+"""
+Batch Operation Manager
+
+This module provides high-level batch operation management for Google Docs,
+extracting complex validation and request building logic.
+"""
+import logging
+import asyncio
+from typing import Any, Union, Dict, List, Tuple
+
+from gdocs.docs_helpers import (
+    create_insert_text_request,
+    create_delete_range_request,
+    create_format_text_request,
+    create_find_replace_request,
+    create_insert_table_request,
+    create_insert_page_break_request,
+    validate_operation
+)
+
+logger = logging.getLogger(__name__)
+
+
+class BatchOperationManager:
+    """
+    High-level manager for Google Docs batch operations.
+    
+    Handles complex multi-operation requests including:
+    - Operation validation and request building
+    - Batch execution with proper error handling
+    - Operation result processing and reporting
+    """
+    
+    def __init__(self, service):
+        """
+        Initialize the batch operation manager.
+        
+        Args:
+            service: Google Docs API service instance
+        """
+        self.service = service
+        
+    async def execute_batch_operations(
+        self,
+        document_id: str,
+        operations: list[dict[str, Any]]
+    ) -> tuple[bool, str, dict[str, Any]]:
+        """
+        Execute multiple document operations in a single atomic batch.
+        
+        This method extracts the complex logic from batch_update_doc tool function.
+        
+        Args:
+            document_id: ID of the document to update
+            operations: List of operation dictionaries
+            
+        Returns:
+            Tuple of (success, message, metadata)
+        """
+        logger.info(f"Executing batch operations on document {document_id}")
+        logger.info(f"Operations count: {len(operations)}")
+        
+        if not operations:
+            return False, "No operations provided. Please provide at least one operation.", {}
+            
+        try:
+            # Validate and build requests
+            requests, operation_descriptions = await self._validate_and_build_requests(operations)
+            
+            if not requests:
+                return False, "No valid requests could be built from operations", {}
+            
+            # Execute the batch
+            result = await self._execute_batch_requests(document_id, requests)
+            
+            # Process results
+            metadata = {
+                'operations_count': len(operations),
+                'requests_count': len(requests),
+                'replies_count': len(result.get('replies', [])),
+                'operation_summary': operation_descriptions[:5]  # First 5 operations
+            }
+            
+            summary = self._build_operation_summary(operation_descriptions)
+            
+            return True, f"Successfully executed {len(operations)} operations ({summary})", metadata
+            
+        except Exception as e:
+            logger.error(f"Failed to execute batch operations: {str(e)}")
+            return False, f"Batch operation failed: {str(e)}", {}
+    
+    async def _validate_and_build_requests(
+        self,
+        operations: list[dict[str, Any]]
+    ) -> tuple[list[dict[str, Any]], list[str]]:
+        """
+        Validate operations and build API requests.
+        
+        Args:
+            operations: List of operation dictionaries
+            
+        Returns:
+            Tuple of (requests, operation_descriptions)
+        """
+        requests = []
+        operation_descriptions = []
+        
+        for i, op in enumerate(operations):
+            # Validate operation structure
+            is_valid, error_msg = validate_operation(op)
+            if not is_valid:
+                raise ValueError(f"Operation {i+1}: {error_msg}")
+            
+            op_type = op.get('type')
+            
+            try:
+                # Build request based on operation type
+                result = self._build_operation_request(op, op_type)
+                
+                # Handle both single request and list of requests
+                if isinstance(result[0], list):
+                    # Multiple requests (e.g., replace_text)
+                    for req in result[0]:
+                        requests.append(req)
+                    operation_descriptions.append(result[1])
+                elif result[0]:
+                    # Single request
+                    requests.append(result[0])
+                    operation_descriptions.append(result[1])
+                    
+            except KeyError as e:
+                raise ValueError(f"Operation {i+1} ({op_type}) missing required field: {e}")
+            except Exception as e:
+                raise ValueError(f"Operation {i+1} ({op_type}) failed validation: {str(e)}")
+                
+        return requests, operation_descriptions
+    
+    def _build_operation_request(
+        self,
+        op: dict[str, Any],
+        op_type: str
+    ) -> Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], str]:
+        """
+        Build a single operation request.
+        
+        Args:
+            op: Operation dictionary
+            op_type: Operation type
+            
+        Returns:
+            Tuple of (request, description)
+        """
+        if op_type == 'insert_text':
+            request = create_insert_text_request(op['index'], op['text'])
+            description = f"insert text at {op['index']}"
+            
+        elif op_type == 'delete_text':
+            request = create_delete_range_request(op['start_index'], op['end_index'])
+            description = f"delete text {op['start_index']}-{op['end_index']}"
+            
+        elif op_type == 'replace_text':
+            # Replace is delete + insert (must be done in this order)
+            delete_request = create_delete_range_request(op['start_index'], op['end_index'])
+            insert_request = create_insert_text_request(op['start_index'], op['text'])
+            # Return both requests as a list
+            request = [delete_request, insert_request]
+            description = f"replace text {op['start_index']}-{op['end_index']} with '{op['text'][:20]}{'...' if len(op['text']) > 20 else ''}'"
+            
+        elif op_type == 'format_text':
+            request = create_format_text_request(
+                op['start_index'], op['end_index'],
+                op.get('bold'), op.get('italic'), op.get('underline'),
+                op.get('font_size'), op.get('font_family')
+            )
+            
+            if not request:
+                raise ValueError("No formatting options provided")
+                
+            # Build format description
+            format_changes = []
+            for param, name in [
+                ('bold', 'bold'), ('italic', 'italic'), ('underline', 'underline'),
+                ('font_size', 'font size'), ('font_family', 'font family')
+            ]:
+                if op.get(param) is not None:
+                    value = f"{op[param]}pt" if param == 'font_size' else op[param]
+                    format_changes.append(f"{name}: {value}")
+                    
+            description = f"format text {op['start_index']}-{op['end_index']} ({', '.join(format_changes)})"
+            
+        elif op_type == 'insert_table':
+            request = create_insert_table_request(op['index'], op['rows'], op['columns'])
+            description = f"insert {op['rows']}x{op['columns']} table at {op['index']}"
+            
+        elif op_type == 'insert_page_break':
+            request = create_insert_page_break_request(op['index'])
+            description = f"insert page break at {op['index']}"
+            
+        elif op_type == 'find_replace':
+            request = create_find_replace_request(
+                op['find_text'], op['replace_text'], op.get('match_case', False)
+            )
+            description = f"find/replace '{op['find_text']}' → '{op['replace_text']}'"
+            
+        else:
+            supported_types = [
+                'insert_text', 'delete_text', 'replace_text', 'format_text',
+                'insert_table', 'insert_page_break', 'find_replace'
+            ]
+            raise ValueError(f"Unsupported operation type '{op_type}'. Supported: {', '.join(supported_types)}")
+            
+        return request, description
+    
+    async def _execute_batch_requests(
+        self,
+        document_id: str,
+        requests: list[dict[str, Any]]
+    ) -> dict[str, Any]:
+        """
+        Execute the batch requests against the Google Docs API.
+        
+        Args:
+            document_id: Document ID
+            requests: List of API requests
+            
+        Returns:
+            API response
+        """
+        return await asyncio.to_thread(
+            self.service.documents().batchUpdate(
+                documentId=document_id,
+                body={'requests': requests}
+            ).execute
+        )
+    
+    def _build_operation_summary(self, operation_descriptions: list[str]) -> str:
+        """
+        Build a concise summary of operations performed.
+        
+        Args:
+            operation_descriptions: List of operation descriptions
+            
+        Returns:
+            Summary string
+        """
+        if not operation_descriptions:
+            return "no operations"
+            
+        summary_items = operation_descriptions[:3]  # Show first 3 operations
+        summary = ', '.join(summary_items)
+        
+        if len(operation_descriptions) > 3:
+            remaining = len(operation_descriptions) - 3
+            summary += f" and {remaining} more operation{'s' if remaining > 1 else ''}"
+            
+        return summary
+    
+    def get_supported_operations(self) -> dict[str, Any]:
+        """
+        Get information about supported batch operations.
+        
+        Returns:
+            Dictionary with supported operation types and their required parameters
+        """
+        return {
+            'supported_operations': {
+                'insert_text': {
+                    'required': ['index', 'text'],
+                    'description': 'Insert text at specified index'
+                },
+                'delete_text': {
+                    'required': ['start_index', 'end_index'],
+                    'description': 'Delete text in specified range'
+                },
+                'replace_text': {
+                    'required': ['start_index', 'end_index', 'text'],
+                    'description': 'Replace text in range with new text'
+                },
+                'format_text': {
+                    'required': ['start_index', 'end_index'],
+                    'optional': ['bold', 'italic', 'underline', 'font_size', 'font_family'],
+                    'description': 'Apply formatting to text range'
+                },
+                'insert_table': {
+                    'required': ['index', 'rows', 'columns'],
+                    'description': 'Insert table at specified index'
+                },
+                'insert_page_break': {
+                    'required': ['index'],
+                    'description': 'Insert page break at specified index'
+                },
+                'find_replace': {
+                    'required': ['find_text', 'replace_text'],
+                    'optional': ['match_case'],
+                    'description': 'Find and replace text throughout document'
+                }
+            },
+            'example_operations': [
+                {"type": "insert_text", "index": 1, "text": "Hello World"},
+                {"type": "format_text", "start_index": 1, "end_index": 12, "bold": True},
+                {"type": "insert_table", "index": 20, "rows": 2, "columns": 3}
+            ]
+        }
\ No newline at end of file
diff --git a/gdocs/managers/header_footer_manager.py b/gdocs/managers/header_footer_manager.py
new file mode 100644
index 0000000..31715e1
--- /dev/null
+++ b/gdocs/managers/header_footer_manager.py
@@ -0,0 +1,332 @@
+"""
+Header Footer Manager
+
+This module provides high-level operations for managing headers and footers
+in Google Docs, extracting complex logic from the main tools module.
+"""
+import logging
+import asyncio
+from typing import Any, Optional
+
+logger = logging.getLogger(__name__)
+
+
+class HeaderFooterManager:
+    """
+    High-level manager for Google Docs header and footer operations.
+    
+    Handles complex header/footer operations including:
+    - Finding and updating existing headers/footers
+    - Content replacement with proper range calculation
+    - Section type management
+    """
+    
+    def __init__(self, service):
+        """
+        Initialize the header footer manager.
+        
+        Args:
+            service: Google Docs API service instance
+        """
+        self.service = service
+    
+    async def update_header_footer_content(
+        self,
+        document_id: str,
+        section_type: str,
+        content: str,
+        header_footer_type: str = "DEFAULT"
+    ) -> tuple[bool, str]:
+        """
+        Updates header or footer content in a document.
+        
+        This method extracts the complex logic from update_doc_headers_footers tool function.
+        
+        Args:
+            document_id: ID of the document to update
+            section_type: Type of section ("header" or "footer")
+            content: New content for the section
+            header_footer_type: Type of header/footer ("DEFAULT", "FIRST_PAGE_ONLY", "EVEN_PAGE")
+            
+        Returns:
+            Tuple of (success, message)
+        """
+        logger.info(f"Updating {section_type} in document {document_id}")
+        
+        # Validate section type
+        if section_type not in ["header", "footer"]:
+            return False, "section_type must be 'header' or 'footer'"
+            
+        # Validate header/footer type
+        if header_footer_type not in ["DEFAULT", "FIRST_PAGE_ONLY", "EVEN_PAGE"]:
+            return False, "header_footer_type must be 'DEFAULT', 'FIRST_PAGE_ONLY', or 'EVEN_PAGE'"
+        
+        try:
+            # Get document structure
+            doc = await self._get_document(document_id)
+            
+            # Find the target section
+            target_section, section_id = await self._find_target_section(
+                doc, section_type, header_footer_type
+            )
+            
+            if not target_section:
+                return False, f"No {section_type} found in document. Please create a {section_type} first in Google Docs."
+            
+            # Update the content
+            success = await self._replace_section_content(document_id, target_section, content)
+            
+            if success:
+                return True, f"Updated {section_type} content in document {document_id}"
+            else:
+                return False, f"Could not find content structure in {section_type} to update"
+                
+        except Exception as e:
+            logger.error(f"Failed to update {section_type}: {str(e)}")
+            return False, f"Failed to update {section_type}: {str(e)}"
+    
+    async def _get_document(self, document_id: str) -> dict[str, Any]:
+        """Get the full document data."""
+        return await asyncio.to_thread(
+            self.service.documents().get(documentId=document_id).execute
+        )
+    
+    async def _find_target_section(
+        self,
+        doc: dict[str, Any],
+        section_type: str,
+        header_footer_type: str
+    ) -> tuple[Optional[dict[str, Any]], Optional[str]]:
+        """
+        Find the target header or footer section.
+        
+        Args:
+            doc: Document data
+            section_type: "header" or "footer"
+            header_footer_type: Type of header/footer
+            
+        Returns:
+            Tuple of (section_data, section_id) or (None, None) if not found
+        """
+        if section_type == "header":
+            sections = doc.get('headers', {})
+        else:
+            sections = doc.get('footers', {})
+        
+        # Try to match section based on header_footer_type
+        # Google Docs API typically uses section IDs that correspond to types
+        
+        # First, try to find an exact match based on common patterns
+        for section_id, section_data in sections.items():
+            # Check if section_data contains type information
+            if 'type' in section_data and section_data['type'] == header_footer_type:
+                return section_data, section_id
+        
+        # If no exact match, try pattern matching on section ID
+        # Google Docs often uses predictable section ID patterns
+        target_patterns = {
+            "DEFAULT": ["default", "kix"],  # DEFAULT headers often have these patterns
+            "FIRST_PAGE": ["first", "firstpage"],
+            "EVEN_PAGE": ["even", "evenpage"],
+            "FIRST_PAGE_ONLY": ["first", "firstpage"]  # Legacy support
+        }
+        
+        patterns = target_patterns.get(header_footer_type, [])
+        for pattern in patterns:
+            for section_id, section_data in sections.items():
+                if pattern.lower() in section_id.lower():
+                    return section_data, section_id
+        
+        # If still no match, return the first available section as fallback
+        # This maintains backward compatibility
+        for section_id, section_data in sections.items():
+            return section_data, section_id
+            
+        return None, None
+    
+    async def _replace_section_content(
+        self,
+        document_id: str,
+        section: dict[str, Any],
+        new_content: str
+    ) -> bool:
+        """
+        Replace the content in a header or footer section.
+        
+        Args:
+            document_id: Document ID
+            section: Section data containing content elements
+            new_content: New content to insert
+            
+        Returns:
+            True if successful, False otherwise
+        """
+        content_elements = section.get('content', [])
+        if not content_elements:
+            return False
+            
+        # Find the first paragraph to replace content
+        first_para = self._find_first_paragraph(content_elements)
+        if not first_para:
+            return False
+        
+        # Calculate content range
+        start_index = first_para.get('startIndex', 0)
+        end_index = first_para.get('endIndex', 0)
+        
+        # Build requests to replace content
+        requests = []
+        
+        # Delete existing content if any (preserve paragraph structure)
+        if end_index > start_index:
+            requests.append({
+                'deleteContentRange': {
+                    'range': {
+                        'startIndex': start_index,
+                        'endIndex': end_index - 1  # Keep the paragraph end marker
+                    }
+                }
+            })
+        
+        # Insert new content
+        requests.append({
+            'insertText': {
+                'location': {'index': start_index},
+                'text': new_content
+            }
+        })
+        
+        try:
+            await asyncio.to_thread(
+                self.service.documents().batchUpdate(
+                    documentId=document_id,
+                    body={'requests': requests}
+                ).execute
+            )
+            return True
+            
+        except Exception as e:
+            logger.error(f"Failed to replace section content: {str(e)}")
+            return False
+    
+    def _find_first_paragraph(self, content_elements: list[dict[str, Any]]) -> Optional[dict[str, Any]]:
+        """Find the first paragraph element in content."""
+        for element in content_elements:
+            if 'paragraph' in element:
+                return element
+        return None
+    
+    async def get_header_footer_info(
+        self,
+        document_id: str
+    ) -> dict[str, Any]:
+        """
+        Get information about all headers and footers in the document.
+        
+        Args:
+            document_id: Document ID
+            
+        Returns:
+            Dictionary with header and footer information
+        """
+        try:
+            doc = await self._get_document(document_id)
+            
+            headers_info = {}
+            for header_id, header_data in doc.get('headers', {}).items():
+                headers_info[header_id] = self._extract_section_info(header_data)
+            
+            footers_info = {}
+            for footer_id, footer_data in doc.get('footers', {}).items():
+                footers_info[footer_id] = self._extract_section_info(footer_data)
+            
+            return {
+                'headers': headers_info,
+                'footers': footers_info,
+                'has_headers': bool(headers_info),
+                'has_footers': bool(footers_info)
+            }
+            
+        except Exception as e:
+            logger.error(f"Failed to get header/footer info: {str(e)}")
+            return {'error': str(e)}
+    
+    def _extract_section_info(self, section_data: dict[str, Any]) -> dict[str, Any]:
+        """Extract useful information from a header/footer section."""
+        content_elements = section_data.get('content', [])
+        
+        # Extract text content
+        text_content = ""
+        for element in content_elements:
+            if 'paragraph' in element:
+                para = element['paragraph']
+                for para_element in para.get('elements', []):
+                    if 'textRun' in para_element:
+                        text_content += para_element['textRun'].get('content', '')
+        
+        return {
+            'content_preview': text_content[:100] if text_content else "(empty)",
+            'element_count': len(content_elements),
+            'start_index': content_elements[0].get('startIndex', 0) if content_elements else 0,
+            'end_index': content_elements[-1].get('endIndex', 0) if content_elements else 0
+        }
+    
+    async def create_header_footer(
+        self,
+        document_id: str,
+        section_type: str,
+        header_footer_type: str = "DEFAULT"
+    ) -> tuple[bool, str]:
+        """
+        Create a new header or footer section.
+        
+        Args:
+            document_id: Document ID
+            section_type: "header" or "footer"
+            header_footer_type: Type of header/footer ("DEFAULT", "FIRST_PAGE", or "EVEN_PAGE")
+            
+        Returns:
+            Tuple of (success, message)
+        """
+        if section_type not in ["header", "footer"]:
+            return False, "section_type must be 'header' or 'footer'"
+        
+        # Map our type names to API type names
+        type_mapping = {
+            "DEFAULT": "DEFAULT",
+            "FIRST_PAGE": "FIRST_PAGE",
+            "EVEN_PAGE": "EVEN_PAGE",
+            "FIRST_PAGE_ONLY": "FIRST_PAGE"  # Support legacy name
+        }
+        
+        api_type = type_mapping.get(header_footer_type, header_footer_type)
+        if api_type not in ["DEFAULT", "FIRST_PAGE", "EVEN_PAGE"]:
+            return False, "header_footer_type must be 'DEFAULT', 'FIRST_PAGE', or 'EVEN_PAGE'"
+        
+        try:
+            # Build the request
+            request = {
+                'type': api_type
+            }
+            
+            # Create the appropriate request type
+            if section_type == "header":
+                batch_request = {'createHeader': request}
+            else:
+                batch_request = {'createFooter': request}
+            
+            # Execute the request
+            await asyncio.to_thread(
+                self.service.documents().batchUpdate(
+                    documentId=document_id,
+                    body={'requests': [batch_request]}
+                ).execute
+            )
+            
+            return True, f"Successfully created {section_type} with type {api_type}"
+            
+        except Exception as e:
+            error_msg = str(e)
+            if "already exists" in error_msg.lower():
+                return False, f"A {section_type} of type {api_type} already exists in the document"
+            return False, f"Failed to create {section_type}: {error_msg}"
\ No newline at end of file
diff --git a/gdocs/managers/table_operation_manager.py b/gdocs/managers/table_operation_manager.py
new file mode 100644
index 0000000..b00a9cb
--- /dev/null
+++ b/gdocs/managers/table_operation_manager.py
@@ -0,0 +1,341 @@
+"""
+Table Operation Manager
+
+This module provides high-level table operations that orchestrate
+multiple Google Docs API calls for complex table manipulations.
+"""
+import logging
+import asyncio
+from typing import List, Dict, Any, Optional, Tuple
+
+from gdocs.docs_helpers import create_insert_table_request
+from gdocs.docs_structure import find_tables
+from gdocs.docs_tables import validate_table_data
+
+logger = logging.getLogger(__name__)
+
+
+class TableOperationManager:
+    """
+    High-level manager for Google Docs table operations.
+    
+    Handles complex multi-step table operations including:
+    - Creating tables with data population
+    - Populating existing tables
+    - Managing cell-by-cell operations with proper index refreshing
+    """
+    
+    def __init__(self, service):
+        """
+        Initialize the table operation manager.
+        
+        Args:
+            service: Google Docs API service instance
+        """
+        self.service = service
+        
+    async def create_and_populate_table(
+        self,
+        document_id: str,
+        table_data: List[List[str]],
+        index: int,
+        bold_headers: bool = True
+    ) -> Tuple[bool, str, Dict[str, Any]]:
+        """
+        Creates a table and populates it with data in a reliable multi-step process.
+        
+        This method extracts the complex logic from create_table_with_data tool function.
+        
+        Args:
+            document_id: ID of the document to update
+            table_data: 2D list of strings for table content
+            index: Position to insert the table
+            bold_headers: Whether to make the first row bold
+            
+        Returns:
+            Tuple of (success, message, metadata)
+        """
+        logger.debug(f"Creating table at index {index}, dimensions: {len(table_data)}x{len(table_data[0]) if table_data and len(table_data) > 0 else 0}")
+        
+        # Validate input data
+        is_valid, error_msg = validate_table_data(table_data)
+        if not is_valid:
+            return False, f"Invalid table data: {error_msg}", {}
+            
+        rows = len(table_data)
+        cols = len(table_data[0])
+        
+        try:
+            # Step 1: Create empty table
+            await self._create_empty_table(document_id, index, rows, cols)
+            
+            # Step 2: Get fresh document structure to find actual cell positions
+            fresh_tables = await self._get_document_tables(document_id)
+            if not fresh_tables:
+                return False, "Could not find table after creation", {}
+                
+            # Use the last table (newly created one)
+            table_info = fresh_tables[-1]
+            
+            # Step 3: Populate each cell with proper index refreshing
+            population_count = await self._populate_table_cells(
+                document_id, table_data, bold_headers
+            )
+            
+            metadata = {
+                'rows': rows,
+                'columns': cols,
+                'populated_cells': population_count,
+                'table_index': len(fresh_tables) - 1
+            }
+            
+            return True, f"Successfully created {rows}x{cols} table and populated {population_count} cells", metadata
+            
+        except Exception as e:
+            logger.error(f"Failed to create and populate table: {str(e)}")
+            return False, f"Table creation failed: {str(e)}", {}
+    
+    async def _create_empty_table(
+        self, 
+        document_id: str, 
+        index: int, 
+        rows: int, 
+        cols: int
+    ) -> None:
+        """Create an empty table at the specified index."""
+        logger.debug(f"Creating {rows}x{cols} table at index {index}")
+        
+        await asyncio.to_thread(
+            self.service.documents().batchUpdate(
+                documentId=document_id,
+                body={'requests': [create_insert_table_request(index, rows, cols)]}
+            ).execute
+        )
+        
+    async def _get_document_tables(self, document_id: str) -> List[Dict[str, Any]]:
+        """Get fresh document structure and extract table information."""
+        doc = await asyncio.to_thread(
+            self.service.documents().get(documentId=document_id).execute
+        )
+        return find_tables(doc)
+    
+    async def _populate_table_cells(
+        self,
+        document_id: str,
+        table_data: List[List[str]],
+        bold_headers: bool
+    ) -> int:
+        """
+        Populate table cells with data, refreshing structure after each insertion.
+        
+        This prevents index shifting issues by getting fresh cell positions
+        before each insertion.
+        """
+        population_count = 0
+        
+        for row_idx, row_data in enumerate(table_data):
+            logger.debug(f"Processing row {row_idx}: {len(row_data)} cells")
+            
+            for col_idx, cell_text in enumerate(row_data):
+                if not cell_text:  # Skip empty cells
+                    continue
+                    
+                try:
+                    # CRITICAL: Refresh document structure before each insertion
+                    success = await self._populate_single_cell(
+                        document_id, row_idx, col_idx, cell_text, bold_headers and row_idx == 0
+                    )
+                    
+                    if success:
+                        population_count += 1
+                        logger.debug(f"Populated cell ({row_idx},{col_idx})")
+                    else:
+                        logger.warning(f"Failed to populate cell ({row_idx},{col_idx})")
+                        
+                except Exception as e:
+                    logger.error(f"Error populating cell ({row_idx},{col_idx}): {str(e)}")
+                    
+        return population_count
+    
+    async def _populate_single_cell(
+        self,
+        document_id: str,
+        row_idx: int,
+        col_idx: int,
+        cell_text: str,
+        apply_bold: bool = False
+    ) -> bool:
+        """
+        Populate a single cell with text, with optional bold formatting.
+        
+        Returns True if successful, False otherwise.
+        """
+        try:
+            # Get fresh table structure to avoid index shifting issues
+            tables = await self._get_document_tables(document_id)
+            if not tables:
+                return False
+                
+            table = tables[-1]  # Use the last table (newly created one)
+            cells = table.get('cells', [])
+            
+            # Bounds checking
+            if row_idx >= len(cells) or col_idx >= len(cells[row_idx]):
+                logger.error(f"Cell ({row_idx},{col_idx}) out of bounds")
+                return False
+                
+            cell = cells[row_idx][col_idx]
+            insertion_index = cell.get('insertion_index')
+            
+            if not insertion_index:
+                logger.warning(f"No insertion_index for cell ({row_idx},{col_idx})")
+                return False
+                
+            # Insert text
+            await asyncio.to_thread(
+                self.service.documents().batchUpdate(
+                    documentId=document_id,
+                    body={'requests': [{
+                        'insertText': {
+                            'location': {'index': insertion_index},
+                            'text': cell_text
+                        }
+                    }]}
+                ).execute
+            )
+            
+            # Apply bold formatting if requested
+            if apply_bold:
+                await self._apply_bold_formatting(
+                    document_id, insertion_index, insertion_index + len(cell_text)
+                )
+                
+            return True
+            
+        except Exception as e:
+            logger.error(f"Failed to populate single cell: {str(e)}")
+            return False
+    
+    async def _apply_bold_formatting(
+        self,
+        document_id: str,
+        start_index: int,
+        end_index: int
+    ) -> None:
+        """Apply bold formatting to a text range."""
+        await asyncio.to_thread(
+            self.service.documents().batchUpdate(
+                documentId=document_id,
+                body={'requests': [{
+                    'updateTextStyle': {
+                        'range': {
+                            'startIndex': start_index,
+                            'endIndex': end_index
+                        },
+                        'textStyle': {'bold': True},
+                        'fields': 'bold'
+                    }
+                }]}
+            ).execute
+        )
+    
+    async def populate_existing_table(
+        self,
+        document_id: str,
+        table_index: int,
+        table_data: List[List[str]],
+        clear_existing: bool = False
+    ) -> Tuple[bool, str, Dict[str, Any]]:
+        """
+        Populate an existing table with data.
+        
+        Args:
+            document_id: ID of the document
+            table_index: Index of the table to populate (0-based)
+            table_data: 2D list of data to insert
+            clear_existing: Whether to clear existing content first
+            
+        Returns:
+            Tuple of (success, message, metadata)
+        """
+        try:
+            tables = await self._get_document_tables(document_id)
+            if table_index >= len(tables):
+                return False, f"Table index {table_index} not found. Document has {len(tables)} tables", {}
+                
+            table_info = tables[table_index]
+            
+            # Validate dimensions
+            table_rows = table_info['rows']
+            table_cols = table_info['columns']
+            data_rows = len(table_data)
+            data_cols = len(table_data[0]) if table_data else 0
+            
+            if data_rows > table_rows or data_cols > table_cols:
+                return False, f"Data ({data_rows}x{data_cols}) exceeds table dimensions ({table_rows}x{table_cols})", {}
+            
+            # Populate cells
+            population_count = await self._populate_existing_table_cells(
+                document_id, table_index, table_data
+            )
+            
+            metadata = {
+                'table_index': table_index,
+                'populated_cells': population_count,
+                'table_dimensions': f"{table_rows}x{table_cols}",
+                'data_dimensions': f"{data_rows}x{data_cols}"
+            }
+            
+            return True, f"Successfully populated {population_count} cells in existing table", metadata
+            
+        except Exception as e:
+            return False, f"Failed to populate existing table: {str(e)}", {}
+    
+    async def _populate_existing_table_cells(
+        self,
+        document_id: str,
+        table_index: int,
+        table_data: List[List[str]]
+    ) -> int:
+        """Populate cells in an existing table."""
+        population_count = 0
+        
+        for row_idx, row_data in enumerate(table_data):
+            for col_idx, cell_text in enumerate(row_data):
+                if not cell_text:
+                    continue
+                    
+                # Get fresh table structure for each cell
+                tables = await self._get_document_tables(document_id)
+                if table_index >= len(tables):
+                    break
+                    
+                table = tables[table_index]
+                cells = table.get('cells', [])
+                
+                if row_idx >= len(cells) or col_idx >= len(cells[row_idx]):
+                    continue
+                    
+                cell = cells[row_idx][col_idx]
+                
+                # For existing tables, append to existing content
+                cell_end = cell['end_index'] - 1  # Don't include cell end marker
+                
+                try:
+                    await asyncio.to_thread(
+                        self.service.documents().batchUpdate(
+                            documentId=document_id,
+                            body={'requests': [{
+                                'insertText': {
+                                    'location': {'index': cell_end},
+                                    'text': cell_text
+                                }
+                            }]}
+                        ).execute
+                    )
+                    population_count += 1
+                    
+                except Exception as e:
+                    logger.error(f"Failed to populate existing cell ({row_idx},{col_idx}): {str(e)}")
+                    
+        return population_count
\ No newline at end of file
diff --git a/gdocs/managers/validation_manager.py b/gdocs/managers/validation_manager.py
new file mode 100644
index 0000000..b751343
--- /dev/null
+++ b/gdocs/managers/validation_manager.py
@@ -0,0 +1,381 @@
+"""
+Validation Manager
+
+This module provides centralized validation logic for Google Docs operations,
+extracting validation patterns from individual tool functions.
+"""
+import logging
+from typing import Dict, Any, List, Tuple, Optional, Union
+import re
+
+logger = logging.getLogger(__name__)
+
+
+class ValidationManager:
+    """
+    Centralized validation manager for Google Docs operations.
+    
+    Provides consistent validation patterns and error messages across
+    all document operations, reducing code duplication and improving
+    error message quality.
+    """
+    
+    def __init__(self):
+        """Initialize the validation manager."""
+        self.validation_rules = self._setup_validation_rules()
+    
+    def _setup_validation_rules(self) -> Dict[str, Any]:
+        """Setup validation rules and constraints."""
+        return {
+            'table_max_rows': 1000,
+            'table_max_columns': 20,
+            'document_id_pattern': r'^[a-zA-Z0-9-_]+$',
+            'max_text_length': 1000000,  # 1MB text limit
+            'font_size_range': (1, 400),  # Google Docs font size limits
+            'valid_header_footer_types': ["DEFAULT", "FIRST_PAGE_ONLY", "EVEN_PAGE"],
+            'valid_section_types': ["header", "footer"],
+            'valid_list_types': ["UNORDERED", "ORDERED"],
+            'valid_element_types': ["table", "list", "page_break"]
+        }
+    
+    def validate_document_id(self, document_id: str) -> Tuple[bool, str]:
+        """
+        Validate Google Docs document ID format.
+        
+        Args:
+            document_id: Document ID to validate
+            
+        Returns:
+            Tuple of (is_valid, error_message)
+        """
+        if not document_id:
+            return False, "Document ID cannot be empty"
+            
+        if not isinstance(document_id, str):
+            return False, f"Document ID must be a string, got {type(document_id).__name__}"
+        
+        # Basic length and character-set check (Google Docs IDs are typically 40+ URL-safe characters)
+        if len(document_id) < 20 or not re.match(self.validation_rules['document_id_pattern'], document_id):
+            return False, "Document ID appears invalid: too short or contains characters outside [a-zA-Z0-9-_]"
+            
+        return True, ""
+    
+    def validate_table_data(self, table_data: List[List[str]]) -> Tuple[bool, str]:
+        """
+        Comprehensive validation for table data format.
+        
+        This extracts and centralizes table validation logic from multiple functions.
+        
+        Args:
+            table_data: 2D array of data to validate
+            
+        Returns:
+            Tuple of (is_valid, detailed_error_message)
+        """
+        if not isinstance(table_data, list):
+            return False, f"Table data must be a list, got {type(table_data).__name__}. Required format: [['col1', 'col2'], ['row1col1', 'row1col2']]"
+        
+        if not table_data:
+            return False, "Table data cannot be empty. Required format: [['col1', 'col2'], ['row1col1', 'row1col2']]"
+        
+        # Check if it's a 2D list
+        if not all(isinstance(row, list) for row in table_data):
+            non_list_rows = [i for i, row in enumerate(table_data) if not isinstance(row, list)]
+            return False, f"All rows must be lists. Rows {non_list_rows} are not lists. Required format: [['col1', 'col2'], ['row1col1', 'row1col2']]"
+        
+        # Check for empty rows
+        if any(len(row) == 0 for row in table_data):
+            empty_rows = [i for i, row in enumerate(table_data) if len(row) == 0]
+            return False, f"Rows cannot be empty. Empty rows found at indices: {empty_rows}"
+        
+        # Check column consistency
+        col_counts = [len(row) for row in table_data]
+        if len(set(col_counts)) > 1:
+            return False, f"All rows must have the same number of columns. Found column counts: {col_counts}. Fix your data structure."
+        
+        rows = len(table_data)
+        cols = col_counts[0]
+        
+        # Check dimension limits
+        if rows > self.validation_rules['table_max_rows']:
+            return False, f"Too many rows ({rows}). Maximum allowed: {self.validation_rules['table_max_rows']}"
+        
+        if cols > self.validation_rules['table_max_columns']:
+            return False, f"Too many columns ({cols}). Maximum allowed: {self.validation_rules['table_max_columns']}"
+        
+        # Check cell content types
+        for row_idx, row in enumerate(table_data):
+            for col_idx, cell in enumerate(row):
+                if cell is None:
+                    return False, f"Cell ({row_idx},{col_idx}) is None. All cells must be strings, use empty string '' for empty cells."
+                
+                if not isinstance(cell, str):
+                    return False, f"Cell ({row_idx},{col_idx}) is {type(cell).__name__}, not string. All cells must be strings. Value: {repr(cell)}"
+        
+        return True, f"Valid table data: {rows}×{cols} table format"
+    
+    def validate_text_formatting_params(
+        self,
+        bold: Optional[bool] = None,
+        italic: Optional[bool] = None,
+        underline: Optional[bool] = None,
+        font_size: Optional[int] = None,
+        font_family: Optional[str] = None
+    ) -> Tuple[bool, str]:
+        """
+        Validate text formatting parameters.
+        
+        Args:
+            bold: Bold setting
+            italic: Italic setting
+            underline: Underline setting
+            font_size: Font size in points
+            font_family: Font family name
+            
+        Returns:
+            Tuple of (is_valid, error_message)
+        """
+        # Check if at least one formatting option is provided
+        formatting_params = [bold, italic, underline, font_size, font_family]
+        if all(param is None for param in formatting_params):
+            return False, "At least one formatting parameter must be provided (bold, italic, underline, font_size, or font_family)"
+        
+        # Validate boolean parameters
+        for param, name in [(bold, 'bold'), (italic, 'italic'), (underline, 'underline')]:
+            if param is not None and not isinstance(param, bool):
+                return False, f"{name} parameter must be boolean (True/False), got {type(param).__name__}"
+        
+        # Validate font size
+        if font_size is not None:
+            if not isinstance(font_size, int):
+                return False, f"font_size must be an integer, got {type(font_size).__name__}"
+            
+            min_size, max_size = self.validation_rules['font_size_range']
+            if not (min_size <= font_size <= max_size):
+                return False, f"font_size must be between {min_size} and {max_size} points, got {font_size}"
+        
+        # Validate font family
+        if font_family is not None:
+            if not isinstance(font_family, str):
+                return False, f"font_family must be a string, got {type(font_family).__name__}"
+            
+            if not font_family.strip():
+                return False, "font_family cannot be empty"
+        
+        return True, ""
+    
+    def validate_index(self, index: int, context: str = "Index") -> Tuple[bool, str]:
+        """
+        Validate a single document index.
+        
+        Args:
+            index: Index to validate
+            context: Context description for error messages
+            
+        Returns:
+            Tuple of (is_valid, error_message)
+        """
+        if not isinstance(index, int):
+            return False, f"{context} must be an integer, got {type(index).__name__}"
+        
+        if index < 0:
+            return False, f"{context} {index} is negative. You MUST call inspect_doc_structure first to get the proper insertion index."
+        
+        return True, ""
+    
+    def validate_index_range(
+        self,
+        start_index: int,
+        end_index: Optional[int] = None,
+        document_length: Optional[int] = None
+    ) -> Tuple[bool, str]:
+        """
+        Validate document index ranges.
+        
+        Args:
+            start_index: Starting index
+            end_index: Ending index (optional)
+            document_length: Total document length for bounds checking
+            
+        Returns:
+            Tuple of (is_valid, error_message)
+        """
+        # Validate start_index
+        if not isinstance(start_index, int):
+            return False, f"start_index must be an integer, got {type(start_index).__name__}"
+        
+        if start_index < 0:
+            return False, f"start_index cannot be negative, got {start_index}"
+        
+        # Validate end_index if provided
+        if end_index is not None:
+            if not isinstance(end_index, int):
+                return False, f"end_index must be an integer, got {type(end_index).__name__}"
+            
+            if end_index <= start_index:
+                return False, f"end_index ({end_index}) must be greater than start_index ({start_index})"
+        
+        # Validate against document length if provided
+        if document_length is not None:
+            if start_index >= document_length:
+                return False, f"start_index ({start_index}) exceeds document length ({document_length})"
+            
+            if end_index is not None and end_index > document_length:
+                return False, f"end_index ({end_index}) exceeds document length ({document_length})"
+        
+        return True, ""
+    
+    def validate_element_insertion_params(
+        self,
+        element_type: str,
+        index: int,
+        **kwargs
+    ) -> Tuple[bool, str]:
+        """
+        Validate parameters for element insertion.
+        
+        Args:
+            element_type: Type of element to insert
+            index: Insertion index
+            **kwargs: Additional parameters specific to element type
+            
+        Returns:
+            Tuple of (is_valid, error_message)
+        """
+        # Validate element type
+        if element_type not in self.validation_rules['valid_element_types']:
+            valid_types = ', '.join(self.validation_rules['valid_element_types'])
+            return False, f"Invalid element_type '{element_type}'. Must be one of: {valid_types}"
+        
+        # Validate index
+        if not isinstance(index, int) or index < 0:
+            return False, f"index must be a non-negative integer, got {index}"
+        
+        # Validate element-specific parameters
+        if element_type == "table":
+            rows = kwargs.get('rows')
+            columns = kwargs.get('columns')
+            
+            if not rows or not columns:
+                return False, "Table insertion requires 'rows' and 'columns' parameters"
+            
+            if not isinstance(rows, int) or not isinstance(columns, int):
+                return False, "Table rows and columns must be integers"
+            
+            if rows <= 0 or columns <= 0:
+                return False, "Table rows and columns must be positive integers"
+            
+            if rows > self.validation_rules['table_max_rows']:
+                return False, f"Too many rows ({rows}). Maximum: {self.validation_rules['table_max_rows']}"
+            
+            if columns > self.validation_rules['table_max_columns']:
+                return False, f"Too many columns ({columns}). Maximum: {self.validation_rules['table_max_columns']}"
+        
+        elif element_type == "list":
+            list_type = kwargs.get('list_type')
+            
+            if not list_type:
+                return False, "List insertion requires 'list_type' parameter"
+            
+            if list_type not in self.validation_rules['valid_list_types']:
+                valid_types = ', '.join(self.validation_rules['valid_list_types'])
+                return False, f"Invalid list_type '{list_type}'. Must be one of: {valid_types}"
+        
+        return True, ""
+    
+    def validate_header_footer_params(
+        self,
+        section_type: str,
+        header_footer_type: str = "DEFAULT"
+    ) -> Tuple[bool, str]:
+        """
+        Validate header/footer operation parameters.
+        
+        Args:
+            section_type: Type of section ("header" or "footer")
+            header_footer_type: Specific header/footer type
+            
+        Returns:
+            Tuple of (is_valid, error_message)
+        """
+        if section_type not in self.validation_rules['valid_section_types']:
+            valid_types = ', '.join(self.validation_rules['valid_section_types'])
+            return False, f"section_type must be one of: {valid_types}, got '{section_type}'"
+        
+        if header_footer_type not in self.validation_rules['valid_header_footer_types']:
+            valid_types = ', '.join(self.validation_rules['valid_header_footer_types'])
+            return False, f"header_footer_type must be one of: {valid_types}, got '{header_footer_type}'"
+        
+        return True, ""
+    
+    def validate_batch_operations(self, operations: List[Dict[str, Any]]) -> Tuple[bool, str]:
+        """
+        Validate a list of batch operations.
+        
+        Args:
+            operations: List of operation dictionaries
+            
+        Returns:
+            Tuple of (is_valid, error_message)
+        """
+        if not isinstance(operations, list):
+            return False, f"Operations must be a list, got {type(operations).__name__}"
+        
+        if not operations:
+            return False, "Operations list cannot be empty"
+        
+        # Validate each operation
+        for i, op in enumerate(operations):
+            if not isinstance(op, dict):
+                return False, f"Operation {i+1} must be a dictionary, got {type(op).__name__}"
+            
+            if 'type' not in op:
+                return False, f"Operation {i+1} missing required 'type' field"
+            
+            # Validate operation-specific fields using existing validation logic
+            # This would call the validate_operation function from docs_helpers
+            # but we're centralizing the logic here
+            
+        return True, ""
+    
+    def validate_text_content(self, text: str, max_length: Optional[int] = None) -> Tuple[bool, str]:
+        """
+        Validate text content for insertion.
+        
+        Args:
+            text: Text to validate
+            max_length: Maximum allowed length
+            
+        Returns:
+            Tuple of (is_valid, error_message)
+        """
+        if not isinstance(text, str):
+            return False, f"Text must be a string, got {type(text).__name__}"
+        
+        max_len = max_length or self.validation_rules['max_text_length']
+        if len(text) > max_len:
+            return False, f"Text too long ({len(text)} characters). Maximum: {max_len:,}"
+        
+        return True, ""
+    
+    def get_validation_summary(self) -> Dict[str, Any]:
+        """
+        Get a summary of all validation rules and constraints.
+        
+        Returns:
+            Dictionary containing validation rules
+        """
+        return {
+            'constraints': self.validation_rules.copy(),
+            'supported_operations': {
+                'table_operations': ['create_table', 'populate_table'],
+                'text_operations': ['insert_text', 'format_text', 'find_replace'],
+                'element_operations': ['insert_table', 'insert_list', 'insert_page_break'],
+                'header_footer_operations': ['update_header', 'update_footer']
+            },
+            'data_formats': {
+                'table_data': "2D list of strings: [['col1', 'col2'], ['row1col1', 'row1col2']]",
+                'text_formatting': "Optional boolean/integer parameters for styling",
+                'document_indices': "Non-negative integers for position specification"
+            }
+        }
\ No newline at end of file
diff --git a/google_workspace_mcp.dxt b/google_workspace_mcp.dxt
index 4ac1d63..7cabe77 100644
Binary files a/google_workspace_mcp.dxt and b/google_workspace_mcp.dxt differ
diff --git a/gsheets/sheets_tools.py b/gsheets/sheets_tools.py
index 9a4ed36..9381aa4 100644
--- a/gsheets/sheets_tools.py
+++ b/gsheets/sheets_tools.py
@@ -6,7 +6,8 @@ This module provides MCP tools for interacting with Google Sheets API.
 
 import logging
 import asyncio
-from typing import List, Optional
+import json
+from typing import List, Optional, Union
 
 
 from auth.service_decorator import require_google_service
@@ -175,7 +176,7 @@ async def modify_sheet_values(
     user_google_email: str,
     spreadsheet_id: str,
     range_name: str,
-    values: Optional[List[List[str]]] = None,
+    values: Optional[Union[str, List[List[str]]]] = None,
     value_input_option: str = "USER_ENTERED",
     clear_values: bool = False,
 ) -> str:
@@ -186,7 +187,7 @@ async def modify_sheet_values(
         user_google_email (str): The user's Google email address. Required.
         spreadsheet_id (str): The ID of the spreadsheet. Required.
         range_name (str): The range to modify (e.g., "Sheet1!A1:D10", "A1:D10"). Required.
-        values (Optional[List[List[str]]]): 2D array of values to write/update. Required unless clear_values=True.
+        values (Optional[Union[str, List[List[str]]]]): 2D array of values to write/update. Can be a JSON string or Python list. Required unless clear_values=True.
         value_input_option (str): How to interpret input values ("RAW" or "USER_ENTERED"). Defaults to "USER_ENTERED".
         clear_values (bool): If True, clears the range instead of writing values. Defaults to False.
 
@@ -196,6 +197,23 @@ async def modify_sheet_values(
     operation = "clear" if clear_values else "write"
     logger.info(f"[modify_sheet_values] Invoked. Operation: {operation}, Email: '{user_google_email}', Spreadsheet: {spreadsheet_id}, Range: {range_name}")
 
+    # Parse values if it's a JSON string (MCP passes parameters as JSON strings)
+    if values is not None and isinstance(values, str):
+        try:
+            parsed_values = json.loads(values)
+            if not isinstance(parsed_values, list):
+                raise ValueError(f"Values must be a list, got {type(parsed_values).__name__}")
+            # Validate it's a list of lists
+            for i, row in enumerate(parsed_values):
+                if not isinstance(row, list):
+                    raise ValueError(f"Row {i} must be a list, got {type(row).__name__}")
+            values = parsed_values
+            logger.info(f"[modify_sheet_values] Parsed JSON string to Python list with {len(values)} rows")
+        except json.JSONDecodeError as e:
+            raise Exception(f"Invalid JSON format for values: {e}")
+        except ValueError as e:
+            raise Exception(f"Invalid values structure: {e}")
+
     if not clear_values and not values:
         raise Exception("Either 'values' must be provided or 'clear_values' must be True.")
 
diff --git a/helm-chart/workspace-mcp/Chart.yaml b/helm-chart/workspace-mcp/Chart.yaml
new file mode 100644
index 0000000..dcd7ca8
--- /dev/null
+++ b/helm-chart/workspace-mcp/Chart.yaml
@@ -0,0 +1,19 @@
+apiVersion: v2
+name: workspace-mcp
+description: A Helm chart for Google Workspace MCP Server - Comprehensive Google Workspace integration for AI assistants
+type: application
+version: 0.1.0
+appVersion: "1.2.0"
+keywords:
+  - mcp
+  - google
+  - workspace
+  - ai
+  - llm
+  - claude
+home: https://workspacemcp.com
+sources:
+  - https://github.com/taylorwilsdon/google_workspace_mcp
+maintainers:
+  - name: Taylor Wilsdon
+    email: taylor@taylorwilsdon.com
\ No newline at end of file
diff --git a/helm-chart/workspace-mcp/README.md b/helm-chart/workspace-mcp/README.md
new file mode 100644
index 0000000..ab7ccae
--- /dev/null
+++ b/helm-chart/workspace-mcp/README.md
@@ -0,0 +1,141 @@
+# Google Workspace MCP Server Helm Chart
+
+This Helm chart deploys the Google Workspace MCP Server on a Kubernetes cluster. The Google Workspace MCP Server provides comprehensive integration with Google Workspace services including Gmail, Calendar, Drive, Docs, Sheets, Slides, Forms, Tasks, and Chat.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+- Google Cloud Project with OAuth 2.0 credentials
+- Enabled Google Workspace APIs
+
+## Installing the Chart
+
+To install the chart with the release name `workspace-mcp`:
+
+```bash
+# First, set your Google OAuth credentials
+helm install workspace-mcp ./helm-chart/workspace-mcp \
+  --set secrets.googleOAuth.clientId="your-client-id.apps.googleusercontent.com" \
+  --set secrets.googleOAuth.clientSecret="your-client-secret"
+```
+
+## Configuration
+
+The following table lists the configurable parameters and their default values:
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `replicaCount` | Number of replicas | `1` |
+| `image.repository` | Container image repository | `workspace-mcp` |
+| `image.tag` | Container image tag | `""` (uses Chart.AppVersion) |
+| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
+| `secrets.googleOAuth.clientId` | Google OAuth Client ID | `""` (required) |
+| `secrets.googleOAuth.clientSecret` | Google OAuth Client Secret | `""` (required) |
+| `secrets.googleOAuth.userEmail` | Default user email for single-user mode | `""` |
+| `singleUserMode` | Enable single-user mode | `false` |
+| `tools.enabled` | List of tools to enable | `[]` (all tools enabled) |
+| `env.MCP_ENABLE_OAUTH21` | Enable OAuth 2.1 support | `"false"` |
+| `service.type` | Kubernetes service type | `ClusterIP` |
+| `service.port` | Service port | `8000` |
+| `ingress.enabled` | Enable ingress | `false` |
+| `resources.limits.cpu` | CPU limit | `500m` |
+| `resources.limits.memory` | Memory limit | `512Mi` |
+| `autoscaling.enabled` | Enable HPA | `false` |
+
+## Google OAuth Setup
+
+Before deploying, you need to set up Google OAuth credentials:
+
+1. Create a project in [Google Cloud Console](https://console.cloud.google.com/)
+2. Enable the required Google Workspace APIs
+3. Create OAuth 2.0 credentials (Web application)
+4. Set authorized redirect URI: `http://your-domain:8000/oauth2callback`
+
+## Examples
+
+### Basic deployment with specific tools:
+
+```bash
+helm install workspace-mcp ./helm-chart/workspace-mcp \
+  --set secrets.googleOAuth.clientId="your-client-id" \
+  --set secrets.googleOAuth.clientSecret="your-secret" \
+  --set tools.enabled="{gmail,calendar,drive}"
+```
+
+### Production deployment with ingress:
+
+```bash
+helm install workspace-mcp ./helm-chart/workspace-mcp \
+  --set secrets.googleOAuth.clientId="your-client-id" \
+  --set secrets.googleOAuth.clientSecret="your-secret" \
+  --set ingress.enabled=true \
+  --set ingress.hosts[0].host="workspace-mcp.yourdomain.com" \
+  --set ingress.hosts[0].paths[0].path="/" \
+  --set ingress.hosts[0].paths[0].pathType="Prefix"
+```
+
+### Single-user mode deployment:
+
+```bash
+helm install workspace-mcp ./helm-chart/workspace-mcp \
+  --set secrets.googleOAuth.clientId="your-client-id" \
+  --set secrets.googleOAuth.clientSecret="your-secret" \
+  --set singleUserMode=true \
+  --set secrets.googleOAuth.userEmail="user@yourdomain.com"
+```
+
+### Enable OAuth 2.1 for multi-user environments:
+
+```bash
+helm install workspace-mcp ./helm-chart/workspace-mcp \
+  --set secrets.googleOAuth.clientId="your-client-id" \
+  --set secrets.googleOAuth.clientSecret="your-secret" \
+  --set env.MCP_ENABLE_OAUTH21="true"
+```
+
+## Uninstalling the Chart
+
+To uninstall/delete the `workspace-mcp` deployment:
+
+```bash
+helm delete workspace-mcp
+```
+
+## Available Tools
+
+You can selectively enable tools using the `tools.enabled` parameter:
+
+- `gmail` - Gmail integration
+- `drive` - Google Drive integration  
+- `calendar` - Google Calendar integration
+- `docs` - Google Docs integration
+- `sheets` - Google Sheets integration
+- `slides` - Google Slides integration
+- `forms` - Google Forms integration
+- `tasks` - Google Tasks integration
+- `chat` - Google Chat integration
+- `search` - Google Custom Search integration
+
+If `tools.enabled` is empty or not set, all tools will be enabled.
+
+## Health Checks
+
+The chart includes health checks that verify the application is running correctly:
+
+- Liveness probe checks `/health` endpoint
+- Readiness probe ensures the service is ready to accept traffic
+- Configurable timing and thresholds via `healthCheck` values
+
+## Security
+
+- Runs as non-root user (UID 1000)
+- Uses read-only root filesystem where possible
+- Drops all Linux capabilities
+- Secrets are stored securely in Kubernetes secrets
+
+## Support
+
+For issues and questions:
+- GitHub: https://github.com/taylorwilsdon/google_workspace_mcp
+- Documentation: https://workspacemcp.com
\ No newline at end of file
diff --git a/helm-chart/workspace-mcp/templates/NOTES.txt b/helm-chart/workspace-mcp/templates/NOTES.txt
new file mode 100644
index 0000000..8a4ed04
--- /dev/null
+++ b/helm-chart/workspace-mcp/templates/NOTES.txt
@@ -0,0 +1,55 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+  {{- range .paths }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+  {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "workspace-mcp.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+           You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "workspace-mcp.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "workspace-mcp.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "workspace-mcp.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
+{{- end }}
+
+2. Check the health of your Google Workspace MCP Server:
+{{- if .Values.healthCheck.enabled }}
+  kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "workspace-mcp.name" . }},app.kubernetes.io/instance={{ .Release.Name }}"
+  
+  # View application logs
+  kubectl logs --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "workspace-mcp.name" . }},app.kubernetes.io/instance={{ .Release.Name }}"
+{{- end }}
+
+3. Configuration Summary:
+{{- if .Values.singleUserMode }}
+   - Mode: Single-user
+{{- else }}
+   - Mode: Multi-user
+{{- end }}
+{{- if .Values.tools.enabled }}
+   - Enabled Tools: {{ join ", " .Values.tools.enabled }}
+{{- else }}
+   - Enabled Tools: All Google Workspace tools
+{{- end }}
+{{- if eq .Values.env.MCP_ENABLE_OAUTH21 "true" }}
+   - OAuth 2.1: Enabled
+{{- else }}
+   - OAuth 2.1: Disabled (using legacy OAuth 2.0)
+{{- end }}
+
+4. Important Notes:
+   - Make sure you have configured your Google OAuth credentials in the secret
+   - The application requires internet access to reach Google APIs
+   - OAuth callback URL: {{ default "http://localhost" .Values.env.WORKSPACE_MCP_BASE_URI }}:{{ .Values.env.WORKSPACE_MCP_PORT }}/oauth2callback
+
+For more information about the Google Workspace MCP Server, visit:
+https://github.com/taylorwilsdon/google_workspace_mcp
\ No newline at end of file
diff --git a/helm-chart/workspace-mcp/templates/_helpers.tpl b/helm-chart/workspace-mcp/templates/_helpers.tpl
new file mode 100644
index 0000000..6f0063f
--- /dev/null
+++ b/helm-chart/workspace-mcp/templates/_helpers.tpl
@@ -0,0 +1,62 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "workspace-mcp.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "workspace-mcp.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "workspace-mcp.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "workspace-mcp.labels" -}}
+helm.sh/chart: {{ include "workspace-mcp.chart" . }}
+{{ include "workspace-mcp.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "workspace-mcp.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "workspace-mcp.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "workspace-mcp.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "workspace-mcp.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/helm-chart/workspace-mcp/templates/configmap.yaml b/helm-chart/workspace-mcp/templates/configmap.yaml
new file mode 100644
index 0000000..2be9648
--- /dev/null
+++ b/helm-chart/workspace-mcp/templates/configmap.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.env }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "workspace-mcp.fullname" . }}-config
+  labels:
+    {{- include "workspace-mcp.labels" . | nindent 4 }}
+data:
+  {{- range $key, $value := .Values.env }}
+  {{ $key }}: {{ $value | quote }}
+  {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/helm-chart/workspace-mcp/templates/deployment.yaml b/helm-chart/workspace-mcp/templates/deployment.yaml
new file mode 100644
index 0000000..553fc9d
--- /dev/null
+++ b/helm-chart/workspace-mcp/templates/deployment.yaml
@@ -0,0 +1,128 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "workspace-mcp.fullname" . }}
+  labels:
+    {{- include "workspace-mcp.labels" . | nindent 4 }}
+spec:
+  {{- if not .Values.autoscaling.enabled }}
+  replicas: {{ .Values.replicaCount }}
+  {{- end }}
+  selector:
+    matchLabels:
+      {{- include "workspace-mcp.selectorLabels" . | nindent 6 }}
+  template:
+    metadata:
+      {{- with .Values.podAnnotations }}
+      annotations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      labels:
+        {{- include "workspace-mcp.selectorLabels" . | nindent 8 }}
+    spec:
+      {{- with .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ include "workspace-mcp.serviceAccountName" . }}
+      securityContext:
+        {{- toYaml .Values.podSecurityContext | nindent 8 }}
+      containers:
+        - name: {{ .Chart.Name }}
+          securityContext:
+            {{- toYaml .Values.securityContext | nindent 12 }}
+          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          ports:
+            - name: http
+              containerPort: {{ .Values.service.targetPort }}
+              protocol: TCP
+          {{- if .Values.healthCheck.enabled }}
+          livenessProbe:
+            httpGet:
+              path: {{ .Values.healthCheck.path }}
+              port: http
+            initialDelaySeconds: {{ .Values.healthCheck.initialDelaySeconds }}
+            periodSeconds: {{ .Values.healthCheck.periodSeconds }}
+            timeoutSeconds: {{ .Values.healthCheck.timeoutSeconds }}
+            successThreshold: {{ .Values.healthCheck.successThreshold }}
+            failureThreshold: {{ .Values.healthCheck.failureThreshold }}
+          readinessProbe:
+            httpGet:
+              path: {{ .Values.healthCheck.path }}
+              port: http
+            initialDelaySeconds: {{ .Values.healthCheck.initialDelaySeconds }}
+            periodSeconds: {{ .Values.healthCheck.periodSeconds }}
+            timeoutSeconds: {{ .Values.healthCheck.timeoutSeconds }}
+            successThreshold: {{ .Values.healthCheck.successThreshold }}
+            failureThreshold: {{ .Values.healthCheck.failureThreshold }}
+          {{- end }}
+          env:
+            # Google OAuth credentials from secret
+            - name: GOOGLE_OAUTH_CLIENT_ID
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "workspace-mcp.fullname" . }}-oauth
+                  key: client-id
+            - name: GOOGLE_OAUTH_CLIENT_SECRET
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "workspace-mcp.fullname" . }}-oauth
+                  key: client-secret
+            {{- if .Values.secrets.googleOAuth.userEmail }}
+            - name: USER_GOOGLE_EMAIL
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "workspace-mcp.fullname" . }}-oauth
+                  key: user-email
+            {{- end }}
+            # Single-user mode
+            {{- if .Values.singleUserMode }}
+            - name: MCP_SINGLE_USER_MODE
+              value: "1"
+            {{- end }}
+            # Environment variables from values
+            {{- range $key, $value := .Values.env }}
+            - name: {{ $key }}
+              value: {{ $value | quote }}
+            {{- end }}
+          args:
+            - "--transport"
+            - "streamable-http"
+            {{- if .Values.singleUserMode }}
+            - "--single-user"
+            {{- end }}
+            {{- if .Values.tools.enabled }}
+            - "--tools"
+            {{- range .Values.tools.enabled }}
+            - {{ . | quote }}
+            {{- end }}
+            {{- end }}
+          resources:
+            {{- toYaml .Values.resources | nindent 12 }}
+          volumeMounts:
+            - name: credentials
+              mountPath: /app/.credentials
+              readOnly: false
+            - name: tmp
+              mountPath: /tmp
+              readOnly: false
+      volumes:
+        - name: credentials
+          emptyDir:
+            sizeLimit: 100Mi
+        - name: tmp
+          emptyDir:
+            sizeLimit: 100Mi
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
\ No newline at end of file
diff --git a/helm-chart/workspace-mcp/templates/hpa.yaml b/helm-chart/workspace-mcp/templates/hpa.yaml
new file mode 100644
index 0000000..ebaf476
--- /dev/null
+++ b/helm-chart/workspace-mcp/templates/hpa.yaml
@@ -0,0 +1,32 @@
+{{- if .Values.autoscaling.enabled }}
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: {{ include "workspace-mcp.fullname" . }}
+  labels:
+    {{- include "workspace-mcp.labels" . | nindent 4 }}
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: {{ include "workspace-mcp.fullname" . }}
+  minReplicas: {{ .Values.autoscaling.minReplicas }}
+  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
+  metrics:
+    {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
+    {{- end }}
+    {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: memory
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
+    {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/helm-chart/workspace-mcp/templates/ingress.yaml b/helm-chart/workspace-mcp/templates/ingress.yaml
new file mode 100644
index 0000000..785bc5c
--- /dev/null
+++ b/helm-chart/workspace-mcp/templates/ingress.yaml
@@ -0,0 +1,59 @@
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "workspace-mcp.fullname" . -}}
+{{- $svcPort := .Values.service.port -}}
+{{- if and .Values.ingress.className (not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class")) }}
+  {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
+{{- end }}
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1
+{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- else -}}
+apiVersion: extensions/v1beta1
+{{- end }}
+kind: Ingress
+metadata:
+  name: {{ $fullName }}
+  labels:
+    {{- include "workspace-mcp.labels" . | nindent 4 }}
+  {{- with .Values.ingress.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
+  ingressClassName: {{ .Values.ingress.className }}
+  {{- end }}
+  {{- if .Values.ingress.tls }}
+  tls:
+    {{- range .Values.ingress.tls }}
+    - hosts:
+        {{- range .hosts }}
+        - {{ . | quote }}
+        {{- end }}
+      secretName: {{ .secretName }}
+    {{- end }}
+  {{- end }}
+  rules:
+    {{- range .Values.ingress.hosts }}
+    - host: {{ .host | quote }}
+      http:
+        paths:
+          {{- range .paths }}
+          - path: {{ .path }}
+            {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
+            pathType: {{ .pathType }}
+            {{- end }}
+            backend:
+              {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
+              service:
+                name: {{ $fullName }}
+                port:
+                  number: {{ $svcPort }}
+              {{- else }}
+              serviceName: {{ $fullName }}
+              servicePort: {{ $svcPort }}
+              {{- end }}
+          {{- end }}
+    {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/helm-chart/workspace-mcp/templates/poddisruptionbudget.yaml b/helm-chart/workspace-mcp/templates/poddisruptionbudget.yaml
new file mode 100644
index 0000000..c37070f
--- /dev/null
+++ b/helm-chart/workspace-mcp/templates/poddisruptionbudget.yaml
@@ -0,0 +1,18 @@
+{{- if .Values.podDisruptionBudget.enabled }}
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: {{ include "workspace-mcp.fullname" . }}
+  labels:
+    {{- include "workspace-mcp.labels" . | nindent 4 }}
+spec:
+  selector:
+    matchLabels:
+      {{- include "workspace-mcp.selectorLabels" . | nindent 6 }}
+  {{- if .Values.podDisruptionBudget.minAvailable }}
+  minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}
+  {{- end }}
+  {{- if .Values.podDisruptionBudget.maxUnavailable }}
+  maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}
+  {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/helm-chart/workspace-mcp/templates/secret.yaml b/helm-chart/workspace-mcp/templates/secret.yaml
new file mode 100644
index 0000000..a5f330b
--- /dev/null
+++ b/helm-chart/workspace-mcp/templates/secret.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "workspace-mcp.fullname" . }}-oauth
+  labels:
+    {{- include "workspace-mcp.labels" . | nindent 4 }}
+type: Opaque
+data:
+  {{- if .Values.secrets.googleOAuth.clientId }}
+  client-id: {{ .Values.secrets.googleOAuth.clientId | b64enc }}
+  {{- else }}
+  {{- fail "Google OAuth Client ID is required. Set secrets.googleOAuth.clientId in values.yaml (or via --set)" }}
+  {{- end }}
+  {{- if .Values.secrets.googleOAuth.clientSecret }}
+  client-secret: {{ .Values.secrets.googleOAuth.clientSecret | b64enc }}
+  {{- else }}
+  {{- fail "Google OAuth Client Secret is required. Set secrets.googleOAuth.clientSecret in values.yaml (or via --set)" }}
+  {{- end }}
+  {{- if .Values.secrets.googleOAuth.userEmail }}
+  user-email: {{ .Values.secrets.googleOAuth.userEmail | b64enc }}
+  {{- end }}
\ No newline at end of file
diff --git a/helm-chart/workspace-mcp/templates/service.yaml b/helm-chart/workspace-mcp/templates/service.yaml
new file mode 100644
index 0000000..3bde636
--- /dev/null
+++ b/helm-chart/workspace-mcp/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "workspace-mcp.fullname" . }}
+  labels:
+    {{- include "workspace-mcp.labels" . | nindent 4 }}
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    - port: {{ .Values.service.port }}
+      targetPort: {{ .Values.service.targetPort }}
+      protocol: TCP
+      name: http
+  selector:
+    {{- include "workspace-mcp.selectorLabels" . | nindent 4 }}
\ No newline at end of file
diff --git a/helm-chart/workspace-mcp/templates/serviceaccount.yaml b/helm-chart/workspace-mcp/templates/serviceaccount.yaml
new file mode 100644
index 0000000..b62c090
--- /dev/null
+++ b/helm-chart/workspace-mcp/templates/serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "workspace-mcp.serviceAccountName" . }}
+  labels:
+    {{- include "workspace-mcp.labels" . | nindent 4 }}
+  {{- with .Values.serviceAccount.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/helm-chart/workspace-mcp/values.yaml b/helm-chart/workspace-mcp/values.yaml
new file mode 100644
index 0000000..4a7547d
--- /dev/null
+++ b/helm-chart/workspace-mcp/values.yaml
@@ -0,0 +1,133 @@
+# Default values for workspace-mcp
+replicaCount: 1
+
+image:
+  repository: workspace-mcp
+  pullPolicy: IfNotPresent
+  # Uses Chart.AppVersion if not specified
+  tag: ""
+
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+
+serviceAccount:
+  create: true
+  annotations: {}
+  name: ""
+
+podAnnotations: {}
+
+podSecurityContext:
+  fsGroup: 1000
+
+securityContext:
+  capabilities:
+    drop:
+    - ALL
+  readOnlyRootFilesystem: false
+  runAsNonRoot: true
+  runAsUser: 1000
+
+service:
+  type: ClusterIP
+  port: 8000
+  targetPort: 8000
+
+ingress:
+  enabled: false
+  className: ""
+  annotations: {}
+    # kubernetes.io/ingress.class: nginx
+    # kubernetes.io/tls-acme: "true"
+  hosts:
+    - host: workspace-mcp.local
+      paths:
+        - path: /
+          pathType: Prefix
+  tls: []
+  #  - secretName: workspace-mcp-tls
+  #    hosts:
+  #      - workspace-mcp.local
+
+resources:
+  limits:
+    cpu: 500m
+    memory: 512Mi
+  requests:
+    cpu: 250m
+    memory: 256Mi
+
+autoscaling:
+  enabled: false
+  minReplicas: 1
+  maxReplicas: 100
+  targetCPUUtilizationPercentage: 80
+  # targetMemoryUtilizationPercentage: 80
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+
+# Environment variables for the application
+env:
+  # Server configuration
+  WORKSPACE_MCP_PORT: "8000"
+  # Set the base URI for your Kubernetes environment
+  # For internal cluster access use the in-cluster service DNS name, e.g. "http://<release-name>-workspace-mcp.<namespace>.svc.cluster.local" (note: values are not template-rendered)
+  # For external access: "https://your-domain.com" or "http://your-ingress-host"
+  WORKSPACE_MCP_BASE_URI: ""
+  
+  # OAuth 2.1 support
+  MCP_ENABLE_OAUTH21: "false"
+  
+  # Development only - set to "1" for local development
+  OAUTHLIB_INSECURE_TRANSPORT: "0"
+  
+  # Optional: Google Custom Search
+  # GOOGLE_PSE_API_KEY: ""
+  # GOOGLE_PSE_ENGINE_ID: ""
+
+# Secret configuration for sensitive data
+secrets:
+  # Google OAuth credentials (required)
+  googleOAuth:
+    # Set these values or create secret manually
+    clientId: ""
+    clientSecret: ""
+    # Optional: default user email for single-user setups
+    userEmail: ""
+
+# Tool selection - specify which Google Workspace tools to enable
+tools:
+  enabled: []
+  # Available tools: gmail, drive, calendar, docs, sheets, chat, forms, slides, tasks, search
+  # Example: ["gmail", "drive", "calendar"]
+  # Leave empty to enable all tools
+
+# Single-user mode configuration
+singleUserMode: false
+
+# Health check configuration
+healthCheck:
+  enabled: true
+  path: /health
+  initialDelaySeconds: 30
+  periodSeconds: 30
+  timeoutSeconds: 10
+  successThreshold: 1
+  failureThreshold: 3
+
+# Pod disruption budget
+podDisruptionBudget:
+  enabled: false
+  minAvailable: 1
+  # maxUnavailable: 1
+
+# Network policies
+networkPolicy:
+  enabled: false
+  ingress: []
+  egress: []
\ No newline at end of file
diff --git a/install_claude.py b/install_claude.py
deleted file mode 100644
index 3bccdba..0000000
--- a/install_claude.py
+++ /dev/null
@@ -1,256 +0,0 @@
-#!/usr/bin/env python3
-"""
-Auto-installer for Google Workspace MCP in Claude Desktop
-Enhanced version with OAuth configuration and installation options
-"""
-
-import json
-import os
-import platform
-import sys
-from pathlib import Path
-from typing import Dict, Optional, Tuple
-
-
-def get_claude_config_path() -> Path:
-    """Get the Claude Desktop config file path for the current platform."""
-    system = platform.system()
-    if system == "Darwin":  # macOS
-        return Path.home() / "Library/Application Support/Claude/claude_desktop_config.json"
-    elif system == "Windows":
-        appdata = os.environ.get("APPDATA")
-        if not appdata:
-            raise RuntimeError("APPDATA environment variable not found")
-        return Path(appdata) / "Claude/claude_desktop_config.json"
-    else:
-        raise RuntimeError(f"Unsupported platform: {system}")
-
-
-def prompt_yes_no(question: str, default: bool = True) -> bool:
-    """Prompt user for yes/no question."""
-    default_str = "Y/n" if default else "y/N"
-    while True:
-        response = input(f"{question} [{default_str}]: ").strip().lower()
-        if not response:
-            return default
-        if response in ['y', 'yes']:
-            return True
-        if response in ['n', 'no']:
-            return False
-        print("Please answer 'y' or 'n'")
-
-
-def get_oauth_credentials() -> Tuple[Optional[Dict[str, str]], Optional[str]]:
-    """Get OAuth credentials from user."""
-    print("\n🔑 OAuth Credentials Setup")
-    print("You need Google OAuth 2.0 credentials to use this server.")
-    print("\nYou can provide credentials in two ways:")
-    print("1. Environment variables (recommended for production)")
-    print("2. Client secrets JSON file")
-
-    use_env = prompt_yes_no("\nDo you want to use environment variables?", default=True)
-
-    env_vars = {}
-    client_secret_path = None
-
-    if use_env:
-        print("\n📝 Enter your OAuth credentials:")
-        client_id = input("Client ID (ends with .apps.googleusercontent.com): ").strip()
-        client_secret = input("Client Secret: ").strip()
-
-        if not client_id or not client_secret:
-            print("❌ Both Client ID and Client Secret are required!")
-            return None, None
-
-        env_vars["GOOGLE_OAUTH_CLIENT_ID"] = client_id
-        env_vars["GOOGLE_OAUTH_CLIENT_SECRET"] = client_secret
-
-        # Optional redirect URI
-        custom_redirect = input("Redirect URI (press Enter for default http://localhost:8000/oauth2callback): ").strip()
-        if custom_redirect:
-            env_vars["GOOGLE_OAUTH_REDIRECT_URI"] = custom_redirect
-
-    else:
-        print("\n📁 Client secrets file setup:")
-        default_path = "client_secret.json"
-        file_path = input(f"Path to client_secret.json file [{default_path}]: ").strip()
-
-        if not file_path:
-            file_path = default_path
-
-        # Check if file exists
-        if not Path(file_path).exists():
-            print(f"❌ File not found: {file_path}")
-            return None, None
-
-        client_secret_path = file_path
-
-    # Optional: Default user email
-    print("\n📧 Optional: Default user email (for single-user setups)")
-    user_email = input("Your Google email (press Enter to skip): ").strip()
-    if user_email:
-        env_vars["USER_GOOGLE_EMAIL"] = user_email
-
-    # Development mode
-    if prompt_yes_no("\n🔧 Enable development mode (OAUTHLIB_INSECURE_TRANSPORT)?", default=False):
-        env_vars["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
-
-    return env_vars, client_secret_path
-
-
-def get_installation_options() -> Dict[str, any]:
-    """Get installation options from user."""
-    options = {}
-
-    print("\n⚙️  Installation Options")
-
-    # Installation method
-    print("\nChoose installation method:")
-    print("1. uvx (recommended - auto-installs from PyPI)")
-    print("2. Development mode (requires local repository)")
-
-    method = input("Select method [1]: ").strip()
-    if method == "2":
-        options["dev_mode"] = True
-        cwd = input("Path to google_workspace_mcp repository [current directory]: ").strip()
-        options["cwd"] = cwd if cwd else os.getcwd()
-    else:
-        options["dev_mode"] = False
-
-    # Single-user mode
-    if prompt_yes_no("\n👤 Enable single-user mode (simplified authentication)?", default=False):
-        options["single_user"] = True
-
-    # Tool selection
-    print("\n🛠️  Tool Selection")
-    print("Available tools: gmail, drive, calendar, docs, sheets, forms, chat")
-    print("Leave empty to enable all tools")
-    tools = input("Enter tools to enable (comma-separated): ").strip()
-    if tools:
-        options["tools"] = [t.strip() for t in tools.split(",")]
-
-    # Transport mode
-    if prompt_yes_no("\n🌐 Use HTTP transport mode (for debugging)?", default=False):
-        options["http_mode"] = True
-
-    return options
-
-
-def create_server_config(options: Dict, env_vars: Dict, client_secret_path: Optional[str]) -> Dict:
-    """Create the server configuration."""
-    config = {}
-
-    if options.get("dev_mode"):
-        config["command"] = "uv"
-        config["args"] = ["run", "--directory", options["cwd"], "main.py"]
-    else:
-        config["command"] = "uvx"
-        config["args"] = ["workspace-mcp"]
-
-    # Add command line arguments
-    if options.get("single_user"):
-        config["args"].append("--single-user")
-
-    if options.get("tools"):
-        config["args"].extend(["--tools"] + options["tools"])
-
-    if options.get("http_mode"):
-        config["args"].extend(["--transport", "streamable-http"])
-
-    # Add environment variables
-    if env_vars or client_secret_path:
-        config["env"] = {}
-
-    if env_vars:
-        config["env"].update(env_vars)
-
-    if client_secret_path:
-        config["env"]["GOOGLE_CLIENT_SECRET_PATH"] = client_secret_path
-
-    return config
-
-
-def main():
-    print("🚀 Google Workspace MCP Installer for Claude Desktop")
-    print("=" * 50)
-
-    try:
-        config_path = get_claude_config_path()
-
-        # Check if config already exists
-        existing_config = {}
-        if config_path.exists():
-            with open(config_path, 'r') as f:
-                existing_config = json.load(f)
-
-            if "mcpServers" in existing_config and "Google Workspace" in existing_config["mcpServers"]:
-                print(f"\n⚠️  Google Workspace MCP is already configured in {config_path}")
-                if not prompt_yes_no("Do you want to reconfigure it?", default=True):
-                    print("Installation cancelled.")
-                    return
-
-        # Get OAuth credentials
-        env_vars, client_secret_path = get_oauth_credentials()
-        if env_vars is None and client_secret_path is None:
-            print("\n❌ OAuth credentials are required. Installation cancelled.")
-            sys.exit(1)
-
-        # Get installation options
-        options = get_installation_options()
-
-        # Create server configuration
-        server_config = create_server_config(options, env_vars, client_secret_path)
-
-        # Prepare final config
-        if "mcpServers" not in existing_config:
-            existing_config["mcpServers"] = {}
-
-        existing_config["mcpServers"]["Google Workspace"] = server_config
-
-        # Create directory if needed
-        config_path.parent.mkdir(parents=True, exist_ok=True)
-
-        # Write configuration
-        with open(config_path, 'w') as f:
-            json.dump(existing_config, f, indent=2)
-
-        print("\n✅ Successfully configured Google Workspace MCP!")
-        print(f"📁 Config file: {config_path}")
-
-        print("\n📋 Configuration Summary:")
-        print(f"  • Installation method: {'Development' if options.get('dev_mode') else 'uvx (PyPI)'}")
-        print(f"  • Authentication: {'Environment variables' if env_vars else 'Client secrets file'}")
-        if options.get("single_user"):
-            print("  • Single-user mode: Enabled")
-        if options.get("tools"):
-            print(f"  • Tools: {', '.join(options['tools'])}")
-        else:
-            print("  • Tools: All enabled")
-        if options.get("http_mode"):
-            print("  • Transport: HTTP mode")
-        else:
-            print("  • Transport: stdio (default)")
-
-        print("\n🚀 Next steps:")
-        print("1. Restart Claude Desktop")
-        print("2. The Google Workspace tools will be available in your chats!")
-        print("\n💡 The server will start automatically when Claude Desktop needs it.")
-
-        if options.get("http_mode"):
-            print("\n⚠️  Note: HTTP mode requires additional setup.")
-            print("   You may need to install and configure mcp-remote.")
-            print("   See the README for details.")
-
-    except KeyboardInterrupt:
-        print("\n\nInstallation cancelled by user.")
-        sys.exit(0)
-    except Exception as e:
-        print(f"\n❌ Error: {e}")
-        print("\n📋 Manual installation:")
-        print("1. Open Claude Desktop Settings → Developer → Edit Config")
-        print("2. Add the server configuration shown in the README")
-        sys.exit(1)
-
-
-if __name__ == "__main__":
-    main()
\ No newline at end of file
diff --git a/main.py b/main.py
index 8753673..022aed3 100644
--- a/main.py
+++ b/main.py
@@ -4,17 +4,19 @@ import os
 import sys
 from importlib import metadata
 from dotenv import load_dotenv
-from core.server import server, set_transport_mode, configure_server_for_http
 
-# Suppress googleapiclient discovery cache warning
-logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR)
+from auth.oauth_config import reload_oauth_config
 from core.utils import check_credentials_directory_permissions
+from core.server import server, set_transport_mode, configure_server_for_http
 
-# Load environment variables from .env file, specifying an explicit path
-# This prevents accidentally loading a .env file from a different directory
 dotenv_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '.env')
 load_dotenv(dotenv_path=dotenv_path)
 
+# Suppress googleapiclient discovery cache warning
+logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR)
+
+reload_oauth_config()
+
 logging.basicConfig(
     level=logging.INFO,
     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
@@ -92,6 +94,7 @@ def main():
     # Active Configuration
     safe_print("⚙️ Active Configuration:")
 
+
     # Redact client secret for security
     client_secret = os.getenv('GOOGLE_OAUTH_CLIENT_SECRET', 'Not Set')
     redacted_secret = f"{client_secret[:4]}...{client_secret[-4:]}" if len(client_secret) > 8 else "Invalid or too short"
@@ -140,11 +143,11 @@ def main():
 
     # Import specified tools or all tools if none specified
     tools_to_import = args.tools if args.tools is not None else tool_imports.keys()
-    
+
     # Set enabled tools for scope management
     from auth.scopes import set_enabled_tools
     set_enabled_tools(list(tools_to_import))
-    
+
     safe_print(f"🛠️  Loading {len(tools_to_import)} tool module{'s' if len(tools_to_import) != 1 else ''}:")
     for tool in tools_to_import:
         tool_imports[tool]()
@@ -153,7 +156,6 @@ def main():
 
     safe_print("📊 Configuration Summary:")
     safe_print(f"   🔧 Tools Enabled: {len(tools_to_import)}/{len(tool_imports)}")
-    safe_print("   🔑 Auth Method: OAuth 2.0 with PKCE")
     safe_print(f"   📝 Log Level: {logging.getLogger().getEffectiveLevel()}")
     safe_print("")
 
@@ -182,10 +184,10 @@ def main():
         # Configure auth initialization for FastMCP lifecycle events
         if args.transport == 'streamable-http':
             configure_server_for_http()
-            safe_print(f"")
+            safe_print("")
             safe_print(f"🚀 Starting HTTP server on {base_uri}:{port}")
         else:
-            safe_print(f"")
+            safe_print("")
             safe_print("🚀 Starting STDIO server")
             # Start minimal OAuth callback server for stdio mode
             from auth.oauth_callback_server import ensure_oauth_callback_available
diff --git a/manifest.json b/manifest.json
index f4e68eb..b58bdf8 100644
--- a/manifest.json
+++ b/manifest.json
@@ -2,7 +2,7 @@
   "dxt_version": "0.1",
   "name": "workspace-mcp",
   "display_name": "Google Workspace MCP",
-  "version": "1.1.9",
+  "version": "1.2.0",
   "description": "Full natural language control over Google Calendar, Drive, Gmail, Docs, Sheets, Slides, Forms, Tasks, Chat and Custom Search through all MCP clients, AI assistants and developer tools",
   "long_description": "A production-ready MCP server that integrates all major Google Workspace services with AI assistants. Includes Google PSE integration for custom web searches.",
   "author": {
@@ -28,6 +28,7 @@
       "env": {
         "GOOGLE_OAUTH_CLIENT_ID": "${user_config.GOOGLE_OAUTH_CLIENT_ID}",
         "GOOGLE_OAUTH_CLIENT_SECRET": "${user_config.GOOGLE_OAUTH_CLIENT_SECRET}",
+        "USER_GOOGLE_EMAIL": "${user_config.USER_GOOGLE_EMAIL}",
         "GOOGLE_OAUTH_REDIRECT_URI": "${user_config.GOOGLE_OAUTH_REDIRECT_URI}",
         "GOOGLE_CLIENT_SECRET_PATH": "${user_config.GOOGLE_CLIENT_SECRET_PATH}",
         "GOOGLE_CLIENT_SECRETS": "${user_config.GOOGLE_CLIENT_SECRETS}",
@@ -123,6 +124,16 @@
         "min_length": 24
       }
     },
+    "USER_GOOGLE_EMAIL": {
+      "type": "string",
+      "title": "User Google Email",
+      "description": "Optional default email for authentication flows. If set, the LLM won't need to specify your email when calling start_google_auth",
+      "required": false,
+      "sensitive": false,
+      "validation": {
+        "pattern": "^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+[.][a-zA-Z]{2,}$"
+      }
+    },
     "GOOGLE_OAUTH_REDIRECT_URI": {
       "type": "string",
       "title": "Google OAuth Redirect URI",
@@ -246,4 +257,4 @@
     "type": "git",
     "url": "https://github.com/taylorwilsdon/google_workspace_mcp"
   }
-}
+}
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index 2878444..6d06775 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "workspace-mcp"
-version = "1.2.0"
+version = "1.3.3"
 description = "Comprehensive, highly performant Google Workspace Streamable HTTP & SSE MCP Server for Calendar, Gmail, Docs, Sheets, Slides & Drive"
 readme = "README.md"
 keywords = [ "mcp", "google", "workspace", "llm", "ai", "claude", "model", "context", "protocol", "server"]
@@ -22,6 +22,7 @@ dependencies = [
  "aiohttp>=3.9.0",
  "cachetools>=5.3.0",
  "cryptography>=41.0.0",
+ "python-dotenv>=1.1.0",
 ]
 classifiers = [
     "Development Status :: 4 - Beta",
@@ -60,6 +61,11 @@ Changelog = "https://github.com/taylorwilsdon/google_workspace_mcp/releases"
 [project.scripts]
 workspace-mcp = "main:main"
 
+[project.optional-dependencies]
+dev = [
+    "twine>=5.0.0",
+]
+
 [tool.setuptools]
-packages = [ "auth", "gcalendar", "core", "gdocs", "gdrive", "gmail", "gchat", "gsheets", "gforms", "gslides", "gtasks", "gsearch"]
+packages = [ "auth", "gcalendar", "core", "gdocs", "gdocs.managers", "gdrive", "gmail", "gchat", "gsheets", "gforms", "gslides", "gtasks", "gsearch"]
 py-modules = [ "main"]
