"""Download worker — consumes media:download stream."""
from __future__ import annotations

import uuid

import redis.asyncio as aioredis
import structlog
import structlog.contextvars

from app.config import get_settings
from app.db.models.job import JobStatus, JobType
from app.db.models.media import MediaStatus
from app.db.repositories.job import JobRepository
from app.db.repositories.media import MediaRepository
from app.db.repositories.user import UserRepository
from app.db.session import AsyncSessionLocal
from app.exceptions import DownloadError, QuotaExceededError, StorageError
from app.services.downloader import DownloaderService
from app.services.queue import STREAM_AI, QueueService
from app.services.storage import StorageService
from app.workers.base import BaseWorker

log = structlog.get_logger(__name__)
settings = get_settings()


class DownloadWorker(BaseWorker):
    """Consumer for the ``media:download`` stream.

    For each message: claims the job (Redis lock + optimistic DB state
    transition), downloads the media, uploads the file and optional
    thumbnail to S3, updates the MediaItem row and the user's storage
    usage, then enqueues a follow-up AI-analysis job.
    """

    stream = "media:download"
    group = "media:download-workers"

    def __init__(
        self,
        redis_client: aioredis.Redis,
        queue: QueueService,
        storage: StorageService,
        downloader: DownloaderService,
        worker_id: int = 0,
    ) -> None:
        """Wire up storage/downloader services on top of the base worker.

        Args:
            redis_client: Shared async Redis connection.
            queue: Stream enqueue/DLQ service.
            storage: S3 upload/key-building service.
            downloader: Service that fetches media to local temp files.
            worker_id: Suffix used to build a unique consumer name
                within the Redis consumer group.
        """
        super().__init__(redis_client, queue)
        self._storage = storage
        self._downloader = downloader
        self.consumer_name = f"download-worker-{worker_id}"

    async def _process(self, fields: dict[str, str]) -> None:
        """Process one stream message; ``fields['job_id']`` is the job UUID."""
        job_id = uuid.UUID(fields["job_id"])
        structlog.contextvars.bind_contextvars(job_id=str(job_id), worker=self.consumer_name)
        try:
            await self._process_job(job_id)
        finally:
            # BUGFIX: previously unbinding was skipped on early returns
            # (job missing, lock contention, already claimed) and on
            # unexpected exceptions, leaking job_id/worker into the log
            # context of subsequent messages.
            structlog.contextvars.unbind_contextvars("job_id", "worker")

    async def _process_job(self, job_id: uuid.UUID) -> None:
        """Claim, download, upload, and finalize a single job."""
        async with AsyncSessionLocal() as session:
            job_repo = JobRepository(session)
            media_repo = MediaRepository(session)
            user_repo = UserRepository(session)

            job = await job_repo.get(job_id)
            if job is None:
                log.error("download.job_not_found", job_id=str(job_id))
                return

            # Redis lock prevents two consumers racing on the same job.
            # TTL slightly outlives the claim timeout so a crashed
            # worker's lock expires on its own.
            lock_key = f"job:{job_id}:lock"
            lock_ttl = settings.job_claim_timeout_ms // 1000 + 30
            acquired = await self._r.set(lock_key, self.consumer_name, nx=True, ex=lock_ttl)
            if not acquired:
                log.warning("download.lock_not_acquired", job_id=str(job_id))
                return

            try:
                # Optimistic-locking claim: exactly one worker wins the
                # PENDING -> RUNNING transition.
                claimed = await job_repo.transition(job_id, JobStatus.PENDING, JobStatus.RUNNING)
                if not claimed:
                    log.warning("download.already_claimed", job_id=str(job_id))
                    return
                await job_repo.append_history_event(job_id, JobStatus.RUNNING)
                await session.commit()

                entity_id = job.entity_id
                media_item = await media_repo.get_media_item(entity_id)
                if media_item is None:
                    # Permanent: retrying cannot materialize the missing row.
                    raise DownloadError("MediaItem record not found", is_permanent=True)

                await media_repo.update_media_status(entity_id, MediaStatus.DOWNLOADING)
                await session.commit()

                # Enforce per-user storage quota before spending bandwidth.
                # NOTE(review): on failure after this point the media row
                # stays in DOWNLOADING — confirm whether it should be reset.
                user = await user_repo.get_by_id(job.user_id)
                if user and user.storage_used_bytes >= settings.max_storage_bytes_per_user:
                    raise QuotaExceededError("Storage quota exceeded")

                result = await self._downloader.download(
                    media_item.source_url,
                    platform=media_item.source_platform or "web",
                )

                try:
                    media_key = self._storage.make_media_key(
                        user_id=str(job.user_id),
                        job_id=str(job_id),
                        filename=result.file_path.name,
                    )
                    thumb_key: str | None = None

                    # Skip re-upload on redelivery (keys are deterministic),
                    # but still record the local file's size.
                    if await self._storage.object_exists(media_key):
                        log.info("download.s3_already_exists", s3_key=media_key)
                        file_size = result.file_path.stat().st_size
                    else:
                        file_size = await self._storage.upload_file(result.file_path, media_key)

                    if result.thumbnail_path and result.thumbnail_path.exists():
                        thumb_key = self._storage.make_thumbnail_key(
                            user_id=str(job.user_id),
                            job_id=str(job_id),
                        )
                        if not await self._storage.object_exists(thumb_key):
                            await self._storage.upload_file(result.thumbnail_path, thumb_key)

                    await media_repo.update_media_status(
                        entity_id,
                        MediaStatus.DOWNLOADED,
                        s3_key=media_key,
                        s3_bucket=settings.s3_bucket_name,
                        thumbnail_s3_key=thumb_key,
                        file_size_bytes=file_size,
                        duration_seconds=result.duration,
                        mime_type=result.mime_type,
                        page_title=result.title,
                        metadata_=result.metadata,
                    )

                    await user_repo.add_storage_usage(job.user_id, file_size)
                    await session.commit()

                finally:
                    # Always remove the downloader's temp files, even if
                    # the upload or DB update failed.
                    self._downloader.cleanup(result)

                # Hand off to the AI-analysis pipeline.
                ai_job = await job_repo.create(
                    entity_id=entity_id,
                    entity_type="media",
                    user_id=job.user_id,
                    canonical_url_hash=job.canonical_url_hash or "",
                    job_type=JobType.AI_ANALYZE,
                    payload=job.payload,
                )
                await self._queue.enqueue(STREAM_AI, job_id=str(ai_job.id))
                await job_repo.transition(job_id, JobStatus.RUNNING, JobStatus.DONE)
                await session.commit()

                log.info("download.complete", entity_id=str(entity_id), s3_key=media_key)

            except (DownloadError, StorageError, QuotaExceededError) as exc:
                await self._handle_failure(job_id, job_repo, session, exc)
            finally:
                await self._r.delete(lock_key)

    async def _handle_failure(
        self,
        job_id: uuid.UUID,
        job_repo: JobRepository,
        session: object,
        exc: Exception,
    ) -> None:
        """Record a failure, then either dead-letter or requeue the job.

        Permanent errors (``exc.is_permanent``) and exhausted attempts go
        to FAILED + the DLQ; anything else is reset for another attempt.
        """
        from sqlalchemy.ext.asyncio import AsyncSession
        assert isinstance(session, AsyncSession)

        attempts = await job_repo.increment_attempts(job_id)
        # Use the enum for consistency with the success-path history event.
        await job_repo.append_history_event(job_id, JobStatus.FAILED, str(exc))

        is_permanent = getattr(exc, "is_permanent", False)
        if is_permanent or attempts >= settings.job_max_attempts:
            await job_repo.transition(
                job_id, JobStatus.RUNNING, JobStatus.FAILED, error_message=str(exc)
            )
            await self._queue.to_dlq(
                self.stream, str(job_id), str(exc), attempts
            )
            log.error("download.permanent_failure", job_id=str(job_id), error=str(exc))
        else:
            # BUGFIX: the retry branch previously moved the job to FAILED,
            # which made the PENDING -> RUNNING claim impossible on retry —
            # retryable jobs died silently without ever reaching the DLQ.
            # Reset to PENDING and re-enqueue so a worker picks it up.
            await job_repo.transition(job_id, JobStatus.RUNNING, JobStatus.PENDING)
            await self._queue.enqueue(self.stream, job_id=str(job_id))
            # NOTE(review): backoff is computed and logged, but redelivery is
            # immediate — delayed requeue is not implemented here; confirm
            # whether the queue layer applies the delay.
            log.warning(
                "download.retry_scheduled",
                job_id=str(job_id),
                attempt=attempts,
                backoff=self._backoff(attempts),
            )

        await session.commit()