IO Helpers

spark-fuse data source helpers.

build_qdrant_config

build_qdrant_config(spark: SparkSession, endpoint: Any, *, collection: Optional[str] = None, schema: Optional[StructType] = None, source_config: Optional[Mapping[str, Any]] = None, headers: Optional[Mapping[str, str]] = None, **kwargs: Any) -> Dict[str, Any]

Build the options payload consumed by the Qdrant data source.

Source code in src/spark_fuse/io/qdrant/reader.py, lines 328-408
def build_qdrant_config(
    spark: SparkSession,
    endpoint: Any,
    *,
    collection: Optional[str] = None,
    schema: Optional[StructType] = None,
    source_config: Optional[Mapping[str, Any]] = None,
    headers: Optional[Mapping[str, str]] = None,
    **kwargs: Any,
) -> Dict[str, Any]:
    """Build the options payload consumed by the Qdrant data source."""

    config: Dict[str, Any] = {}
    for mapping in (source_config, kwargs):
        if mapping:
            config.update(mapping)

    endpoint_str = str(endpoint)
    if not _validate_http_url(endpoint_str):
        raise ValueError("endpoint must start with http:// or https:// for Qdrant reads")

    collection_name = collection or config.get("collection")
    if not collection_name or not str(collection_name).strip():
        raise ValueError("collection must be provided for Qdrant reads")
    config["collection"] = str(collection_name).strip()

    infer_schema = bool(config.get("infer_schema", schema is None))
    if not infer_schema and schema is None:
        raise ValueError("schema must be provided when infer_schema=False for Qdrant reads")

    base_headers: Dict[str, str] = {}
    for header_map in (config.get("headers"), headers):
        if isinstance(header_map, Mapping):
            base_headers.update({str(k): str(v) for k, v in header_map.items()})

    limit_value = config.get("limit")
    if limit_value is not None:
        limit_value = int(limit_value)
        if limit_value <= 0:
            raise ValueError("limit must be positive when provided")
        config["limit"] = limit_value

    page_size = int(config.get("page_size", _DEFAULT_PAGE_SIZE))
    if page_size <= 0:
        raise ValueError("page_size must be a positive integer")
    if limit_value is not None:
        page_size = min(page_size, int(limit_value))
    config["page_size"] = page_size

    max_pages = config.get("max_pages")
    if max_pages is not None:
        max_pages = int(max_pages)
        if max_pages <= 0:
            raise ValueError("max_pages must be positive when provided")
        config["max_pages"] = max_pages

    filter_value = config.get("filter")
    if filter_value is not None and not isinstance(filter_value, Mapping):
        raise TypeError("filter must be a mapping when provided")
    if isinstance(filter_value, Mapping):
        config["filter"] = _normalize_jsonable(filter_value)

    config_payload = {
        "endpoint": endpoint_str.rstrip("/"),
        "collection": config["collection"],
        "api_key": config.get("api_key"),
        "headers": base_headers,
        "timeout": float(config.get("timeout", 30.0)),
        "max_retries": int(config.get("max_retries", 3)),
        "backoff_factor": float(config.get("backoff_factor", 0.5)),
        "with_payload": _normalize_payload_option(config.get("with_payload", True)),
        "with_vectors": _normalize_vectors_option(config.get("with_vectors", False)),
        "limit": config.get("limit"),
        "page_size": config["page_size"],
        "max_pages": config.get("max_pages"),
        "filter": config.get("filter"),
        "offset": config.get("offset"),
        "infer_schema": infer_schema,
    }

    return config_payload
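
A minimal usage sketch. The endpoint URL, collection name, and header value below are placeholders, and the import path assumes the function is exposed from its defining module shown above. Extra keyword arguments such as limit and page_size are merged into the config before validation:

from pyspark.sql import SparkSession

from spark_fuse.io.qdrant.reader import build_qdrant_config

spark = SparkSession.builder.getOrCreate()

# Placeholder deployment values; schema is omitted, so infer_schema defaults to True.
options = build_qdrant_config(
    spark,
    "https://qdrant.example.com:6333",
    collection="documents",
    headers={"api-key": "<your-key>"},
    limit=1000,
    page_size=256,
)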

build_qdrant_write_config

build_qdrant_write_config(endpoint: Any, *, collection: str, id_field: Optional[str] = 'id', vector_field: str = 'vector', payload_fields: Optional[Sequence[str]] = None, wait: bool = True, batch_size: int = 128, api_key: Optional[str] = None, headers: Optional[Mapping[str, str]] = None, timeout: float = 30.0, max_retries: int = 3, backoff_factor: float = 0.5, create_collection: bool = False, distance: str = 'Cosine', payload_format: str = 'auto', write_method: str = 'auto', **overrides: Any) -> Dict[str, Any]

Build the config payload used for Qdrant writes (DataFrameWriter options).

Source code in src/spark_fuse/io/qdrant/writer.py, lines 556-602
def build_qdrant_write_config(
    endpoint: Any,
    *,
    collection: str,
    id_field: Optional[str] = "id",
    vector_field: str = "vector",
    payload_fields: Optional[Sequence[str]] = None,
    wait: bool = True,
    batch_size: int = 128,
    api_key: Optional[str] = None,
    headers: Optional[Mapping[str, str]] = None,
    timeout: float = 30.0,
    max_retries: int = 3,
    backoff_factor: float = 0.5,
    create_collection: bool = False,
    distance: str = "Cosine",
    payload_format: str = "auto",
    write_method: str = "auto",
    **overrides: Any,
) -> Dict[str, Any]:
    """Build the config payload used for Qdrant writes (DataFrameWriter options)."""

    config: Dict[str, Any] = {}
    # Seed with caller overrides; the explicit keyword arguments below take precedence.
    config.update(overrides)

    config["endpoint"] = endpoint
    config["collection"] = collection
    config["api_key"] = api_key
    config["headers"] = headers or {}
    config["timeout"] = timeout
    config["max_retries"] = max_retries
    config["backoff_factor"] = backoff_factor
    config["batch_size"] = batch_size
    config["wait"] = wait
    config["id_field"] = id_field
    config["vector_field"] = vector_field
    config["payload_fields"] = payload_fields
    config["create_collection"] = create_collection
    config["distance"] = distance
    config["payload_format"] = payload_format
    config["write_method"] = write_method

    # Validate by constructing the resolved config; return raw dict for JSON serialization.
    _QdrantWriteConfig.from_dict(config)
    return config
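
A minimal usage sketch under the same assumptions (placeholder endpoint and collection; import from the defining module). The returned dict is validated through _QdrantWriteConfig and is intended to be passed as DataFrameWriter options:

from spark_fuse.io.qdrant.writer import build_qdrant_write_config

# Illustrative column names: `doc_id` and `embedding` must exist in the
# DataFrame being written; remaining defaults come from the signature above.
write_options = build_qdrant_write_config(
    "https://qdrant.example.com:6333",
    collection="documents",
    id_field="doc_id",
    vector_field="embedding",
    payload_fields=["title", "url"],
    create_collection=True,
    batch_size=256,
)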

build_rest_api_config

build_rest_api_config(spark: SparkSession, source: Any, *, schema: Optional[StructType] = None, source_config: Optional[Mapping[str, Any]] = None, options: Optional[Mapping[str, Any]] = None, headers: Optional[Mapping[str, str]] = None, **kwargs: Any) -> Dict[str, Any]

Build the options payload consumed by the REST data source.

Source code in src/spark_fuse/io/rest_api.py, lines 610-728
def build_rest_api_config(
    spark: SparkSession,
    source: Any,
    *,
    schema: Optional[StructType] = None,
    source_config: Optional[Mapping[str, Any]] = None,
    options: Optional[Mapping[str, Any]] = None,
    headers: Optional[Mapping[str, str]] = None,
    **kwargs: Any,
) -> Dict[str, Any]:
    """Build the options payload consumed by the REST data source."""

    config: Dict[str, Any] = {}
    for mapping in (source_config, options, kwargs):
        if mapping:
            config.update(mapping)

    records_field = config.get("records_field")
    if isinstance(records_field, str):
        records_path = records_field.split(".") if records_field else None
    elif isinstance(records_field, Sequence):
        records_path = [str(part) for part in records_field]
    elif records_field is None:
        records_path = None
    else:
        raise TypeError("records_field must be a string or sequence")

    infer_schema = bool(config.get("infer_schema", schema is None))
    if not infer_schema and schema is None:
        raise ValueError("schema must be provided when infer_schema=False for REST API reads")

    request_timeout = float(config.get("request_timeout", 30.0))
    max_retries = int(config.get("max_retries", 3))
    backoff_factor = float(config.get("retry_backoff", 0.5))

    base_headers: Dict[str, str] = {}
    for header_map in (config.get("headers"), headers):
        if isinstance(header_map, Mapping):
            base_headers.update({str(k): str(v) for k, v in header_map.items()})

    request_kwargs: Dict[str, Any] = {}
    if isinstance(config.get("request_kwargs"), Mapping):
        request_kwargs.update(config["request_kwargs"])

    request_type = str(config.get("request_type", "GET")).upper()
    if request_type not in {"GET", "POST"}:
        raise ValueError("request_type must be either 'GET' or 'POST'")

    request_body = config.get("request_body")
    if request_body is not None and request_type != "POST":
        raise ValueError("request_body is only supported when request_type='POST'")

    if request_body is not None:
        body_mode = config.get("request_body_type")
        if body_mode is None:
            body_mode = "json" if isinstance(request_body, Mapping) else "data"
        body_mode = str(body_mode).lower()
        if body_mode == "json":
            request_kwargs.setdefault("json", request_body)
        elif body_mode in {"data", "form"}:
            request_kwargs.setdefault("data", request_body)
        elif body_mode in {"raw", "content"}:
            request_kwargs.setdefault("data", request_body)
        else:
            raise ValueError(
                "request_body_type must be one of {'json', 'data', 'form', 'raw', 'content'}"
            )

    pagination = config.get("pagination")
    if pagination is not None and not isinstance(pagination, Mapping):
        raise TypeError("pagination configuration must be a mapping when provided")
    params = (
        dict(config.get("params", {}))
        if isinstance(config.get("params"), Mapping)
        else config.get("params", {})
    )
    if params and not isinstance(params, Mapping):
        raise TypeError("params configuration must be a mapping if provided")

    include_response_payload = bool(config.get("include_response_payload", False))
    response_payload_field: Optional[str] = None
    if include_response_payload:
        response_payload_field = str(config.get("response_payload_field", "response_payload"))
        if not response_payload_field:
            raise ValueError("response_payload_field must be a non-empty string when enabled")

    work_source: List[str]
    if isinstance(source, str):
        work_source = [source]
    elif isinstance(source, Sequence) and not isinstance(source, (str, bytes)):
        work_source = [str(url) for url in source]
    else:
        raise TypeError("source must be a string URL or a sequence of URLs for REST reads")
    for url in work_source:
        if not _validate_http_url(url):
            raise ValueError(f"Invalid REST endpoint: {url}")

    spark_parallelism = config.get("parallelism")
    if spark_parallelism is None:
        spark_parallelism = spark.sparkContext.defaultParallelism or 1

    payload_config = {
        "sources": work_source,
        "params": params or {},
        "pagination": pagination,
        "records_field": records_path,
        "request_type": request_type,
        "request_kwargs": _normalize_jsonable(request_kwargs),
        "headers": base_headers,
        "timeout": request_timeout,
        "max_retries": max_retries,
        "backoff_factor": backoff_factor,
        "include_response_payload": include_response_payload,
        "response_payload_field": response_payload_field,
        "parallelism": int(spark_parallelism),
        "infer_schema": infer_schema,
    }

    return payload_config
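
A minimal usage sketch against a hypothetical API. records_field accepts a dotted path (or a sequence of keys) locating the row array inside each JSON response; params and headers pass through as keyword options:

from pyspark.sql import SparkSession

from spark_fuse.io.rest_api import build_rest_api_config

spark = SparkSession.builder.getOrCreate()

# Placeholder URL and token; request_type defaults to GET.
options = build_rest_api_config(
    spark,
    "https://api.example.com/v1/items",
    records_field="data.items",
    params={"per_page": 100},
    headers={"Authorization": "Bearer <token>"},
)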

build_sparql_config

build_sparql_config(spark: SparkSession, source: Any, *, source_config: Optional[Mapping[str, Any]] = None, options: Optional[Mapping[str, Any]] = None, headers: Optional[Mapping[str, str]] = None, **kwargs: Any) -> Dict[str, Any]

Build the options payload consumed by the SPARQL data source.

Source code in src/spark_fuse/io/sparql.py, lines 434-534
def build_sparql_config(
    spark: SparkSession,
    source: Any,
    *,
    source_config: Optional[Mapping[str, Any]] = None,
    options: Optional[Mapping[str, Any]] = None,
    headers: Optional[Mapping[str, str]] = None,
    **kwargs: Any,
) -> Dict[str, Any]:
    """Build the options payload consumed by the SPARQL data source."""

    config: Dict[str, Any] = {}
    for mapping in (source_config, options, kwargs):
        if mapping:
            config.update(mapping)

    endpoint: Optional[str] = None
    queries: List[str] = []

    if isinstance(source, Mapping):
        endpoint = source.get("endpoint") or source.get("url") or source.get("path")
        if "query" in source:
            queries.append(str(source["query"]))
        if "queries" in source:
            queries.extend([str(q) for q in _as_sequence(source["queries"])])
    elif isinstance(source, str):
        endpoint = source
    elif source is not None:
        raise TypeError("SPARQL source must be a string endpoint or configuration mapping")

    endpoint = endpoint or config.get("endpoint") or config.get("url")
    if not isinstance(endpoint, str) or not _validate_endpoint(endpoint):
        raise ValueError("SPARQL reader requires an HTTP(S) endpoint URL")

    if "query" in config:
        queries.append(str(config["query"]))
    if "queries" in config:
        queries.extend([str(q) for q in _as_sequence(config["queries"])])

    queries = [query.strip() for query in queries if isinstance(query, str) and query.strip()]
    if not queries:
        raise ValueError("SPARQL reader requires at least one query to execute")

    params = config.get("params")
    if isinstance(params, Mapping):
        base_params: Mapping[str, Any] = params
    elif params is None:
        base_params = {}
    else:
        raise TypeError("SPARQL params configuration must be a mapping if provided")

    request_type = str(config.get("request_type", "POST")).upper()
    if request_type not in {"GET", "POST"}:
        raise ValueError("SPARQL request_type must be either 'GET' or 'POST'")

    payload_mode = str(config.get("payload_mode", "form")).lower()
    if payload_mode not in {"form", "json", "raw"}:
        raise ValueError("payload_mode must be one of {'form', 'json', 'raw'}")

    query_param = str(config.get("query_param", "query"))
    request_timeout = float(config.get("request_timeout", 30.0))
    max_retries = int(config.get("max_retries", 3))
    backoff_factor = float(config.get("retry_backoff_factor", 0.5))

    include_metadata = bool(config.get("include_metadata", False))
    metadata_suffix = str(config.get("metadata_suffix", "__"))
    coerce_types = bool(config.get("coerce_types", True))

    base_headers: Dict[str, str] = {"Accept": _DEFAULT_ACCEPT}
    if payload_mode == "raw":
        base_headers.setdefault("Content-Type", "application/sparql-query")
    for header_map in (config.get("headers"), headers):
        if isinstance(header_map, Mapping):
            base_headers.update({str(k): str(v) for k, v in header_map.items()})

    auth_value = config.get("auth")
    auth = None
    if isinstance(auth_value, Sequence) and len(auth_value) == 2:
        auth = [str(auth_value[0]), str(auth_value[1])]

    parallelism = max(int(config.get("parallelism", len(queries) or 1)), 1)

    payload_config = {
        "endpoint": endpoint,
        "queries": queries,
        "params": dict(base_params),
        "headers": base_headers,
        "auth": auth,
        "request_type": request_type,
        "payload_mode": payload_mode,
        "query_param": query_param,
        "include_metadata": include_metadata,
        "metadata_suffix": metadata_suffix,
        "coerce_types": coerce_types,
        "timeout": request_timeout,
        "max_retries": max_retries,
        "backoff_factor": backoff_factor,
        "parallelism": parallelism,
    }

    return payload_config
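
A minimal usage sketch; the endpoint and query are shown for illustration only. A query may be supplied inside a source mapping (under "query" or "queries") or, as here, as a keyword option:

from pyspark.sql import SparkSession

from spark_fuse.io.sparql import build_sparql_config

spark = SparkSession.builder.getOrCreate()

# request_type defaults to POST; GET is also accepted.
options = build_sparql_config(
    spark,
    "https://query.wikidata.org/sparql",
    query="SELECT ?s ?p ?o WHERE { ?s ?p ?o } LIMIT 10",
    request_type="GET",
)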

register_qdrant_data_source

register_qdrant_data_source(spark: SparkSession) -> None

Register the Qdrant data source with the given SparkSession.

Source code in src/spark_fuse/io/qdrant/datasource.py, lines 22-28
def register_qdrant_data_source(spark: SparkSession) -> None:
    """Register the Qdrant data source with the given SparkSession."""
    session_id = spark.sparkContext.applicationId
    if session_id in _REGISTERED_SESSIONS:
        return
    spark.dataSource.register(QdrantDataSource)
    _REGISTERED_SESSIONS.add(session_id)
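
Registration is keyed by the session's applicationId, so repeated calls within the same application are no-ops. A short sketch, with the import path assumed from the module above:

from pyspark.sql import SparkSession

from spark_fuse.io.qdrant.datasource import register_qdrant_data_source

spark = SparkSession.builder.getOrCreate()

register_qdrant_data_source(spark)  # registers QdrantDataSource for this session
register_qdrant_data_source(spark)  # no-op: this applicationId is already registered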

write_qdrant_points

write_qdrant_points(records: Iterable[Mapping[str, Any]], endpoint: Any, *, collection: str, id_field: Optional[str] = 'id', vector_field: str = 'vector', payload_fields: Optional[Sequence[str]] = None, wait: bool = True, batch_size: int = 128, api_key: Optional[str] = None, headers: Optional[Mapping[str, str]] = None, timeout: float = 30.0, max_retries: int = 3, backoff_factor: float = 0.5, create_collection: bool = False, distance: str = 'Cosine', payload_format: str = 'auto', write_method: str = 'auto') -> int

Write an iterable of records to a Qdrant collection via the HTTP API.

Source code in src/spark_fuse/io/qdrant/writer.py, lines 512-553
def write_qdrant_points(
    records: Iterable[Mapping[str, Any]],
    endpoint: Any,
    *,
    collection: str,
    id_field: Optional[str] = "id",
    vector_field: str = "vector",
    payload_fields: Optional[Sequence[str]] = None,
    wait: bool = True,
    batch_size: int = 128,
    api_key: Optional[str] = None,
    headers: Optional[Mapping[str, str]] = None,
    timeout: float = 30.0,
    max_retries: int = 3,
    backoff_factor: float = 0.5,
    create_collection: bool = False,
    distance: str = "Cosine",
    payload_format: str = "auto",
    write_method: str = "auto",
) -> int:
    """Write an iterable of records to a Qdrant collection via the HTTP API."""

    config_dict = {
        "endpoint": endpoint,
        "collection": collection,
        "api_key": api_key,
        "headers": headers or {},
        "timeout": timeout,
        "max_retries": max_retries,
        "backoff_factor": backoff_factor,
        "batch_size": batch_size,
        "wait": wait,
        "id_field": id_field,
        "vector_field": vector_field,
        "payload_fields": payload_fields,
        "create_collection": create_collection,
        "distance": distance,
        "payload_format": payload_format,
        "write_method": write_method,
    }
    config = _QdrantWriteConfig.from_dict(config_dict)
    return _write_points_iter(records, config)
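
A minimal usage sketch with placeholder endpoint, collection, and records. With the default field names, "id" supplies the point id and "vector" the embedding; the returned integer comes from _write_points_iter and presumably counts the points written:

from spark_fuse.io.qdrant.writer import write_qdrant_points

# Plain-Python records; `title` is an illustrative extra field.
records = [
    {"id": 1, "vector": [0.1, 0.2, 0.3], "title": "first"},
    {"id": 2, "vector": [0.4, 0.5, 0.6], "title": "second"},
]

written = write_qdrant_points(
    records,
    "https://qdrant.example.com:6333",
    collection="documents",
    create_collection=True,
)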