Commit 0d42e5bc authored by natsukium's avatar natsukium Committed by Yt
Browse files
parent e289901d
Loading
Loading
Loading
Loading
+9 −6
Original line number Diff line number Diff line
@@ -5,11 +5,11 @@
, hatch-fancy-pypi-readme
, hatch-vcs
, hatchling
, attrs
, cattrs
, anyio
, distro
, httpx
, httpx-auth
, openllm-core
, orjson
, soundfile
, transformers
}:
@@ -30,16 +30,19 @@ buildPythonPackage rec {
  ];

  propagatedBuildInputs = [
    attrs
    cattrs
    anyio
    distro
    httpx
    orjson
    openllm-core
  ];

  passthru.optional-dependencies = {
    grpc = [
      bentoml
    ] ++ bentoml.optional-dependencies.grpc;
    auth = [
      httpx-auth
    ];
    agents = [
      transformers
      # diffusers
+12 −7
Original line number Diff line number Diff line
@@ -4,10 +4,12 @@
, pythonOlder
, accelerate
, attrs
, bentoml
, bitsandbytes
, bentoml
, cattrs
, click-option-group
, datasets
, deepmerge
, hatch-fancy-pypi-readme
, hatch-vcs
, hatchling
@@ -15,14 +17,13 @@
, mypy-extensions
, orjson
, peft
, ray
, transformers
, typing-extensions
}:

buildPythonPackage rec {
  pname = "openllm-core";
  version = "0.3.9";
  version = "0.4.22";
  pyproject = true;

  disabled = pythonOlder "3.8";
@@ -31,7 +32,7 @@ buildPythonPackage rec {
    owner = "bentoml";
    repo = "OpenLLM";
    rev = "refs/tags/v${version}";
    hash = "sha256-M/ckvaHTdKFg7xfUgFxu7pRBrS6TGw0m2U3L88b2DKU=";
    hash = "sha256-Hgwc4rneY0d7KZHuBIWRpndLksts5DTvaYuwZOO4sdI=";
  };

  sourceRoot = "source/openllm-core";
@@ -44,8 +45,10 @@ buildPythonPackage rec {

  propagatedBuildInputs = [
    attrs
    bentoml
    cattrs
    # not listed in pyproject.toml, but required at runtime
    click-option-group
    deepmerge
    inflection
    mypy-extensions
    orjson
@@ -54,9 +57,11 @@ buildPythonPackage rec {

  passthru.optional-dependencies = {
    vllm = [
      ray
      # vllm
    ];
    bentoml = [
      bentoml
    ];
    fine-tune = [
      accelerate
      bitsandbytes
@@ -67,7 +72,7 @@ buildPythonPackage rec {
    ] ++ transformers.optional-dependencies.torch
      ++ transformers.optional-dependencies.tokenizers
      ++ transformers.optional-dependencies.accelerate;
    full = with passthru.optional-dependencies; ( vllm ++ fine-tune );
    full = with passthru.optional-dependencies; ( vllm ++ bentoml ++ fine-tune );
  };

  # there are no tests
+26 −43
Original line number Diff line number Diff line
@@ -9,20 +9,18 @@
, accelerate
, bentoml
, bitsandbytes
, build
, click
, ctranslate2
, datasets
, docker
, einops
, fairscale
, flax
, ghapi
, huggingface-hub
, hypothesis
, ipython
, jax
, jaxlib
, jupyter
, jupytext
, keras
, nbformat
, notebook
, openai
@@ -34,14 +32,12 @@
, pytest-randomly
, pytest-rerunfailures
, pytest-xdist
, ray
, safetensors
, scipy
, sentencepiece
, soundfile
, syrupy
, tabulate
, tensorflow
, tiktoken
, transformers
, openai-triton
@@ -73,12 +69,16 @@ buildPythonPackage rec {
    accelerate
    bentoml
    bitsandbytes
    build
    click
    einops
    ghapi
    openllm-client
    openllm-core
    optimum
    safetensors
    scipy
    sentencepiece
    tabulate
    transformers
  ] ++ bentoml.optional-dependencies.io
@@ -92,62 +92,43 @@ buildPythonPackage rec {
      soundfile
      transformers
    ] ++ transformers.optional-dependencies.agents;
    awq = [
      # autoawq
    ];
    baichuan = [
      # cpm-kernels
      sentencepiece
    ];
    chatglm = [
      # cpm-kernels
      sentencepiece
    ];
    ctranslate = [
      ctranslate2
    ];
    falcon = [
      einops
      xformers
    ];
    fine-tune = [
      accelerate
      bitsandbytes
      datasets
      huggingface-hub
      peft
      # trl
    ];
    flan-t5 = [
      flax
      jax
      jaxlib
      keras
      tensorflow
    ];
    ggml = [
      # ctransformers
    ];
    gptq = [
      # auto-gptq
      optimum
    ]; # ++ autogptq.optional-dependencies.triton;
    grpc = [
      openllm-client
    ] ++ openllm-client.optional-dependencies.grpc;
    llama = [
      fairscale
      sentencepiece
      scipy
    ];
      bentoml
    ] ++ bentoml.optional-dependencies.grpc;
    mpt = [
      einops
      openai-triton
    ];
    openai = [
      openai
      tiktoken
    ] ++ openai.optional-dependencies.embeddings;
    opt = [
      flax
      jax
      jaxlib
      keras
      tensorflow
    ];
    ] ++ openai.optional-dependencies.datalib;
    playground = [
      ipython
      jupyter
@@ -159,11 +140,10 @@ buildPythonPackage rec {
      bitsandbytes
    ];
    vllm = [
      ray
      # vllm
    ];
    full = with passthru.optional-dependencies; (
      agents ++ baichuan ++ chatglm ++ falcon ++ fine-tune ++ flan-t5 ++ ggml ++ gptq ++ llama ++ mpt ++ openai ++ opt ++ playground ++ starcoder ++ vllm
      agents ++ awq ++ baichuan ++ chatglm ++ ctranslate ++ falcon ++ fine-tune ++ ggml ++ gptq ++ mpt ++ openai ++ playground ++ starcoder ++ vllm
    );
    all = passthru.optional-dependencies.full;
  };
@@ -187,12 +167,15 @@ buildPythonPackage rec {
    export CI=1
  '';

  disabledTestPaths = [
    # require network access
    "tests/models"
  ];

  disabledTests = [
    # these tests access to huggingface.co
    "test_opt_125m"
    "test_opt_125m"
    "test_flan_t5"
    "test_flan_t5"
    # incompatible with recent TypedDict
    # https://github.com/bentoml/OpenLLM/blob/f3fd32d596253ae34c68e2e9655f19f40e05f666/openllm-python/tests/configuration_test.py#L18-L21
    "test_missing_default"
  ];

  pythonImportsCheck = [ "openllm" ];