Commit 531b820c authored by cianciosa's avatar cianciosa
Browse files

I cannot successfully load a Keras model into MLX and evaluate it.

parent ac86612c
Loading
Loading
Loading
Loading
+23 −0
Original line number Diff line number Diff line
@@ -9,6 +9,8 @@
#include <string>
#include <cassert>
#include <iostream>
#include <type_traits>
#include <concepts>

#include <hdf5.h>
#include <hdf5_hl.h>
@@ -57,6 +59,9 @@ namespace ml_embeder {
                    const hid_t d; ///< Handle to the open HDF5 dataset; released in the destructor.
                    
                public:
                    ///  Contiguous host buffer type returned by read().
                    template<typename T>
                    using buffer = std::vector<T>;

//------------------------------------------------------------------------------
///  @brief Open a group.
///
@@ -83,6 +88,24 @@ namespace ml_embeder {
                    ~dataset() {
//  Close the HDF5 dataset handle; any failure is routed through check_error.
                        check_error(H5Dclose(d));
                    }

//------------------------------------------------------------------------------
///  @brief Read data.
///
///  Reads the entire dataset into a newly allocated host buffer. The HDF5
///  memory type is selected at compile time from T.
///
///  @tparam T The floating point type of the buffer to read.
///
///  @param[in] size Number of elements to read. Assumed to match the dataset
///                  extent -- TODO confirm against the file space.
///
///  @returns A buffer containing the dataset.
//------------------------------------------------------------------------------
                    template<std::floating_point T>
                    buffer<T> read(const size_t size) {
                        buffer<T> b(size);
//  Use H5P_DEFAULT for the transfer property list rather than the magic
//  constant 0 (they are the same value, but the name documents intent).
                        if constexpr (std::is_same_v<T, float>) {
                            check_error(H5Dread(d, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, b.data()));
                        } else {
                            check_error(H5Dread(d, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, b.data()));
                        }
                        return b;
                    }
                };
        
//------------------------------------------------------------------------------
+2 −2
Original line number Diff line number Diff line
@@ -103,8 +103,8 @@ namespace ml_embeder {
//------------------------------------------------------------------------------
        void check_error() {
            zip_error_t *error = zip_get_error(z);
            assert((zip_error_code_zip(error) == NULL &&
                    zip_error_code_system(error) == NULL) &&
            assert((!zip_error_code_zip(error) &&
                    !zip_error_code_system(error)) &&
                   zip_error_strerror(error));
#ifdef NDEBUG
            if (zip_error_code_zip(error) || zip_error_code_system(error)) {
+77 −7
Original line number Diff line number Diff line
@@ -3,7 +3,9 @@
///  @brief Unit test driver.
//------------------------------------------------------------------------------

#include <nlohmann/json.hpp>
#include <ml_embeder.hpp>
#include <mlx/mlx.h>

//------------------------------------------------------------------------------
///  @brief Main program.
@@ -14,13 +16,81 @@
int main(int argc, const char * argv[]) {
    ml_embeder::hdf5::file::initalize(); {
        ml_embeder::zip zip("/Users/m4c/OneDrive - Oak Ridge National Laboratory/eped/eped_model7.1/saved_model.keras");
        ml_embeder::zip::file::buffer b = zip.get_file("model.weights.h5").get_buffer();
        ml_embeder::hdf5::file hf(b.data(), b.size());
        ml_embeder::hdf5::file::group hg1 = hf.open_group("layers");
        ml_embeder::hdf5::file::group hg2 = hg1.open_group("dense");
        ml_embeder::hdf5::file::group hg3 = hg2.open_group("vars");
        ml_embeder::hdf5::file::group::dataset hd1 = hg3.open_dataset("0");
        ml_embeder::hdf5::file::group::dataset hd2 = hg3.open_dataset("1");
        ml_embeder::zip::file::buffer weights = zip.get_file("model.weights.h5").get_buffer();
        ml_embeder::zip::file::buffer configfile = zip.get_file("config.json").get_buffer();
        ml_embeder::hdf5::file hf(weights.data(), weights.size());
        ml_embeder::hdf5::file::group layers = hf.open_group("layers");

        const nlohmann::json config = nlohmann::json::parse(configfile);
        
        if (config["class_name"] != "Sequential") {
            std::cerr << config["class_name"] << " models are not supported." << std::endl;
            exit(0);
        }

        auto layer_func = mlx::core::compile([](const std::vector<mlx::core::array> &in) {
            return in;
        });

        for (auto i : config["config"]["layers"]) {
            if (i["class_name"] == "InputLayer") {
                continue;
            }
            ml_embeder::hdf5::file::group layer = layers.open_group(i["config"]["name"]);
            ml_embeder::hdf5::file::group vars = layer.open_group("vars");
            const int width = i["config"]["units"];
            const bool use_bias = i["config"]["use_bias"];
            const std::string activation = std::string(i["config"]["activation"]);
            const int input_size = i["build_config"]["input_shape"][1];
            const std::string datatype = std::string(i["config"]["dtype"]["config"]["name"]);
            if (use_bias) {
                ml_embeder::hdf5::file::group::dataset weigths = vars.open_dataset("0");
                ml_embeder::hdf5::file::group::dataset biass = vars.open_dataset("1");

                mlx::core::array weight = mlx::core::zeros({0});
                mlx::core::array bias = mlx::core::zeros({0});
                if (datatype == "float32") {
                    ml_embeder::hdf5::file::group::dataset::buffer<float> wb = weigths.template read<float> (input_size*width);
                    ml_embeder::hdf5::file::group::dataset::buffer<float> bb = biass.template read<float> (width);
                    weight = mlx::core::array(wb.data(), {width, input_size});
                    bias = mlx::core::array(bb.data(), {width});
                } else {
                    ml_embeder::hdf5::file::group::dataset::buffer<double> wb = weigths.template read<double> (input_size*width);
                    ml_embeder::hdf5::file::group::dataset::buffer<double> bb = biass.template read<double> (width);
                    weight = mlx::core::array(wb.data(), {width, input_size}, mlx::core::float64);
                    bias = mlx::core::array(bb.data(), {width}, mlx::core::float64);
                }

                layer_func = mlx::core::compile([layer_func, weight, bias](const std::vector<mlx::core::array> &in) {
                    return std::vector<mlx::core::array> ({mlx::core::einsum("ij,...j->...i", {weight, layer_func({in[0]})[0]}) + bias});
                });
            } else {
                ml_embeder::hdf5::file::group::dataset weigths = vars.open_dataset("0");

                mlx::core::array weight = mlx::core::zeros({0});
                if (datatype == "float32") {
                    ml_embeder::hdf5::file::group::dataset::buffer<float> wb = weigths.template read<float> (input_size*width);
                    weight = mlx::core::array(wb.data(), {width, input_size});
                } else {
                    ml_embeder::hdf5::file::group::dataset::buffer<double> wb = weigths.template read<double> (input_size*width);
                    weight = mlx::core::array(wb.data(), {width, input_size}, mlx::core::float64);
                }

                layer_func = mlx::core::compile([layer_func, weight](const std::vector<mlx::core::array> &in) {
                    return std::vector<mlx::core::array> ({mlx::core::einsum("ij,...j->...i", {weight, layer_func({in[0]})[0]})});
                });
            }
            if (activation == "tanh") {
                layer_func = mlx::core::compile([layer_func](const std::vector<mlx::core::array> &in) {
                    return std::vector<mlx::core::array> ({mlx::core::tanh(layer_func({in[0]})[0])});
                });
            }
        }

        mlx::core::array x = mlx::core::ones({100000,9});
        mlx::core::array y = layer_func({x})[0];
        y.eval();
        std::cout << y << y.shape() << std::endl;
    }
    ml_embeder::hdf5::file::finalize();
}