blob: fa7d1210561c18fcbda20a13a3b28d69612a78de [file] [log] [blame]
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This interface is shared by two APIs:
// - The Model Loader API,
// https://github.com/webmachinelearning/model-loader/blob/main/explainer.md
// - The WebNN API,
// https://github.com/webmachinelearning/webnn/blob/main/explainer.md
// We follow the WebNN spec's definition:
// - https://webmachinelearning.github.io/webnn/#api-mlcontext
// In the future, this interface may be extended as WebNN is implemented,
// so a dictionary may become insufficient.
// Information describing why an MLContext became unusable. Delivered via
// the MLContext.lost promise.
dictionary MLContextLostInfo {
DOMString message;  // Human-readable description of the loss reason.
};
// Range of tensor ranks (number of dimensions) supported by an operand.
// NOTE(review): bounds are presumably inclusive per the WebNN spec — confirm.
dictionary MLRankRange {
unsigned long min;  // Smallest supported rank.
unsigned long max;  // Largest supported rank.
};
// Constraints the implementation places on a single tensor operand of an op.
dictionary MLTensorLimits {
sequence<DOMString> dataTypes;  // Supported operand data type names.
MLRankRange rankRange;          // Supported range of operand ranks.
};
// Support limits for two-input operations (e.g. add, mul, matmul below).
dictionary MLBinarySupportLimits {
MLTensorLimits a;       // First input operand.
MLTensorLimits b;       // Second input operand.
MLTensorLimits output;  // Result operand.
};
// Support limits for batchNormalization, one entry per operand.
dictionary MLBatchNormalizationSupportLimits {
MLTensorLimits input;
MLTensorLimits mean;
MLTensorLimits variance;
MLTensorLimits scale;
MLTensorLimits bias;
MLTensorLimits output;
};
// Support limits for concat. `inputs` applies to every input tensor.
dictionary MLConcatSupportLimits {
MLTensorLimits inputs;
MLTensorLimits output;
};
// Support limits for conv2d and convTranspose2d.
dictionary MLConv2dSupportLimits {
MLTensorLimits input;
MLTensorLimits filter;
MLTensorLimits bias;
MLTensorLimits output;
};
// Support limits for gather, gatherElements, and gatherND.
dictionary MLGatherSupportLimits {
MLTensorLimits input;
MLTensorLimits indices;
MLTensorLimits output;
};
// Support limits for gemm (general matrix multiply with optional c operand).
dictionary MLGemmSupportLimits {
MLTensorLimits a;
MLTensorLimits b;
MLTensorLimits c;
MLTensorLimits output;
};
// Support limits for gru (multi-step recurrent network); output0/output1
// correspond to the operation's two outputs.
dictionary MLGruSupportLimits {
MLTensorLimits input;
MLTensorLimits weight;
MLTensorLimits recurrentWeight;
MLTensorLimits bias;
MLTensorLimits recurrentBias;
MLTensorLimits initialHiddenState;
MLTensorLimits output0;
MLTensorLimits output1;
};
// Support limits for gruCell (single-step GRU).
dictionary MLGruCellSupportLimits {
MLTensorLimits input;
MLTensorLimits weight;
MLTensorLimits recurrentWeight;
MLTensorLimits hiddenState;
MLTensorLimits bias;
MLTensorLimits recurrentBias;
MLTensorLimits output;
};
// Support limits for lstm (multi-step recurrent network); output0..output2
// correspond to the operation's three outputs.
dictionary MLLstmSupportLimits {
MLTensorLimits input;
MLTensorLimits weight;
MLTensorLimits recurrentWeight;
MLTensorLimits bias;
MLTensorLimits recurrentBias;
MLTensorLimits peepholeWeight;
MLTensorLimits initialHiddenState;
MLTensorLimits initialCellState;
MLTensorLimits output0;
MLTensorLimits output1;
MLTensorLimits output2;
};
// Support limits for lstmCell (single-step LSTM); output0/output1 correspond
// to the operation's two outputs.
dictionary MLLstmCellSupportLimits {
MLTensorLimits input;
MLTensorLimits weight;
MLTensorLimits recurrentWeight;
MLTensorLimits hiddenState;
MLTensorLimits cellState;
MLTensorLimits bias;
MLTensorLimits recurrentBias;
MLTensorLimits peepholeWeight;
MLTensorLimits output0;
MLTensorLimits output1;
};
// Support limits for normalization ops taking scale and bias operands
// (instanceNormalization, layerNormalization).
dictionary MLNormalizationSupportLimits {
MLTensorLimits input;
MLTensorLimits scale;
MLTensorLimits bias;
MLTensorLimits output;
};
// Support limits for prelu (parametric ReLU with a learned slope operand).
dictionary MLPreluSupportLimits {
MLTensorLimits input;
MLTensorLimits slope;
MLTensorLimits output;
};
// Support limits for quantizeLinear and dequantizeLinear.
dictionary MLQuantizeDequantizeLinearSupportLimits {
MLTensorLimits input;
MLTensorLimits scale;
MLTensorLimits zeroPoint;
MLTensorLimits output;
};
// Support limits for single-input logical/predicate ops whose operand is
// named `a` (logicalNot, isNaN, isInfinite).
dictionary MLLogicalNotSupportLimits {
MLTensorLimits a;
MLTensorLimits output;
};
// Generic support limits for ops with one input and one output; reused by
// most unary, pooling, and reduction ops in MLOpSupportLimits.
dictionary MLSingleInputSupportLimits {
MLTensorLimits input;
MLTensorLimits output;
};
// Support limits for scatterElements and scatterND.
dictionary MLScatterSupportLimits {
MLTensorLimits input;
MLTensorLimits indices;
MLTensorLimits updates;
MLTensorLimits output;
};
// Support limits for split. `outputs` applies to every output tensor.
dictionary MLSplitSupportLimits {
MLTensorLimits input;
MLTensorLimits outputs;
};
// Support limits for where (elementwise select between two values by a
// condition operand).
dictionary MLWhereSupportLimits {
MLTensorLimits condition;
MLTensorLimits trueValue;
MLTensorLimits falseValue;
MLTensorLimits output;
};
// Per-operation support limits for a context, returned by
// MLContext.opSupportLimits(). Each member describes the constraints the
// underlying implementation places on the corresponding graph operation.
dictionary MLOpSupportLimits {
// Context-wide properties.
MLInputOperandLayout preferredInputLayout;
[EnforceRange] unsigned long long maxTensorByteLength;  // Max size of a single tensor, in bytes.
// Limits applying to graph-level input, constant, and output operands.
MLTensorLimits input;
MLTensorLimits constant;
MLTensorLimits output;
MLSingleInputSupportLimits argMin;
MLSingleInputSupportLimits argMax;
MLBatchNormalizationSupportLimits batchNormalization;
MLSingleInputSupportLimits cast;
MLSingleInputSupportLimits clamp;
MLConcatSupportLimits concat;
MLConv2dSupportLimits conv2d;
MLConv2dSupportLimits convTranspose2d;
MLSingleInputSupportLimits cumulativeSum;
MLQuantizeDequantizeLinearSupportLimits dequantizeLinear;
// Element-wise binary ops.
MLBinarySupportLimits add;
MLBinarySupportLimits sub;
MLBinarySupportLimits mul;
MLBinarySupportLimits div;
MLBinarySupportLimits max;
MLBinarySupportLimits min;
MLBinarySupportLimits pow;
// Element-wise logical ops.
MLBinarySupportLimits equal;
MLBinarySupportLimits greater;
MLBinarySupportLimits greaterOrEqual;
MLBinarySupportLimits lesser;
MLBinarySupportLimits lesserOrEqual;
MLBinarySupportLimits notEqual;
MLBinarySupportLimits logicalAnd;
MLBinarySupportLimits logicalOr;
MLBinarySupportLimits logicalXor;
MLLogicalNotSupportLimits logicalNot;
MLLogicalNotSupportLimits isNaN;
MLLogicalNotSupportLimits isInfinite;
// Element-wise unary ops.
MLSingleInputSupportLimits abs;
MLSingleInputSupportLimits ceil;
MLSingleInputSupportLimits cos;
MLSingleInputSupportLimits erf;
MLSingleInputSupportLimits exp;
MLSingleInputSupportLimits floor;
MLSingleInputSupportLimits identity;
MLSingleInputSupportLimits log;
MLSingleInputSupportLimits neg;
MLSingleInputSupportLimits reciprocal;
MLSingleInputSupportLimits roundEven;
MLSingleInputSupportLimits sign;
MLSingleInputSupportLimits sin;
MLSingleInputSupportLimits sqrt;
MLSingleInputSupportLimits tan;
MLSingleInputSupportLimits elu;
MLSingleInputSupportLimits expand;
MLGatherSupportLimits gather;
MLGatherSupportLimits gatherElements;
MLGatherSupportLimits gatherND;
MLSingleInputSupportLimits gelu;
MLGemmSupportLimits gemm;
MLGruSupportLimits gru;
MLGruCellSupportLimits gruCell;
MLSingleInputSupportLimits hardSigmoid;
MLSingleInputSupportLimits hardSwish;
MLNormalizationSupportLimits instanceNormalization;
MLNormalizationSupportLimits layerNormalization;
MLSingleInputSupportLimits leakyRelu;
MLSingleInputSupportLimits linear;
MLLstmSupportLimits lstm;
MLLstmCellSupportLimits lstmCell;
MLBinarySupportLimits matmul;
MLSingleInputSupportLimits pad;
MLPreluSupportLimits prelu;
MLQuantizeDequantizeLinearSupportLimits quantizeLinear;
// Pool2d.
MLSingleInputSupportLimits averagePool2d;
MLSingleInputSupportLimits l2Pool2d;
MLSingleInputSupportLimits maxPool2d;
// Reduction ops.
MLSingleInputSupportLimits reduceL1;
MLSingleInputSupportLimits reduceL2;
MLSingleInputSupportLimits reduceLogSum;
MLSingleInputSupportLimits reduceLogSumExp;
MLSingleInputSupportLimits reduceMax;
MLSingleInputSupportLimits reduceMean;
MLSingleInputSupportLimits reduceMin;
MLSingleInputSupportLimits reduceProduct;
MLSingleInputSupportLimits reduceSum;
MLSingleInputSupportLimits reduceSumSquare;
MLSingleInputSupportLimits relu;
MLSingleInputSupportLimits resample2d;
MLSingleInputSupportLimits reshape;
MLSingleInputSupportLimits reverse;
MLScatterSupportLimits scatterElements;
MLScatterSupportLimits scatterND;
MLSingleInputSupportLimits sigmoid;
MLSingleInputSupportLimits slice;
MLSingleInputSupportLimits softmax;
MLSingleInputSupportLimits softplus;
MLSingleInputSupportLimits softsign;
MLSplitSupportLimits split;
MLSingleInputSupportLimits tanh;
MLSingleInputSupportLimits tile;
MLSingleInputSupportLimits transpose;
MLSingleInputSupportLimits triangular;
MLWhereSupportLimits where;
};
typedef record<USVString, MLTensor> MLNamedTensors;
// A machine learning context, per the WebNN spec:
// https://webmachinelearning.github.io/webnn/#api-mlcontext
[
RuntimeEnabled=MachineLearningNeuralNetwork,
SecureContext,
Exposed=(Window, Worker)
] interface MLContext {
// Promise resolved with loss details once this context becomes unusable.
[
RuntimeEnabled=MachineLearningNeuralNetwork,
CallWith=ScriptState
] readonly attribute Promise<MLContextLostInfo> lost;
// Destroys the context, releasing its resources.
// NOTE(review): effect on outstanding tensors/graphs is defined by the
// WebNN spec, not visible here.
[
RuntimeEnabled=MachineLearningNeuralNetwork,
CallWith=ScriptState,
RaisesException
] void destroy();
// Creates a tensor on this context described by `descriptor`.
[
RuntimeEnabled=MachineLearningNeuralNetwork,
CallWith=ScriptState,
RaisesException
] Promise<MLTensor> createTensor(MLTensorDescriptor descriptor);
// Creates a constant (read-only) tensor initialized from `sourceData`.
[
RuntimeEnabled=MachineLearningNeuralNetwork,
CallWith=ScriptState,
RaisesException
] Promise<MLTensor> createConstantTensor(
MLOperandDescriptor descriptor,
AllowSharedBufferSource sourceData);
// Creates a tensor that can later be exported to the given WebGPU device
// (see exportToGPU below).
[
RuntimeEnabled=MachineLearningNeuralNetwork,
CallWith=ScriptState,
RaisesException
] Promise<MLTensor> createExportableTensor(MLTensorDescriptor descriptor, GPUDevice device);
// Copies `srcData` into `dstTensor`.
[
RuntimeEnabled=MachineLearningNeuralNetwork,
CallWith=ScriptState,
RaisesException
] void writeTensor(MLTensor dstTensor, AllowSharedBufferSource srcData);
// Reads `sourceTensor` back into a newly allocated ArrayBuffer.
[
RuntimeEnabled=MachineLearningNeuralNetwork,
CallWith=ScriptState,
RaisesException
] Promise<ArrayBuffer> readTensor(
MLTensor sourceTensor);
// Overload: reads `sourceTensor` into the caller-provided buffer
// `destinationData` instead of allocating a new one.
[
RuntimeEnabled=MachineLearningNeuralNetwork,
CallWith=ScriptState,
RaisesException
] Promise<undefined> readTensor(
MLTensor sourceTensor,
AllowSharedBufferSource destinationData);
// Executes `graph` with the given named input tensors, writing results into
// the named output tensors.
[
RuntimeEnabled=MachineLearningNeuralNetwork,
CallWith=ScriptState,
RaisesException,
Measure
] void dispatch(
MLGraph graph, MLNamedTensors inputs, MLNamedTensors outputs);
// Reports the per-operation limits this context supports; see
// MLOpSupportLimits above.
[
RuntimeEnabled=MachineLearningNeuralNetwork,
CallWith=ScriptState
] MLOpSupportLimits opSupportLimits();
// Exports `tensor` to a WebGPU buffer.
// NOTE(review): presumably requires a tensor created via
// createExportableTensor — confirm against the implementation.
[
RuntimeEnabled=MachineLearningNeuralNetwork,
CallWith=ScriptState,
RaisesException
] Promise<GPUBuffer> exportToGPU(MLTensor tensor);
};