保存时间:2026/4/2 11:40:26
Promise { <state>: "pending" } 这样的输出,这并不是错误,而是表明 navigator.mediaDevices.enumerateDevices() 返回的 Promise 对象还处于 “pending”(进行中)状态,意味着异步操作还没有完成。pending 只是在控制台中查看 navigator.mediaDevices.enumerateDevices() 这个 Promise 对象时的状态显示。camsel 和 micsel 变量已定义:在代码里,camsel 和 micsel 应当是 HTML 中的 select 元素,不过代码里并未对它们进行定义。你得在代码里获取这些元素。Promise 完成:要等 Promise 状态变为 fulfilled(已完成)或者 rejected(已拒绝),才能看到结果。<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Device Enumeration</title>
</head>
<body>
    <select id="camsel"></select>
    <select id="micsel"></select>
    <script>
        // Cache the <select> elements that will list the detected devices.
        const camsel = document.getElementById('camsel');
        const micsel = document.getElementById('micsel');
        // Enumerate all media devices and add one <option> per camera/mic.
        navigator.mediaDevices.enumerateDevices()
            .then((devices) => {
                let icam = 0;
                let imic = 0;
                for (const device of devices) {
                    console.log(device.kind + ": " + device.label + " id = " + device.deviceId);
                    const opt = document.createElement('option');
                    if (device.kind === "videoinput") {
                        icam += 1;
                        opt.innerHTML = "Camera #" + icam;
                        camsel.appendChild(opt);
                    } else if (device.kind === "audioinput") {
                        imic += 1;
                        opt.innerHTML = "Microphone #" + imic;
                        micsel.appendChild(opt);
                    }
                    // "audiooutput" devices are intentionally not listed.
                }
            })
            .catch((err) => {
                console.log(err.name + ": " + err.message);
            });
    </script>
</body>
</html>
select 元素,并且通过 document.getElementById 方法获取了这些元素,这样代码就能正常运行了。你可以把上述代码保存为一个 .html 文件,然后在浏览器中打开,就能看到摄像头和麦克风设备被正确添加到对应的选择框里。"use strict"
// --- Application state ---
// FIX: replaced `var` and the comma-chained declaration with one `let` per
// variable, and initialize the track lists explicitly to null.
let isfullscreen = false;   // whether the fullscreen toggle is active
let captureState = false;   // true while a captured still is shown instead of live video
let videoTracks = null;     // video MediaStreamTracks of the current stream (null until acquired)
let audioTracks = null;     // audio MediaStreamTracks of the current stream (null until acquired)

// --- Cached DOM elements ---
const alertElem = document.getElementsByClassName("alert")[0];
const fullElem = document.getElementById("full");
const camsel = document.getElementById("camsel");
const micsel = document.getElementById("micsel");
const video = document.getElementById("vid");
const canvas = document.createElement("canvas");   // off-screen canvas used for snapshots
const image = document.getElementById("imgid");
const testbtn = document.getElementById("testbtn");
const getimgbtn = document.getElementById("getimgbtn");
const downimgbtn = document.getElementById("downimgbtn");
const bar = document.getElementById("bar");
const miccan = document.getElementById('miccan');
const miccanCtx = miccan.getContext("2d");
// Wire up the UI once the DOM is ready.
window.addEventListener("DOMContentLoaded", () => {
    // Hide the alert banner until a getUserMedia failure needs it.
    alertElem.style.display = "none";
    videoTracks = null;
    audioTracks = null;
    // Button / element click handlers.
    testbtn.onclick = () => initVideo();
    getimgbtn.onclick = () => getImage();
    video.onclick = () => getImage();
    image.onclick = () => getImage();
    downimgbtn.onclick = () => downloadImage();
    // Warn when the browser cannot honour a facingMode constraint.
    const supports = navigator.mediaDevices.getSupportedConstraints();
    if (!supports['facingMode']) {
        alert('Browser does not support webcam facingMode!');
    }
    initPoly();
    initCameraMicSelects();
});
// Polyfill navigator.mediaDevices.getUserMedia for legacy browsers by
// wrapping the old callback-style APIs in a Promise.
function initPoly() {
    if (navigator.mediaDevices === undefined) {
        navigator.mediaDevices = {};
    }
    if (navigator.mediaDevices.getUserMedia === undefined) {
        navigator.mediaDevices.getUserMedia = function (constraints) {
            // FIX: also consider the unprefixed legacy navigator.getUserMedia,
            // as in the standard MDN polyfill, not only the vendor prefixes.
            var getUserMedia = navigator.getUserMedia ||
                navigator.webkitGetUserMedia ||
                navigator.mozGetUserMedia;
            if (!getUserMedia) {
                return Promise.reject(new Error('getUserMedia is not implemented in this browser'));
            }
            // Adapt the callback API to a Promise.
            return new Promise(function (resolve, reject) {
                getUserMedia.call(navigator, constraints, resolve, reject);
            });
        };
    }
}
// Populate the camera/microphone <select> boxes from enumerateDevices().
// Hides both selectors when the enumeration API is unavailable.
function initCameraMicSelects() {
    if (!navigator.mediaDevices || !navigator.mediaDevices.enumerateDevices) {
        console.log("enumerateDevices() not supported.");
        camsel.style.display = "none";
        micsel.style.display = "none";
        return;
    }
    navigator.mediaDevices.enumerateDevices()
        .then(function (devices) {
            let icam = 0;
            let imic = 0;
            devices.forEach(function (device) {
                console.log(device.kind + ": " + device.label + " id = " + device.deviceId);
                const opt = document.createElement('option');
                // FIX: use strict equality; drop the empty else branch.
                if (device.kind === "videoinput") {
                    opt.innerHTML = "Camera #" + (++icam);
                    camsel.appendChild(opt);
                } else if (device.kind === "audioinput") {
                    opt.innerHTML = "Microphone #" + (++imic);
                    micsel.appendChild(opt);
                }
                // "audiooutput" devices are intentionally ignored.
            });
        })
        .catch(function (err) {
            console.log(err.name + ": " + err.message);
        });
}
// Display the selected camera's MediaTrackSettings in table #tbl.
function setCameraInfo() {
    var i = camsel.selectedIndex;
    // FIX: the original computed `track` from the selection but then always
    // read videoTracks[0], so the table never reflected the selected camera.
    // A single getUserMedia stream normally carries only one video track, so
    // fall back to track 0 when no track exists at the selected index.
    var track = videoTracks[i] || videoTracks[0];
    var settings = track.getSettings();
    const tbl = document.getElementById("tbl");
    // Resolution in megapixels, two decimal places.
    var res = ((settings.width * settings.height) / 1e6).toFixed(2);
    tbl.rows[0].cells[1].innerHTML = track.label;
    tbl.rows[1].cells[1].innerHTML = res + " megapixels";
    tbl.rows[2].cells[1].innerHTML = settings.width + " pixels";
    tbl.rows[3].cells[1].innerHTML = settings.height + " pixels";
    tbl.rows[4].cells[1].innerHTML = settings.aspectRatio;
    tbl.rows[5].cells[1].innerHTML = settings.brightness;
    tbl.rows[6].cells[1].innerHTML = settings.contrast;
    tbl.rows[7].cells[1].innerHTML = settings.facingMode;
    tbl.rows[8].cells[1].innerHTML = settings.frameRate + " frames/second";
    tbl.rows[9].cells[1].innerHTML = settings.saturation;
    tbl.rows[10].cells[1].innerHTML = settings.sharpness;
}
// Display the selected microphone's MediaTrackSettings in table #tbl2.
function setMicInfo() {
    var i = micsel.selectedIndex;
    // FIX: the original computed `track` but then always read audioTracks[0].
    // Fall back to track 0 when no track exists at the selected index (one
    // getUserMedia stream normally carries a single audio track).
    var track = audioTracks[i] || audioTracks[0];
    var settings = track.getSettings();
    const tbl = document.getElementById("tbl2");
    tbl.rows[0].cells[1].innerHTML = track.label;
    tbl.rows[1].cells[1].innerHTML = settings.autoGainControl;
    tbl.rows[2].cells[1].innerHTML = settings.channelCount;
    tbl.rows[3].cells[1].innerHTML = settings.echoCancellation;
    tbl.rows[4].cells[1].innerHTML = settings.latency + " second";
    tbl.rows[5].cells[1].innerHTML = settings.noiseSuppression;
    tbl.rows[6].cells[1].innerHTML = settings.sampleRate + " Hz";
    tbl.rows[7].cells[1].innerHTML = settings.sampleSize + " bits";
}
// (Re)start the camera/microphone capture and preview.
// icam: optional camera index; defaults to the current camsel selection.
function initVideo(icam) {
    stopVideo();
    var constraints = {
        audio: true,
        video: {
            width: { ideal: 4096 },
            height: { ideal: 2160 },
            facingMode: { ideal: "user" }
        }
    };
    // FIX: the parameter was shadowed by a local `var icam = ...`, so any
    // argument was silently ignored. Fall back to the UI selection only when
    // no index is supplied (backward compatible: existing callers pass none).
    if (icam === undefined) {
        icam = camsel.selectedIndex;
    }
    // Index 1 is treated as the rear camera — presumably matches how
    // initCameraMicSelects orders the options; TODO confirm on multi-cam devices.
    if (icam === 1) {
        constraints.video.facingMode = { exact: "environment" };
    }
    alertElem.style.display = "none";
    navigator.mediaDevices.getUserMedia(constraints)
        .then(function (mediaStream) {
            visualize(mediaStream);
            video.srcObject = mediaStream;
            video.onloadedmetadata = function (e) {
                video.play();
            };
            videoTracks = mediaStream.getVideoTracks();
            audioTracks = mediaStream.getAudioTracks();
            setCameraInfo();
            setMicInfo();
            console.log(videoTracks[0]);
            console.log(videoTracks[0].getSettings());
            var i = micsel.selectedIndex;
            console.log(audioTracks[i]);
            console.log(audioTracks[i].getSettings());
        })
        .catch(function (err) {
            console.log(err.name + ": " + err.message);
            // get_browser() is defined elsewhere in the project.
            if (get_browser() === "Chrome") {
                alertElem.style.display = "block";
            }
        });
}
// Stop every audio track.
// FIX: added the same null guard stopVideo has — the original crashed with a
// TypeError when called before any stream was acquired (e.g. from getImage).
function stopAudio() {
    if (audioTracks == null) return;
    audioTracks.forEach(track => track.stop());
}

// Stop every video track, if a stream has been acquired.
function stopVideo() {
    if (videoTracks == null) return;
    videoTracks.forEach(track => track.stop());
}
// Toggle between a frozen snapshot of the video and the live preview.
function getImage() {
    if (captureState === false) {
        // Freeze: capture the current video frame onto the off-screen canvas.
        captureState = true;
        // FIX: use the `video` element reference declared above instead of
        // relying on the implicit window.vid global created by the element id.
        var rect = video.getBoundingClientRect();
        canvas.width = rect.width;
        canvas.height = rect.height;
        var ctx = canvas.getContext('2d');
        // Flip horizontally — presumably because the live preview is mirrored
        // in CSS, so the snapshot matches what the user sees; TODO confirm.
        ctx.setTransform(-1, 0, 0, 1, canvas.width, 0);
        ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
        const dataURL = canvas.toDataURL("image/png");
        image.setAttribute('src', dataURL);
        stopAudio();
        stopVideo();
        video.style.display = "none";
        image.style.display = "block";
        getimgbtn.children[0].src = "/Public/images/play_arrow-24px.svg";
        downimgbtn.disabled = false;
    } else {
        // Resume: restart the live preview and hide the snapshot.
        captureState = false;
        initVideo();
        video.style.display = "block";
        image.style.display = "none";
        getimgbtn.children[0].src = "/Public/images/pause-24px.svg";
        downimgbtn.disabled = true;
    }
}
// Trigger a browser download of the currently displayed snapshot.
function downloadImage() {
    const link = document.createElement('a');
    link.download = 'myimage.png';
    link.href = image.src;
    link.click();
}
// Toggle fullscreen presentation: swap the button icon, apply/remove the
// "fullscreen" CSS class on the media elements, pin/unpin the control bar,
// and lock/unlock page scrolling.
function fullscreen() {
    isfullscreen = !isfullscreen;
    if (isfullscreen) {
        fullElem.children[0].src = "/Public/images/fullscreen_exit-24px.svg";
        image.classList.add("fullscreen");
        video.classList.add("fullscreen");
        bar.style.setProperty("bottom", "0");
        bar.style.setProperty("position", "fixed");
        document.body.style.overflow = 'hidden';
    } else {
        fullElem.children[0].src = "/Public/images/fullscreen-24px.svg";
        image.classList.remove("fullscreen");
        video.classList.remove("fullscreen");
        bar.style.setProperty("bottom", "unset");
        bar.style.setProperty("position", "relative");
        document.body.style.overflow = 'visible';
    }
}
// Analyse the given MediaStream and continuously draw its time-domain
// waveform onto the #miccan canvas.
function visualize(stream) {
    // FIX: the original `var audioCtx = 0; if (!audioCtx)` was a dead check;
    // also add the webkit prefix fallback used by the other copy of this
    // function in the project.
    const audioCtx = new (window.AudioContext || window.webkitAudioContext)();
    const source = audioCtx.createMediaStreamSource(stream);
    const analyser = audioCtx.createAnalyser();
    analyser.fftSize = 2048;
    const bufferLength = analyser.frequencyBinCount;
    const dataArray = new Uint8Array(bufferLength);
    source.connect(analyser);
    draw();
    // Render one frame and reschedule itself via requestAnimationFrame.
    function draw() {
        const WIDTH = miccan.width;
        const HEIGHT = miccan.height;
        requestAnimationFrame(draw);
        analyser.getByteTimeDomainData(dataArray);
        miccanCtx.fillStyle = 'rgb(200, 200, 200)';
        miccanCtx.fillRect(0, 0, WIDTH, HEIGHT);
        miccanCtx.lineWidth = 2;
        miccanCtx.strokeStyle = 'rgb(0, 0, 0)';
        miccanCtx.beginPath();
        const sliceWidth = WIDTH * 1.0 / bufferLength;
        let x = 0;
        for (let i = 0; i < bufferLength; i++) {
            // Samples are bytes centered on 128; v == 1.0 is silence midline.
            const v = dataArray[i] / 128.0;
            const y = v * HEIGHT / 2;
            if (i === 0) {
                miccanCtx.moveTo(x, y);
            } else {
                miccanCtx.lineTo(x, y);
            }
            x += sliceWidth;
        }
        miccanCtx.lineTo(miccan.width, miccan.height / 2);
        miccanCtx.stroke();
    }
}
// server.js — minimal Socket.IO signaling relay for the WebRTC demo.
const express = require('express');
const http = require('http');
const socketIo = require('socket.io');

const app = express();
const server = http.createServer(app);
const io = socketIo(server);

io.on('connection', (socket) => {
    console.log('A user connected');

    // Forward each signaling message to every other connected client.
    const relay = (event) => {
        socket.on(event, (data) => {
            socket.broadcast.emit(event, data);
        });
    };
    relay('offer');
    relay('answer');
    relay('ice-candidate');

    socket.on('disconnect', () => {
        console.log('A user disconnected');
    });
});

const port = 3000;
server.listen(port, () => {
    console.log(`Server running on port ${port}`);
});
// Global signaling/connection state.
let peerConnection;
const socket = io(); // connect to the signaling server

// Create the RTCPeerConnection, attach the local tracks, and wire up ICE
// candidate signaling and remote-track rendering.
function startPeerConnection() {
    peerConnection = new RTCPeerConnection();
    // Add the local audio/video tracks (captured elsewhere into the globals).
    const localStream = video.srcObject;
    videoTracks.forEach(track => peerConnection.addTrack(track, localStream));
    audioTracks.forEach(track => peerConnection.addTrack(track, localStream));
    // Relay each ICE candidate through the signaling server.
    peerConnection.onicecandidate = (event) => {
        if (event.candidate) {
            socket.emit('ice-candidate', {
                candidate: event.candidate,
                sdpMid: event.candidate.sdpMid,
                sdpMLineIndex: event.candidate.sdpMLineIndex
            });
        }
    };
    // Render the remote stream.
    // FIX: ontrack fires once per received track (audio AND video), so the
    // original appended a duplicate <video> element; reuse a single element.
    let remoteVideo = null;
    peerConnection.ontrack = (event) => {
        if (!remoteVideo) {
            remoteVideo = document.createElement('video');
            remoteVideo.autoplay = true;
            document.body.appendChild(remoteVideo);
        }
        remoteVideo.srcObject = event.streams[0];
    };
}
// Create an SDP offer, install it locally, and send it via the signaling
// server. Returns a promise that settles when signaling completes.
async function sendOffer() {
    try {
        const offer = await peerConnection.createOffer();
        await peerConnection.setLocalDescription(offer);
        socket.emit('offer', peerConnection.localDescription);
    } catch (error) {
        console.log('Error creating offer:', error);
    }
}
// Receive a relayed offer: install it as the remote description, then create
// and send back an answer.
socket.on('offer', (offer) => {
    // FIX: guard against an offer arriving before startPeerConnection() has
    // run, which previously crashed with a TypeError on undefined.
    if (!peerConnection) {
        console.log('Received offer before the peer connection was initialized; ignoring.');
        return;
    }
    peerConnection.setRemoteDescription(new RTCSessionDescription(offer))
        .then(() => peerConnection.createAnswer())
        .then((answer) => peerConnection.setLocalDescription(answer))
        .then(() => {
            socket.emit('answer', peerConnection.localDescription);
        })
        .catch(error => console.log('Error handling offer:', error));
});
// Receive a relayed answer and install it as the remote description.
socket.on('answer', async (answer) => {
    try {
        await peerConnection.setRemoteDescription(new RTCSessionDescription(answer));
    } catch (error) {
        console.log('Error handling answer:', error);
    }
});
// Receive a relayed ICE candidate and add it to the connection.
socket.on('ice-candidate', (data) => {
    // FIX: the sender wraps the serialized candidate as
    // { candidate, sdpMid, sdpMLineIndex }, so data.candidate — not data —
    // is the RTCIceCandidateInit dictionary. Accept both shapes for safety.
    const init = (data && typeof data.candidate === 'object' && data.candidate !== null)
        ? data.candidate
        : data;
    peerConnection.addIceCandidate(new RTCIceCandidate(init))
        .catch(error => console.log('Error adding ice candidate:', error));
});
// (Re)start the camera/microphone capture, then initiate the WebRTC call.
// icam: optional camera index; defaults to the current camsel selection.
function initVideo(icam) {
    stopVideo();
    var constraints = {
        audio: true,
        video: {
            width: { ideal: 4096 },
            height: { ideal: 2160 },
            facingMode: { ideal: "user" }
        }
    };
    // FIX: the parameter was shadowed by a local `var icam = ...`, so any
    // argument was silently ignored. Fall back to the UI selection only when
    // no index is supplied (backward compatible: existing callers pass none).
    if (icam === undefined) {
        icam = camsel.selectedIndex;
    }
    if (icam === 1) {
        constraints.video.facingMode = { exact: "environment" };
    }
    alertElem.style.display = "none";
    navigator.mediaDevices.getUserMedia(constraints)
        .then(function (mediaStream) {
            visualize(mediaStream);
            video.srcObject = mediaStream;
            video.onloadedmetadata = function (e) {
                video.play();
            };
            videoTracks = mediaStream.getVideoTracks();
            audioTracks = mediaStream.getAudioTracks();
            setCameraInfo();
            setMicInfo();
            console.log(videoTracks[0]);
            console.log(videoTracks[0].getSettings());
            var i = micsel.selectedIndex;
            console.log(audioTracks[i]);
            console.log(audioTracks[i].getSettings());
            // Local media is ready: set up the peer connection and offer.
            startPeerConnection();
            sendOffer();
        })
        .catch(function (err) {
            console.log(err.name + ": " + err.message);
            // get_browser() is defined elsewhere in the project.
            if (get_browser() === "Chrome") {
                alertElem.style.display = "block";
            }
        });
}
<!DOCTYPE html>
<html lang="zh-CN">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>麦克风测试工具</title>
    <style>
        canvas {
            border: 1px solid black;
        }
    </style>
</head>
<body>
    <h1>麦克风测试工具</h1>
    <button id="testbtn">开始测试</button>
    <canvas id="miccan" width="400" height="150"></canvas>
    <script>
        "use strict"
        const testbtn = document.getElementById("testbtn");
        const miccan = document.getElementById('miccan');
        const miccanCtx = miccan.getContext("2d");
        testbtn.onclick = function () {
            initAudio();
        };
        // Request microphone access, then visualize and play the captured audio.
        function initAudio() {
            const constraints = {
                audio: true,
                video: false
            };
            navigator.mediaDevices.getUserMedia(constraints)
                .then(function (mediaStream) {
                    visualize(mediaStream);
                    // Play the microphone input back through an <audio> element.
                    const audio = new Audio();
                    audio.srcObject = mediaStream;
                    audio.play();
                })
                .catch(function (err) {
                    console.log(err.name + ": " + err.message);
                });
        }
        // Draw a live time-domain waveform of the stream onto the canvas.
        function visualize(stream) {
            // FIX: the original `var audioCtx = 0; if (!audioCtx)` was a dead check.
            const audioCtx = new (window.AudioContext || window.webkitAudioContext)();
            const source = audioCtx.createMediaStreamSource(stream);
            const analyser = audioCtx.createAnalyser();
            analyser.fftSize = 2048;
            const bufferLength = analyser.frequencyBinCount;
            const dataArray = new Uint8Array(bufferLength);
            source.connect(analyser);
            // FIX: do NOT connect the analyser to audioCtx.destination — the
            // stream is already played by the <audio> element in initAudio(),
            // so routing it to the destination too played the mic input twice.
            draw();
            function draw() {
                const WIDTH = miccan.width;
                const HEIGHT = miccan.height;
                requestAnimationFrame(draw);
                analyser.getByteTimeDomainData(dataArray);
                miccanCtx.fillStyle = 'rgb(200, 200, 200)';
                miccanCtx.fillRect(0, 0, WIDTH, HEIGHT);
                miccanCtx.lineWidth = 2;
                miccanCtx.strokeStyle = 'rgb(0, 0, 0)';
                miccanCtx.beginPath();
                const sliceWidth = WIDTH * 1.0 / bufferLength;
                let x = 0;
                for (let i = 0; i < bufferLength; i++) {
                    const v = dataArray[i] / 128.0;
                    const y = v * HEIGHT / 2;
                    if (i === 0) {
                        miccanCtx.moveTo(x, y);
                    } else {
                        miccanCtx.lineTo(x, y);
                    }
                    x += sliceWidth;
                }
                miccanCtx.lineTo(miccan.width, miccan.height / 2);
                miccanCtx.stroke();
            }
        }
    </script>
</body>
</html>
initAudio 函数。initAudio 函数请求获取麦克风的音频输入权限,并在获取成功后,调用 visualize 函数来可视化音频波形,同时创建一个 <audio> 元素并将音频流设置为其源,从而播放音频。visualize 函数使用 AudioContext 和 AnalyserNode 来分析音频数据,并在 <canvas> 元素上绘制音频的时域波形图。.html 文件(例如 mic_test.html),然后使用浏览器打开该文件,点击按钮即可测试麦克风的输入和播放功能。 // 来表示。双斜杠后面直到该行末尾的所有内容都会被视为注释内容,解释器会忽略这部分内容。 例如:// 这是一个单行注释,用于说明下面这行代码的作用
let num = 10; // 声明一个变量 num 并赋值为 10
/* 开始,以 */ 结束。在 /* 和 */ 之间的所有内容都会被当作注释,无论跨多少行,解释器都不会执行这部分内容。例如:/*
这是一个多行注释示例
可以在里面写多行的说明文字
用于对代码块或功能进行更详细的解释
*/
/**
 * Add two numbers.
 * @param {number} a - first addend
 * @param {number} b - second addend
 * @returns {number} the sum of a and b
 */
function add(a, b) {
    const sum = a + b;
    return sum;
}
<video> 元素,以及一个用于音频可视化的 <canvas> 元素。navigator.mediaDevices.getUserMedia 方法请求获取麦克风和摄像头的访问权限。<video> 元素的 srcObject 属性,从而显示摄像头画面。visualize 函数对麦克风输入的音频进行可视化处理,在 <canvas> 上绘制音频波形。.html 文件,然后用浏览器打开,点击 “开始测试” 按钮就可以进行麦克风和摄像头的测试。about:debug,然后回车,让浏览器打开调试模式,可能会有一些有用的信息显示出来,有助于进一步排查问题。about:config并回车,点击 “接受风险和继续”。在搜索栏中输入media.peerconnection.enabled,查看该设置的值是否为true,若为false,则双击将其改为true。about:webrtc,可以查看 WebRTC 会话的相关信息,包括呼叫信令、连接建立、网络流量等,通过这些信息来判断 WebRTC 的工作状态。还可以输入about:logging,选择 “WebRTC” 日志预设,让 Firefox 生成与 WebRTC 调用相关的详细日志,以便进一步排查问题。DetectRTC.isWebRTCSupported方法可以判断浏览器是否支持整个 WebRTC API,通过DetectRTC.isGetUserMediaSupported方法可以检测浏览器是否支持获取用户摄像头和麦克风权限的getUserMedia API。.py extension is typically text/x-python..py files:.py file to a client, it should set the Content-Type header to text/x-python. For example, in a Python-based web framework like Flask, you can set the MIME type when serving a static Python file:from flask import Flask, send_file
# Minimal Flask app demonstrating how to serve a .py file with an explicit
# text/x-python MIME type.
# FIX: restored the indentation that was lost in the paste — the original
# flat lines are not valid Python.
app = Flask(__name__)

@app.route('/get_py_file')
def get_py_file():
    # send_file sets the Content-Type header from the mimetype argument.
    return send_file('your_file.py', mimetype='text/x-python')

if __name__ == '__main__':
    app.run()
text/x-python to ensure proper handling.text/x-python is the most common MIME type for Python files, in some systems or specific applications, you might also encounter application/x-python being used, though it's less standard..txt file is text/plain.text/plain MIME type is specifically designed for simple, unformatted text files. Here's why it's used and where you might encounter it:.txt file to a client, it should set the Content-Type header to text/plain in the HTTP response. For example, in a Node.js application using the http module:const http = require('http');
const fs = require('fs');
// Serve example.txt as text/plain on /example.txt.
const server = http.createServer((req, res) => {
    if (req.url === '/example.txt') {
        fs.readFile('example.txt', (err, data) => {
            if (err) {
                res.statusCode = 500;
                res.end('Internal Server Error');
            } else {
                res.setHeader('Content-Type', 'text/plain');
                res.end(data);
            }
        });
    } else {
        // FIX: the original never responded to any other URL, leaving those
        // requests hanging until the client timed out.
        res.statusCode = 404;
        res.end('Not Found');
    }
});
server.listen(3000, () => {
    console.log('Server running on port 3000');
});
.txt file as an email attachment, the email client or the script that generates the email should specify the MIME type of the attachment as text/plain to ensure that the recipient's email client can handle it correctly.text/plain MIME type is used to identify the file type when handling .txt files.text/txt is not a standard MIME type and is not recognized in the official MIME type registry. So, always use text/plain for .txt files..safetensors 文件,对 FP8 权重进行反量化操作,并将结果保存为新的 .safetensors 文件。.safetensors 文件。weight_dequant 函数的 C++ 版本。#include <iostream>
#include <cstdint>
#include <filesystem>
#include <fstream>
#include <iomanip>
#include <sstream>
#include <string>
#include <vector>
#include <nlohmann/json.hpp>
// Simple element-wise dequantization: y[k] = float(x[k]) * s[k] over an
// M x N matrix stored in row-major order. x and s must each hold at least
// M * N elements.
std::vector<float> weight_dequant(const std::vector<uint8_t>& x, const std::vector<float>& s, int M, int N) {
    const std::size_t total = static_cast<std::size_t>(M) * static_cast<std::size_t>(N);
    std::vector<float> y(total);
    for (std::size_t k = 0; k < total; ++k) {
        y[k] = static_cast<float>(x[k]) * s[k];
    }
    return y;
}
// Read an entire file from disk as raw bytes.
std::vector<uint8_t> read_safetensors(const std::string& filename) {
    std::ifstream file(filename, std::ios::binary);
    return std::vector<uint8_t>((std::istreambuf_iterator<char>(file)),
                                std::istreambuf_iterator<char>());
}
// Write raw bytes to disk, truncating any existing file.
void write_safetensors(const std::string& filename, const std::vector<uint8_t>& data) {
    std::ofstream out(filename, std::ios::binary);
    out.write(reinterpret_cast<const char*>(data.data()),
              static_cast<std::streamsize>(data.size()));
}
// Parse a JSON document from disk.
nlohmann::json read_json(const std::string& filename) {
    nlohmann::json parsed;
    std::ifstream in(filename);
    in >> parsed;
    return parsed;
}
// 主函数
void main(const std::string& fp8_path, const std::string& bf16_path) {
std::filesystem::create_directories(bf16_path);
std::string model_index_file = fp8_path + "/model.safetensors.index.json";
nlohmann::json model_index = read_json(model_index_file);
auto weight_map = model_index["weight_map"];
std::vector<std::string> safetensor_files;
for (const auto& entry : std::filesystem::directory_iterator(fp8_path)) {
if (entry.path().extension() == ".safetensors") {
safetensor_files.push_back(entry.path().string());
}
}
for (const auto& safetensor_file : safetensor_files) {
std::string file_name = std::filesystem::path(safetensor_file).filename().string();
std::vector<uint8_t> data = read_safetensors(safetensor_file);
for (const auto& [weight_name, weight_in_file] : weight_map.items()) {
if (weight_in_file != file_name) {
continue;
}
if (weight_name.ends_with("_scale_inv")) {
continue;
}
// 假设我们可以找到权重和缩放因子
std::vector<uint8_t> weight;
std::vector<float> scale_inv;
if (/* 检查是否为 FP8 权重 */ true) {
std::string scale_inv_name = weight_name + "_scale_inv";
// 读取权重和缩放因子
// ...
std::vector<float> new_weight = weight_dequant(weight, scale_inv, /* M */ 1, /* N */ 1);
// 将新权重转换为 uint8_t
std::vector<uint8_t> new_weight_uint8(new_weight.size());
for (size_t i = 0; i < new_weight.size(); ++i) {
new_weight_uint8[i] = static_cast<uint8_t>(new_weight[i]);
}
std::string new_safetensor_file = bf16_path + "/" + file_name.replace(".safetensors", "." + weight_name + ".safetensors");
write_safetensors(new_safetensor_file, new_weight_uint8);
} else {
// 非 FP8 权重,直接复制
std::string new_safetensor_file = bf16_path + "/" + file_name.replace(".safetensors", "." + weight_name + ".safetensors");
write_safetensors(new_safetensor_file, data);
}
}
}
// 更新模型索引
std::string new_model_index_file = bf16_path + "/model.safetensors.index.json";
nlohmann::json new_weight_map;
for (const auto& [original_weight_name, original_file_name] : weight_map.items()) {
if (!original_weight_name.ends_with("_scale_inv")) {
std::string new_weight_name = original_weight_name;
std::string new_file_name = original_file_name.replace(".safetensors", "." + original_weight_name + ".safetensors");
new_weight_map[new_weight_name] = new_file_name;
}
}
nlohmann::json new_model_index = {{"metadata", {}}, {"weight_map", new_weight_map}};
std::ofstream new_index_file(new_model_index_file);
new_index_file << std::setw(2) << new_model_index;
}
int main(int argc, char* argv[]) {
if (argc != 3) {
std::cerr << "Usage: " << argv[0] << " --input-fp8-hf-path <path> --output-bf16-hf-path <path>" << std::endl;
return 1;
}
std::string input_fp8_path = argv[1];
std::string output_bf16_path = argv[2];
main(input_fp8_path, output_bf16_path);
return 0;
}
weight_dequant 函数实现了简单的反量化操作。read_safetensors 和 write_safetensors 函数用于读取和写入 .safetensors 文件。nlohmann::json 库来解析 JSON 文件。main 函数实现了主要的转换逻辑。.safetensors 文件格式解析可能需要更复杂的逻辑。nlohmann::json 库,并使用以下命令编译代码:g++ -std=c++17 -o convert convert.cpp -I /path/to/json/include
./convert --input-fp8-hf-path /path/to/fp8 --output-bf16-hf-path /path/to/bf16