| | |
| | | { |
| | | "name": "sign-tool", |
| | | "version": "2.5.6", |
| | | "version": "2.5.9", |
| | | "icon": "public/favicon.ico", |
| | | "main": "dist/electron/main/index.js", |
| | | "author": "", |
| | |
| | | "vue" |
| | | ], |
| | | "dependencies": { |
| | | "@alicloud/facebody20191230": "^4.0.7", |
| | | "@alicloud/viapi-utils": "^1.0.0", |
| | | "@types/tracking": "^1.1.30", |
| | | "axios": "^0.27.2", |
| | | "electron-store": "^8.0.2", |
| | | "element-plus": "^2.2.6", |
| | |
| | | "sound-play": "^1.1.0", |
| | | "speak-tts": "^2.0.8", |
| | | "stompjs": "^2.3.3", |
| | | "tracking": "^1.1.3", |
| | | "vue-router": "^4.0.16" |
| | | } |
| | | } |
| | |
| | | import ElementPlus from 'element-plus' |
| | | import 'element-plus/dist/index.css' |
| | | import router from './router/index' |
| | | import "tracking"; |
| | | import "tracking/build/data/face"; |
| | | import { ElMessage } from 'element-plus'; |
| | | import { createPinia } from 'pinia' |
| | | import App from './App.vue' |
| | | import './samples/node-api' |
| | | |
| | | function getUserMedia(constraints:any, success:any, error:any) { |
| | | if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) { |
| | | // Latest standard API |
| | | ElMessage({ type: 'success', message: '支持最新标准API' }); |
| | | navigator.mediaDevices.getUserMedia(constraints).then(success).catch(error); |
| | | } else if (navigator.webkitGetUserMedia) { |
| | | ElMessage({ type: 'success', message: '支持webkit内核浏览器' }); |
| | | // WebKit-based browsers (legacy callback-style API, not Promise-based) |
| | | navigator.webkitGetUserMedia(constraints, success, error); |
| | | } else if (navigator.mozGetUserMedia) { |
| | | ElMessage({ type: 'success', message: '支持Firefox浏览器' }); |
| | | // Firefox (legacy callback-style API, not Promise-based) |
| | | navigator.mozGetUserMedia(constraints, success, error); |
| | | } else if (navigator.getUserMedia) { |
| | | ElMessage({ type: 'success', message: '支持旧版API' }); |
| | | // Legacy API (callback-style) |
| | | navigator.getUserMedia(constraints, success, error); |
| | | } else { |
| | | ElMessage('浏览器不支持getUserMedia'); |
| | | } |
| | | } |
| | | // initUserMedia_ must be overridden because Chrome's underlying API has changed |
| | | window.tracking.initUserMedia_ = function (element, opt_options) { |
| | | const options = { |
| | | video: true, |
| | | audio: !!(opt_options && opt_options.audio) |
| | | }; |
| | | getUserMedia(options, function (stream) { |
| | | try { |
| | | element.srcObject = stream; |
| | | } catch (err) { |
| | | element.src = window.URL.createObjectURL(stream); |
| | | } |
| | | }, function (e) { |
| | | ElMessage.error(e.message); |
| | | } |
| | | ); |
| | | }; |
| | | // Override the video-capture method to work around the bug where stop() has no effect |
| | | window.tracking.trackVideo_ = function (element, tracker) { |
| | | console.log('trackVideo_'); |
| | | var canvas = document.createElement('canvas'); |
| | | var context = canvas.getContext('2d'); |
| | | var width; |
| | | var height; |
| | | |
| | | var resizeCanvas_ = function () { |
| | | width = element.offsetWidth; |
| | | height = element.offsetHeight; |
| | | canvas.width = width; |
| | | canvas.height = height; |
| | | }; |
| | | resizeCanvas_(); |
| | | element.addEventListener('resize', resizeCanvas_); |
| | | |
| | | var requestId; |
| | | var stopped = false; |
| | | var requestAnimationFrame_ = function () { |
| | | requestId = window.requestAnimationFrame(function () { |
| | | if (element.readyState === element.HAVE_ENOUGH_DATA) { |
| | | try { |
| | | // Firefox v~30.0 gets confused with the video readyState firing an |
| | | // erroneous HAVE_ENOUGH_DATA just before HAVE_CURRENT_DATA state, |
| | | // hence keep trying to read it until resolved. |
| | | context.drawImage(element, 0, 0, width, height); |
| | | } catch (err) { } |
| | | tracking.trackCanvasInternal_(canvas, tracker); |
| | | } |
| | | if (stopped !== true) { |
| | | requestAnimationFrame_(); |
| | | } |
| | | }); |
| | | }; |
| | | var task = new tracking.TrackerTask(tracker); |
| | | task.on('stop', function () { |
| | | stopped = true; |
| | | window.cancelAnimationFrame(requestId); |
| | | }); |
| | | task.on('run', function () { |
| | | stopped = false; |
| | | requestAnimationFrame_(); |
| | | }); |
| | | return task.run(); |
| | | }; |
| | | createApp(App) |
| | | .use(createPinia()) |
| | | .use(ElementPlus) |
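The helper above reaches for the vendor-prefixed `navigator.webkitGetUserMedia` / `navigator.mozGetUserMedia` members and for `window.tracking`, none of which exist in the standard TypeScript DOM typings. Below is a minimal sketch of ambient declarations that would let this file type-check; the declaration file name and the exact type shapes are assumptions made for illustration, not part of this change:

```ts
// globals.d.ts (hypothetical): ambient typings for the legacy getUserMedia
// variants and the window.tracking object that main.ts relies on.
export {};

// Legacy, callback-style getUserMedia signature: (constraints, onSuccess, onError).
type LegacyGetUserMedia = (
  constraints: MediaStreamConstraints,
  success: (stream: MediaStream) => void,
  error: (err: Error) => void
) => void;

declare global {
  interface Navigator {
    webkitGetUserMedia?: LegacyGetUserMedia;
    mozGetUserMedia?: LegacyGetUserMedia;
  }
  interface Window {
    // tracking.js attaches itself to window when imported for its side effects
    tracking: any;
  }
}
```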
| New file |
| | |
| | | // This file is auto-generated, don't edit it |
| | | import facebody20191230, * as $facebody20191230 from '@alicloud/facebody20191230'; |
| | | // The required modules can be found by downloading the project's dependency file, or via the "Get SDK dependencies" info in the top-right corner of the console |
| | | import * as $OpenApi from '@alicloud/openapi-client'; |
| | | import Util, * as $Util from '@alicloud/tea-util'; |
| | | import ViapiUtil from '@alicloud/viapi-utils'; |
| | | import fs from 'fs'; |
| | | import { confingInfoStore } from '@/stores/StoresConfing' |
| | | import {sendPationCodeApi} from './httpApi' |
| | | import { sockteStore } from '@/stores/sockteInfo'; |
| | | |
| | | // Face database to search against |
| | | const faceDatabase= confingInfoStore().confingInfo.face_database |
| | | const accessKeyId='LTAI5tPBxZiqgd9h6gcL9Qzc' |
| | | const accessKeySecret='IE6nsjJMTul2ZHkeQ27bg4wmWIngTu' |
| | | const client=null |
| | | /** |
| | | * Initialize the account Client using an AccessKey ID & Secret (AK & SK) |
| | | * @return Client |
| | | * @throws Exception |
| | | */ |
| | | const createClient=()=>{ |
| | | let config = new $OpenApi.Config({ |
| | | // Required: your AccessKey ID |
| | | accessKeyId: 'LTAI5tPBxZiqgd9h6gcL9Qzc', |
| | | // Required: your AccessKey Secret |
| | | accessKeySecret: 'IE6nsjJMTul2ZHkeQ27bg4wmWIngTu', |
| | | }); |
| | | // Endpoint (domain) to access |
| | | config.endpoint = `facebody.cn-shanghai.aliyuncs.com`; |
| | | console.log('人脸识别客户端已启动') |
| | | return new facebody20191230(config); |
| | | } |
| | | // Upload the file to OSS |
| | | const getOssUrl=async(baseURL64:any)=>{ |
| | | let file: string = baseURL64; |
| | | let ossUrl: string = await ViapiUtil.upload(accessKeyId, accessKeySecret, file); |
| | | return ossUrl |
| | | } |
| | | /** |
| | | * |
| | | * @param path Path of the image to check |
| | | */ |
| | | const faceShibie= async(path:any)=>{ |
| | | const fileStream = fs.createReadStream(path); |
| | | let client=createClient() |
| | | // Use a local file as the image source |
| | | const facedata= confingInfoStore().confingInfo.face_database |
| | | const faceScore=confingInfoStore().confingInfo.face_score |
| | | console.log(facedata,'人脸数据库') |
| | | let searchFaceAdvanceRequest = new $facebody20191230.SearchFaceAdvanceRequest(); |
| | | searchFaceAdvanceRequest.imageUrlObject = fileStream; |
| | | searchFaceAdvanceRequest.dbName = facedata |
| | | searchFaceAdvanceRequest.limit = 2; |
| | | let runtime = new $Util.RuntimeOptions({ }); |
| | | console.log('-----监测图片--') |
| | | client.searchFaceAdvance(searchFaceAdvanceRequest, runtime).then(re=>{ |
| | | console.log('返回结果') |
| | | console.log(re.statusCode) |
| | | if(re.statusCode===200){ |
| | | const matchList=re.body.data?.matchList |
| | | console.log('得到的人脸库') |
| | | console.log(matchList) |
| | | if(matchList?.length>0){ |
| | | const faceItems =matchList[0].faceItems |
| | | if(faceItems[0].score>=faceScore){ |
| | | const entityId=faceItems[0].entityId |
| | | console.log(entityId,'得到了人脸识别id,存患者code到vuex') |
| | | sockteStore().setfaceSockte({ |
| | | deviceType: "人脸识别", |
| | | deviceName: "人脸识别", |
| | | result: entityId, |
| | | resultTime: '' |
| | | }) |
| | | }else{ |
| | | console.log('那些人脸都不是') |
| | | } |
| | | } |
| | | |
| | | } |
| | | }).catch((err:any)=>{ |
| | | console.log('人脸识别请求失败', err) |
| | | }).finally(()=>{ |
| | | console.log('---------------都要执行的') |
| | | // Delete the temporary image |
| | | delImg(path) |
| | | }) |
| | | } |
| | | // Convert a base64 data URL to a file, then run the face check |
| | | const base64toFile = (dataurl:any,filename='file') => { |
| | | if (!fs.existsSync('./imgs')) { |
| | | // Create the folder synchronously so the write below cannot run before mkdir finishes |
| | | fs.mkdirSync('./imgs') |
| | | console.log('文件夹创建成功') |
| | | } |
| | | const path = './imgs/'+ Date.now() +'.png'; |
| | | const base64 = dataurl.replace(/^data:image\/\w+;base64,/,""); // Strip the leading "data:image/png;base64," part of the data URL |
| | | // new Buffer() is deprecated (too permissive); since Node v6 use Buffer.from() instead |
| | | const dataBuffer = Buffer.from(base64, 'base64'); // Decode the base64 string into a Buffer |
| | | // return dataBuffer |
| | | fs.writeFile(path, dataBuffer, function(err){ // Write the file to disk with fs |
| | | if(err){ |
| | | console.log(err); |
| | | }else{ |
| | | console.log('写入成功!',path); |
| | | // getOssUrl(path) |
| | | // Verify the image (run face recognition) |
| | | faceShibie(path) |
| | | } |
| | | }) |
| | | } |
| | | // Delete the image file after verification |
| | | const delImg=(path:string)=>{ |
| | | fs.unlink(path,(err:any)=>{ |
| | | if(err){ |
| | | console.log('删除失败') |
| | | }else{ |
| | | console.log('删除成功') |
| | | } |
| | | }) |
| | | } |
| | | export { |
| | | createClient, |
| | | faceShibie, |
| | | base64toFile |
| | | } |
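For orientation, the helpers exported above are consumed later in this change: the face-detection component snapshots the video onto a canvas and passes the resulting data URL to `base64toFile`, which writes a temporary PNG and hands its path to `faceShibie`. A minimal usage sketch, assuming a PNG data URL is already in hand; the literal and the example path below are placeholders:

```ts
import { base64toFile, faceShibie } from '@/samples/faceApi'

// e.g. the result of canvas.toDataURL('image/png'); placeholder value only
const dataUrl = 'data:image/png;base64,iVBORw0KGgo...'

// Writes ./imgs/<timestamp>.png, then searches the configured face database
// and stores the matched entityId through sockteStore on success.
base64toFile(dataUrl)

// An already-saved image can also be checked (and deleted afterwards) directly:
faceShibie('./imgs/example.png')
```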
| | |
| | | deviceList=args[0].deviceList |
| | | // Save the config into the store |
| | | confingInfoStore().setconfingInfo(args[0]) |
| | | connect() |
| | | // connect() |
| | | // creatorClient(args[0]) |
| | | |
| | | } |
| | |
| | | sockteStore().setfaceSockte( |
| | | { |
| | | deviceName:res.deviceName, |
| | | type:"血压计", |
| | | type:"人脸识别", |
| | | result:res.result, |
| | | resultTime:res.resultTime, |
| | | state:2 |
| | |
| | | import os from "os" |
| | | import Speech from 'speak-tts' |
| | | import { reactive, computed, toRefs, onMounted, ref, watch } from "vue" |
| | | import { sendPationCode, sendPationSet } from '../../samples/sockteStomp' |
| | | import { sendPationCodeApi } from '../../samples/httpApi' |
| | | import { formatDate } from '@/utils/formatTime' |
| | | import state1 from '@/assets/state1.png' |
| | |
| | | } |
| | | else { |
| | | clearInterval(timer) |
| | | const datetimeCON = patientInfoStore().patientInfo.datetime |
| | | patientInfoStore().setpatientInfo({ |
| | | id: 0, |
| | | code: '', |
| | |
| | | deviceCode: '', |
| | | hemoCode: '', |
| | | pureWeight: '', |
| | | datetime: datetimeCON |
| | | datetime: '' |
| | | }) |
| | | state.clockNum = patientInfoStore().viewNumber |
| | | sockteStore().setweightSockte({ |
| | | type: '体重秤', |
| | | deviceName: '', |
| | |
| | | }, 1000) |
| | | // Keep a reference to the timer |
| | | state.timerNum = timer |
| | | console.log(timer) |
| | | } |
| | | // Clear certain states (called after the result has been sent) |
| | | const fuxuan=()=>{ |
| | | const datetimeCON = patientInfoStore().patientInfo.datetime |
| | | // Check whether a single host is connected to two devices |
| | | const islinkDouble=confingInfoStore().confingInfo.islinkDouble |
| | | // If not, clear the state immediately |
| | | if(!islinkDouble){ |
| | | patientInfoStore().setpatientInfo({ |
| | | id: 0, |
| | | code: '', |
| | | name: '', |
| | | patientAvatarIcon: '', |
| | | deviceCode: '', |
| | | hemoCode: '', |
| | | pureWeight: '', |
| | | datetime: datetimeCON |
| | | }) |
| | | sockteStore().setweightSockte({ |
| | | type: '体重秤', |
| | | deviceName: '', |
| | | result: '0', |
| | | resultTime: '', |
| | | state: 2 |
| | | }) |
| | | sockteStore().setxyjSockte({ |
| | | type: '血压计', |
| | | deviceName: '', |
| | | result: '', |
| | | resultTime: '', |
| | | state: 2 |
| | | }) |
| | | state.clockNum = patientInfoStore().viewNumber |
| | | state.aimTSL = '' |
| | | state.gao_ya = '' |
| | | state.di_ya = '' |
| | | state.mai_bu = '' |
| | | } |
| | | if (isUseFaceRecogService.value) { |
| | | console.log('开启人脸识别') |
| | | state.dialogVisible = true |
| | | }else{ |
| | | console.log('关闭人脸识别') |
| | | state.dialogVisible = false |
| | | } |
| | | |
| | | } |
| | | const state = reactive({ |
| | | fasongNum:0, |
| | | caozuo: 0,// must be clicked 10 times before it can be closed |
| | | logo: logo, |
| | | step5: step5, |
| | |
| | | state1: state1, |
| | | state2: state2, |
| | | state3: state3, |
| | | dialogVisible: false, |
| | | dialogVisible: true, |
| | | isActive: false, |
| | | inputCode: "", |
| | | Newdate: "", |
| | |
| | | settime() |
| | | if (patientInfo.value.id !== 0 && patientInfo.value.name !== '' && patientInfo.value.isScheduled === 1) { |
| | | let str = `${patientInfo.value.name}识别成功。` |
| | | state.dialogVisible = false |
| | | speech.value?.speak({ text: str }).then(() => { |
| | | console.log("播报完成...") |
| | | }) |
| | |
| | | } |
| | | // No schedule for today |
| | | else if (patientInfo.value.isScheduled === 0) { |
| | | // Close the face-recognition dialog |
| | | state.dialogVisible = false |
| | | speech.value?.speak({ text: `${patientInfo.value.name}今日没有排班,不能自助签到` }).then(() => { |
| | | console.log("播报完成...") |
| | | }) |
| | | if (isUseFaceRecogService.value) { |
| | | state.dialogVisible = true |
| | | } |
| | | // if (isUseFaceRecogService.value) { |
| | | // state.dialogVisible = true |
| | | // } |
| | | return |
| | | } |
| | | } |
| | |
| | | speech.value?.speak({ text: `称重完成,${tt}kg` }).then(() => { |
| | | console.log("播报完成...") |
| | | }) |
| | | console.log(111111) |
| | | patientCodeLs = mode.patientCode |
| | | for(let x=0;x<3;x++){ |
| | | const res= await updatePatient(mode) |
| | | if(res.code===200){ |
| | | return false |
| | | } |
| | | } |
| | | state.fasongNum=0 |
| | | sundMode(mode) |
| | | } |
| | | else { |
| | | // Temporary patient code = current patient code |
| | | console.log(22222222222) |
| | | patientCodeLs = mode.patientCode |
| | | } |
| | | // Calculate the target fluid-removal (dehydration) volume |
| | |
| | | ); |
| | | watch( |
| | | () => xyjInfo.value.resultTime, |
| | | () => { |
| | | async () => { |
| | | if (xyjInfo.value.result !== '') { |
| | | const list = xyjInfo.value.result.split(',') |
| | | if (list.length === 3) { |
| | |
| | | console.log("播报完成...") |
| | | }) |
| | | } else { |
| | | speech.value?.speak({ text: `收缩压:${state.gao_ya},舒张压:${state.di_ya},脉搏:${state.mai_bu}` }).then(() => { |
| | | console.log("播报完成...") |
| | | }) |
| | | const mode = { |
| | | patientCode: patientInfo.value.code, |
| | | weight: '', |
| | | bloodPressure: xyjInfo.value.result |
| | | |
| | | } |
| | | console.log('发送患者结果', mode) |
| | | if (patientCodeLsXy === mode.patientCode) { |
| | | speech.value?.speak({ text: '不能重复测量血压,请重新识别患者再测量血压' }).then(() => { |
| | | console.log("播报完成...") |
| | | }) |
| | | console.log('如果是重复提交 就不要上传到api') |
| | | return false |
| | | } else { |
| | | updatePatient(mode).then(re => { |
| | | speech.value?.speak({ text: `收缩压:${state.gao_ya},舒张压:${state.di_ya},脉搏:${state.mai_bu}` }).then(() => { |
| | | console.log("播报完成...") |
| | | }) |
| | | patientCodeLsXy = mode.patientCode |
| | | console.log(re) |
| | | }) |
| | | sendPationSet(mode) |
| | | } |
| | | // Write into the store (vuex) |
| | | // sendPationSet(mode) |
| | | state.fasongNum=0 |
| | | sundMode(mode) |
| | | } |
| | | } else { |
| | | state.gao_ya = "" |
| | |
| | | } |
| | | } |
| | | ); |
| | | // Patient code changed, coming from face recognition |
| | | watch( |
| | | ()=>faceInfo.value.result, |
| | | ()=>{ |
| | | console.log('页面收到了人脸识别') |
| | | sendPationCodeApi(faceInfo.value.result) |
| | | // console.log(faceInfo.value.result) |
| | | if(faceInfo.value.result!==''){ |
| | | sendPationCodeApi(faceInfo.value.result) |
| | | } |
| | | } |
| | | ) |
| | | watch(() => viewNumber, |
| | |
| | | state.clockNum = viewNumber.value |
| | | } |
| | | ); |
| | | const sundMode= (mode:any)=>{ |
| | | state.fasongNum++ |
| | | // const res= await updatePatient(mode) |
| | | updatePatient(mode).then(re=>{ |
| | | ElMessage.success('结果发送成功') |
| | | setTimeout(() => { |
| | | fuxuan() |
| | | },5000) |
| | | return false |
| | | }).catch(re=>{ |
| | | ElMessage('结果发送失败') |
| | | if(state.fasongNum<3){ |
| | | setTimeout(() => sundMode(mode), 1000); |
| | | } |
| | | else{ |
| | | return false |
| | | } |
| | | }) |
| | | } |
| | | const inputChabge = () => { |
| | | sendPationCodeApi(state.inputCode) |
| | | setTimeout(function () { |
| | |
| | | } |
| | | onMounted(() => { |
| | | console.log('页面初始化', os.hostname()) |
| | | console.log('页面初始化读取配置文件', confingInfoStore().confingInfo) |
| | | // Whether face recognition is enabled |
| | | isUseFaceRecogService.value = confingInfoStore().confingInfo.isUseFaceRecogService |
| | | if (isUseFaceRecogService.value) { |
| | | state.dialogVisible = true |
| | | } |
| | | state.haodu = (document.documentElement.clientHeight - 180) / 2 + 'px' |
| | | state.clockNum = patientInfoStore().viewNumber |
| | | setInterval(function () { |
| | | inputRef.value.focus(); |
| | | }, 1000) |
| | | speech.value = new Speech(); |
| | | speech.value?.setLanguage('zh-CN') |
| | | speech.value?.init().then(() => { |
| | | console.log('语音初始化成功') |
| | | }) |
| | | |
| | | setTimeout(()=>{ |
| | | console.log('3秒后执行') |
| | | // Whether face recognition is enabled |
| | | isUseFaceRecogService.value = confingInfoStore().confingInfo.isUseFaceRecogService |
| | | console.log('人脸识别',isUseFaceRecogService.value) |
| | | if (isUseFaceRecogService.value) { |
| | | console.log('开启人脸识别') |
| | | state.dialogVisible = true |
| | | }else{ |
| | | console.log('关闭人脸识别') |
| | | state.dialogVisible = false |
| | | } |
| | | state.haodu = (document.documentElement.clientHeight - 180) / 2 + 'px' |
| | | state.clockNum = patientInfoStore().viewNumber |
| | | setInterval(function () { |
| | | inputRef.value.focus(); |
| | | }, 1000) |
| | | speech.value = new Speech(); |
| | | speech.value?.setLanguage('zh-CN') |
| | | speech.value?.init().then(() => { |
| | | console.log('语音初始化成功') |
| | | }) |
| | | },3000) |
| | | }) |
| | | return { |
| | | ...toRefs(state), guyanbi, loginRef, isLink, netLink, weightInfo, patientInfo, xyjInfo, inputRef, inputChabge |
| | | ...toRefs(state),isUseFaceRecogService, guyanbi, loginRef, isLink, netLink, weightInfo, patientInfo, xyjInfo, inputRef, inputChabge |
| | | } |
| | | } |
| | | } |
| | | </script> |
| | | <template> |
| | | <div> |
| | | <div class="pagehome"> |
| | | <el-dialog v-model="dialogVisible" width="80%" center :show-close="false"> |
| | | <div class="devcont"> |
| | | <div class="pagehome" v-if="isUseFaceRecogService"> |
| | | <el-dialog v-model="dialogVisible" width="800px" center :show-close="false"> |
| | | <template> |
| | | <div class="my-header"> |
| | | <h4>人脸识别中。。。</h4> |
| | |
| | | body { |
| | | background: #F3F6FE; |
| | | padding: 0; |
| | | margin: 0; |
| | | |
| | | } |
| | | .devcont{ |
| | | width: 100%; |
| | | height: 99%; |
| | | } |
| | | |
| | | .inpu { |
| | | margin-top: 20px; |
| | | // margin-top: 20px; |
| | | position: absolute; |
| | | top:0.2rem; |
| | | right: 40px; |
| | | z-index: 999; |
| | | } |
| | | |
| | | .header { |
| | |
| | | box-shadow: 1px 2px 4px 0px rgba(201, 223, 246, 0.5); |
| | | background-color: rgba(255, 255, 255, 1); |
| | | border-radius: 20px; |
| | | height: 426px; |
| | | height: 100%; |
| | | padding-left: 1.5rem; |
| | | padding-top: 5%; |
| | | padding-right: 0.5rem; |
| | |
| | | <template> |
| | | <div class="mask"> |
| | | <div class="go"> |
| | | <div class="titleH" style="color: white;"> |
| | | <h3>请把头移动到摄像头能拍到的位置,不要动 !</h3> |
| | | </div> |
| | | <div class="box"> |
| | | <video id="videoCamera" class="canvas1" :width="videoWidth" :height="videoHeight" autoPlay></video> |
| | | <br/> |
| | | <canvas id="canvasCamera" class="canvas2" :width="300" :height="300"></canvas> |
| | | </div> |
| | | <!-- <div class="footer"> |
| | | <el-button @click="getCompetence" icon="el-icon-video-camera"> 打开摄像头 </el-button> |
| | | <el-button @click="drawImage" icon="el-icon-camera"> 拍照 </el-button> |
| | | <el-button @click="stopNavigator" icon="el-icon-switch-button"> 关闭摄像头 </el-button> |
| | | <el-button @click="resetCanvas" icon="el-icon-refresh"> 重置 </el-button> |
| | | </div> --> |
| | | </div> |
| | | <div class="wrapp"> |
| | | <div class="inpu"> |
| | | <el-input v-model="inputCode" ref="inputRef" id="inputCode" class="inputCode" @change="inputChabge" |
| | | placeholder="请输入患者卡号或扫描条码" /> |
| | | </div> |
| | | <div |
| | | class="status" |
| | | :style="{ color: msg === '检测到人脸' ? 'green' : 'red' }"> |
| | | {{ msg }} |
| | | </div> |
| | | <div class="rWrapp"> |
| | | <video id="myVideo" preload="preload" autoplay loop muted /> |
| | | <canvas ref="myCanvas" id="myCanvas" class="myCanvas" width="200" height="200" ></canvas> |
| | | </div> |
| | | <div v-if="imgSrc" class="img_bg_camera"> |
| | | <p>效果预览</p> |
| | | <img :src="imgSrc" class="tx_img" /> |
| | | </div> |
| | | </div> |
| | | </template> |
| | | <script lang="ts" setup> |
| | | import { ref, reactive, onMounted, toRefs, nextTick } from "vue"; |
| | | import { ElMessage, ElMessageBox } from "element-plus"; |
| | | import {sundSocket} from "@/samples/socketClient" |
| | | import { confingInfoStore } from '@/stores/StoresConfing' |
| | | const loading = ref(false); |
| | | const os = ref(false); //控制摄像头开关 |
| | | let thisVideo = ref(""); |
| | | let thisContext = ref(""); |
| | | let thisCancas = ref(""); |
| | | const videoWidth = ref(500); |
| | | const videoHeight = ref(500); |
| | | const postOptions = ref([]); |
| | | const certCtl = ref(""); |
| | | const mask = ref(true); |
| | | |
| | | // Query parameters |
| | | const queryParams = reactive({ |
| | | pageNum: 1, |
| | | pageSize: 10, |
| | | imgSrc: undefined, |
| | | }); |
| | | const closedPhono = ref(null); |
| | | |
| | | const emit = defineEmits(["closed"]); |
| | | const props = defineProps({ |
| | | visible: { type: Boolean }, |
| | | }); |
| | | const { visible } = toRefs(props); |
| | | const handleChange = (val) => { |
| | | console.log(visible); |
| | | }; |
| | | |
| | | // Request camera permission |
| | | const getCompetence = () => { |
| | | nextTick(() => { |
| | | os.value = false; |
| | | thisCancas = document.getElementById("canvasCamera"); |
| | | thisContext = thisCancas.getContext("2d"); |
| | | thisVideo = document.getElementById("videoCamera"); |
| | | closedPhono.value = thisVideo; |
| | | if (navigator.mediaDevices === undefined) { |
| | | navigator.mediaDevices = {}; |
| | | } |
| | | |
| | | if (navigator.mediaDevices.getUserMedia === undefined) { |
| | | navigator.mediaDevices.getUserMedia = function (constraints) { |
| | | // First grab any existing getUserMedia implementation, if present |
| | | let getUserMedia = navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.getUserMedia; |
| | | if (!getUserMedia) { |
| | | return Promise.reject(new Error("getUserMedia is not implemented in this browser")); |
| | | } |
| | | return new Promise(function (resolve, reject) { |
| | | getUserMedia.call(navigator, constraints, resolve, reject); |
| | | }); |
| | | }; |
| | | } |
| | | |
| | | const constraints = { |
| | | audio: false, |
| | | video: { width: videoWidth.value, height: videoHeight.value, transform: "scaleX(-1)" }, |
| | | }; |
| | | |
| | | navigator.mediaDevices |
| | | .getUserMedia(constraints) |
| | | .then(function (stream) { |
| | | if ("srcObject" in thisVideo) { |
| | | thisVideo.srcObject = stream; |
| | | } else { |
| | | thisVideo.src = window.URL.createObjectURL(stream); |
| | | } |
| | | thisVideo.onloadedmetadata = function (e) { |
| | | console.log('摄像头打开了') |
| | | SendTime() |
| | | thisVideo.play(); |
| | | }; |
| | | }) |
| | | .catch((err) => { |
| | | ElMessage.error("没有开启摄像头权限或浏览器版本不兼容"); |
| | | }); |
| | | }); |
| | | }; |
| | | |
| | | // Draw the current video frame onto the canvas |
| | | const drawImage = () => { |
| | | thisCancas = document.getElementById("canvasCamera"); |
| | | thisContext = thisCancas.getContext("2d"); |
| | | thisVideo = document.getElementById("videoCamera"); |
| | | thisContext.drawImage(thisVideo, 0, 0, 300, 300); |
| | | // Get the image as a data URL |
| | | queryParams.imgSrc = thisCancas.toDataURL('image/png'); |
| | | // console.log(queryParams.imgSrc); |
| | | const str=`<STX>{"photo":"${queryParams.imgSrc}"}<ETX>` |
| | | sundSocket(str) |
| | | }; |
| | | |
| | | // Clear the canvas |
| | | const clearCanvas = (id) => { |
| | | let c = document.getElementById(id); |
| | | let cxt = c.getContext("2d"); |
| | | cxt.clearRect(0, 0, 500, 500); |
| | | |
| | | }; |
| | | |
| | | // Reset the canvas |
| | | const resetCanvas = () => { |
| | | queryParams.imgSrc = ""; |
| | | clearCanvas("canvasCamera"); |
| | | }; |
| | | const SendTime=()=>{ |
| | | setInterval(drawImage,confingInfoStore().confingInfo.faceRecogDetectInterval*1000) |
| | | <script setup lang="ts"> |
| | | import { onMounted, ref } from "vue"; |
| | | import {faceShibie,base64toFile} from '@/samples/faceApi' |
| | | import { sendPationCodeApi } from '../../samples/httpApi' |
| | | import { confingInfoStore } from "@/stores/StoresConfing"; |
| | | |
| | | const msg = ref<string>("没识别到人脸..."); |
| | | |
| | | let trackerTask: any = null; |
| | | const inputCode=ref('') |
| | | const inputRef = ref() |
| | | |
| | | // Canvas used for drawing the face markers |
| | | const myCanvas = ref<HTMLCanvasElement | null>(null); |
| | | let imgSrc = ''; |
| | | // Instantiate the face detector |
| | | const myTracker: any = new tracking.ObjectTracker("face"); |
| | | myTracker.setInitialScale(4); |
| | | myTracker.setStepSize(2); |
| | | myTracker.setEdgesDensity(0.1); |
| | | |
| | | // Listen for face-detector results |
| | | myTracker.on("track", (event: tracking.TrackEvent) => { |
| | | const context = myCanvas.value?.getContext("2d") as CanvasRenderingContext2D; |
| | | if (myCanvas.value) { |
| | | context.clearRect(0, 0, myCanvas.value.width, myCanvas.value.height); |
| | | } |
| | | |
| | | // Stop the camera |
| | | const stopNavigator = () => { |
| | | // thisVideo = document.getElementById("videoCamera"); |
| | | if (closedPhono.value && closedPhono.value !== null) { |
| | | thisVideo.srcObject.getTracks()[0].stop(); |
| | | os.value = true; |
| | | } else { |
| | | ElMessage.error("没有开启摄像头权限或浏览器版本不兼容"); |
| | | if (event.data.length === 0) { |
| | | msg.value = "没识别到人脸..."; |
| | | } else if(event.data.length === 1) { |
| | | trackerTask.stop(); |
| | | msg.value = "检测到人脸"; |
| | | const myCanvas = document.getElementById("myCanvas");// |
| | | const thisContext = myCanvas?.getContext("2d"); |
| | | const myVideo = document.querySelector("#myVideo") as HTMLVideoElement; |
| | | thisContext.drawImage(myVideo, 0,0, 250, 200); |
| | | imgSrc = myCanvas?.toDataURL('image/png'); |
| | | // Convert to a file |
| | | base64toFile(imgSrc) |
| | | setTimeout(() => { |
| | | console.log('监测到人脸后1s') |
| | | trackerTask.run(); |
| | | }, 3000); |
| | | // @ts-ignore |
| | | if (typeof window.stream === "object") { |
| | | myVideo.srcObject = null; |
| | | // @ts-ignore |
| | | window.stream.getTracks().forEach((track) => track.stop()); |
| | | } |
| | | }; |
| | | onMounted(()=>{ |
| | | console.log('页面初始化读取配置文件',confingInfoStore().confingInfo) |
| | | getCompetence() |
| | | }) |
| | | defineExpose({ |
| | | stopNavigator, |
| | | }); |
| | | </script> |
| | | <style scoped> |
| | | .footer { |
| | | height: 50px; |
| | | background-color: white; |
| | | justify-content: space-between; |
| | | float: left; |
| | | z-index: 1999; |
| | | } |
| | | }); |
| | | const inputChabge = () => { |
| | | sendPationCodeApi(inputCode.value) |
| | | setTimeout(function () { |
| | | inputCode.value = '' |
| | | }, 1000) |
| | | } |
| | | onMounted(() => { |
| | | // Start the face detector |
| | | console.log('人脸识别初始化') |
| | | const isUseFaceRecogService = confingInfoStore().confingInfo.isUseFaceRecogService |
| | | console.log('人脸识别',isUseFaceRecogService) |
| | | setInterval(function () { |
| | | inputRef.value.focus(); |
| | | }, 1000) |
| | | if (isUseFaceRecogService) { |
| | | console.log('开启人脸识别初始化') |
| | | setTimeout(()=>{ |
| | | trackerTask = tracking.track("#myVideo", myTracker, { camera: true }); |
| | | },2000) |
| | | |
| | | |
| | | .mask { |
| | | z-index: 999; |
| | | text-align: center; |
| | | } |
| | | .go { |
| | | width: 100%; |
| | | background-color:#409EFF; |
| | | text-align: center; |
| | | display: inline-block; |
| | | } |
| | | .box { |
| | | width: 100%; |
| | | text-align: center; |
| | | |
| | | background-color: #d9ecff; |
| | | } |
| | | .canvas1{ |
| | | margin-top: 100px; |
| | | border: 2px solid #409EFF; |
| | | border-radius: 10%; |
| | | |
| | | } |
| | | .canvas2{ |
| | | visibility:hidden; |
| | | }else{ |
| | | console.log('关闭人脸识别') |
| | | |
| | | } |
| | | |
| | | |
| | | </style> |
| | | |
| | | }); |
| | | </script> |
| | | <style lang="less" scoped> |
| | | .wrapp { |
| | | height: 100%; |
| | | background-size: 100% 100%; |
| | | padding-top: 10px; |
| | | .rWrapp { |
| | | width: 500px; |
| | | height: 500px; |
| | | margin: auto; |
| | | // margin-top: 30px; |
| | | position: relative; |
| | | .myCanvas { |
| | | position: absolute; |
| | | top: 0; |
| | | left: 0; |
| | | border-radius: 50%; |
| | | width: 100%; |
| | | height: 100%; |
| | | } |
| | | #myVideo { |
| | | width: 100%; |
| | | height: 100%; |
| | | border-radius: 50%; |
| | | object-fit: cover; |
| | | } |
| | | } |
| | | .status { |
| | | //margin-top: 100px; |
| | | text-align: center; |
| | | } |
| | | .img_bg_camera{ |
| | | position: absolute; |
| | | top: -500px; |
| | | |
| | | z-index: -999; |
| | | } |
| | | } |
| | | </style> |
| | | |