<template>
  <div class="mask">
    <div class="go">
      <div class="titleH" style="color: white;">
        <h3>请把头移动到摄像头能拍到的位置,不要动 !</h3>
      </div>
      <div class="box">
        <video id="videoCamera" class="canvas1" :width="videoWidth" :height="videoHeight" autoplay></video>
        <br />
        <canvas id="canvasCamera" class="canvas2" :width="300" :height="300"></canvas>
      </div>
      <!-- <div class="footer">
        <el-button @click="getCompetence" icon="el-icon-video-camera"> 打开摄像头 </el-button>
        <el-button @click="drawImage" icon="el-icon-camera"> 拍照 </el-button>
        <el-button @click="stopNavigator" icon="el-icon-switch-button"> 关闭摄像头 </el-button>
        <el-button @click="resetCanvas" icon="el-icon-refresh"> 重置 </el-button>
      </div> -->
    </div>
  </div>
  <div class="wrapp">
    <div class="inpu">
      <el-input v-model="inputCode" ref="inputRef" id="inputCode" class="inputCode" @change="inputChange"
        placeholder="请输入患者卡号或扫描条码" />
    </div>
    <div
      class="status"
      :style="{ color: msg === '检测到人脸' ? 'green' : 'red' }">
      {{ msg }}
    </div>
    <div class="rWrapp">
      <video id="myVideo" preload="preload" autoplay loop muted></video>
      <canvas ref="myCanvas" id="myCanvas" class="myCanvas" width="200" height="200"></canvas>
    </div>
    <div v-if="imgSrc" class="img_bg_camera">
      <p>效果预览</p>
      <img :src="imgSrc" class="tx_img" />
    </div>
  </div>
</template>
<script lang="ts" setup>
import { ref, reactive, onMounted, toRefs, nextTick } from "vue";
import { ElMessage, ElMessageBox } from "element-plus";
import { sundSocket } from "@/samples/socketClient";
import { faceShibie, base64toFile } from "@/samples/faceApi";
import { sendPationCodeApi } from "../../samples/httpApi";
import { confingInfoStore } from "@/stores/StoresConfing";

const loading = ref(false);
const os = ref(false); // camera on/off flag
let thisVideo: any = null;   // <video id="videoCamera"> element
let thisContext: any = null; // 2D context of the hidden capture canvas
let thisCanvas: any = null;  // <canvas id="canvasCamera"> element
const videoWidth = ref(500);
const videoHeight = ref(500);
const postOptions = ref([]);
const certCtl = ref("");
const mask = ref(true);

// Query parameters; imgSrc holds the latest captured frame as a data URL
const queryParams = reactive({
  pageNum: 1,
  pageSize: 10,
  imgSrc: undefined as string | undefined,
});
const closedPhono = ref<any>(null); // video element kept so the stream can be stopped later

const emit = defineEmits(["closed"]);
const props = defineProps({
  visible: { type: Boolean },
});
const { visible } = toRefs(props);
const handleChange = (val: unknown) => {
  console.log(visible);
};

// Request camera access and start previewing into <video id="videoCamera">
const getCompetence = () => {
  nextTick(() => {
    os.value = false;
    thisCanvas = document.getElementById("canvasCamera");
    thisContext = thisCanvas.getContext("2d");
    thisVideo = document.getElementById("videoCamera");
    closedPhono.value = thisVideo;
    if (navigator.mediaDevices === undefined) {
      (navigator as any).mediaDevices = {};
    }

    if (navigator.mediaDevices.getUserMedia === undefined) {
      navigator.mediaDevices.getUserMedia = function (constraints) {
        // Fall back to the prefixed/legacy getUserMedia if one exists
        const getUserMedia =
          (navigator as any).webkitGetUserMedia ||
          (navigator as any).mozGetUserMedia ||
          (navigator as any).getUserMedia;
        if (!getUserMedia) {
          return Promise.reject(new Error("getUserMedia is not implemented in this browser"));
        }
        return new Promise(function (resolve, reject) {
          getUserMedia.call(navigator, constraints, resolve, reject);
        });
      };
    }

    const constraints = {
      audio: false,
      video: { width: videoWidth.value, height: videoHeight.value, transform: "scaleX(-1)" },
    };

    navigator.mediaDevices
      .getUserMedia(constraints)
      .then(function (stream) {
        if ("srcObject" in thisVideo) {
          thisVideo.srcObject = stream;
        } else {
          // Older browsers only accept an object URL
          thisVideo.src = window.URL.createObjectURL(stream as any);
        }
        thisVideo.onloadedmetadata = function (e) {
          console.log('摄像头打开了')
          SendTime()
          thisVideo.play();
        };
      })
      .catch((err) => {
        ElMessage.error("没有开启摄像头权限或浏览器版本不兼容");
      });
  });
};
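// Note on the constraints above: `transform: "scaleX(-1)"` is not a standard MediaTrackConstraint,
// so browsers simply ignore it; mirroring the preview would have to be done in CSS on the <video>
// element. Once metadata is loaded, SendTime() starts the periodic capture loop below.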

// Draw the current video frame onto the hidden capture canvas and push it over the socket
const drawImage = () => {
  thisCanvas = document.getElementById("canvasCamera");
  thisContext = thisCanvas.getContext("2d");
  thisVideo = document.getElementById("videoCamera");
  thisContext.drawImage(thisVideo, 0, 0, 300, 300);
  // Grab the frame as a base64 data URL
  queryParams.imgSrc = thisCanvas.toDataURL('image/png');
  // console.log(queryParams.imgSrc);
  const str = `<STX>{"photo":"${queryParams.imgSrc}"}<ETX>`
  sundSocket(str)
};
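// The frame is wrapped as <STX>{"photo":"<base64 data URL>"}<ETX> before being handed to
// sundSocket. The <STX>/<ETX> markers are assumed here to be the framing convention the
// receiving socket service uses to find message boundaries in the byte stream; the payload
// itself is plain JSON with a single "photo" field.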

// Clear a canvas by id
const clearCanvas = (id: string) => {
  const c = document.getElementById(id) as HTMLCanvasElement;
  const cxt = c.getContext("2d");
  cxt?.clearRect(0, 0, 500, 500);
};

// Reset the capture canvas and the cached image
const resetCanvas = () => {
  queryParams.imgSrc = "";
  clearCanvas("canvasCamera");
};
// Periodically capture and send a frame; the interval (seconds) comes from the config store
const SendTime = () => {
  setInterval(drawImage, confingInfoStore().confingInfo.faceRecogDetectInterval * 1000)
}

// Stop the camera stream (exposed to the parent component)
const stopNavigator = () => {
  // thisVideo = document.getElementById("videoCamera");
  if (closedPhono.value && closedPhono.value !== null) {
    thisVideo.srcObject.getTracks()[0].stop();
    os.value = true;
  } else {
    ElMessage.error("没有开启摄像头权限或浏览器版本不兼容");
  }
};
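// Note: with `audio: false` the stream's only track is the video track, so stopping
// `getTracks()[0]` above turns the camera off. The SendTime interval is never cleared,
// so drawImage keeps running against the last (frozen) frame after the camera stops.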

const msg = ref<string>("没识别到人脸...");

let trackerTask: any = null;
const inputCode = ref('')
const inputRef = ref()

// Overlay canvas used to mark detections
const myCanvas = ref<HTMLCanvasElement | null>(null);
// Snapshot of the detected face as a data URL (shown in the preview)
const imgSrc = ref<string>('');
// Instantiate the face detector (tracking.js is expected to be available globally;
// there is no import for it in the original source)
const myTracker: any = new tracking.ObjectTracker("face");
myTracker.setInitialScale(4);
myTracker.setStepSize(2);
myTracker.setEdgesDensity(0.1);

// Listen for face detection results
myTracker.on("track", (event: tracking.TrackEvent) => {
  const context = myCanvas.value?.getContext("2d") as CanvasRenderingContext2D;
  if (myCanvas.value) {
    context.clearRect(0, 0, myCanvas.value.width, myCanvas.value.height);
  }
  if (event.data.length === 0) {
    msg.value = "没识别到人脸...";
  } else if (event.data.length === 1) {
    // Exactly one face: pause the tracker and take a snapshot
    trackerTask.stop();
    msg.value = "检测到人脸";
    const snapshotCanvas = document.getElementById("myCanvas") as HTMLCanvasElement;
    const snapshotContext = snapshotCanvas.getContext("2d");
    const myVideo = document.querySelector("#myVideo") as HTMLVideoElement;
    snapshotContext?.drawImage(myVideo, 0, 0, 250, 200);
    imgSrc.value = snapshotCanvas.toDataURL('image/png');
    // Convert the base64 snapshot to a File
    base64toFile(imgSrc.value)
    setTimeout(() => {
      console.log('监测到人脸后1s')
      trackerTask.run();
    }, 3000);
    // @ts-ignore
    if (typeof window.stream === "object") {
      myVideo.srcObject = null;
      // @ts-ignore
      window.stream.getTracks().forEach((track) => track.stop());
    }
  }
});
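// Detection cycle: while no face is in view the message stays on "没识别到人脸...". As soon as
// exactly one face is reported, the tracker is paused, the current frame is copied onto the
// overlay canvas and handed to base64toFile (presumably for upload by faceApi — that part lives
// outside this component), and after 3 seconds the tracker resumes.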

// Send the typed/scanned patient card number, then clear the input
const inputChange = () => {
  sendPationCodeApi(inputCode.value)
  setTimeout(function () {
    inputCode.value = ''
  }, 1000)
}

onMounted(() => {
  console.log('页面初始化读取配置文件', confingInfoStore().confingInfo)
  getCompetence()
})

onMounted(() => {
  // Start the face detector if it is enabled in the config
  console.log('人脸识别初始化')
  const isUseFaceRecogService = confingInfoStore().confingInfo.isUseFaceRecogService
  console.log('人脸识别', isUseFaceRecogService)
  // Keep the card-number input focused (re-focus every second)
  setInterval(function () {
    inputRef.value.focus();
  }, 1000)
  if (isUseFaceRecogService) {
    console.log('开启人脸识别初始化')
    setTimeout(() => {
      trackerTask = tracking.track("#myVideo", myTracker, { camera: true });
    }, 2000)
  } else {
    console.log('关闭人脸识别')
  }
});

defineExpose({
  stopNavigator,
});
</script>

<style scoped>
.footer {
  height: 50px;
  background-color: white;
  justify-content: space-between;
  float: left;
  z-index: 1999;
}
.mask {
  z-index: 999;
  text-align: center;
}
.go {
  width: 100%;
  background-color: #409EFF;
  text-align: center;
  display: inline-block;
}
.box {
  width: 100%;
  text-align: center;
  background-color: #d9ecff;
}
.canvas1 {
  margin-top: 100px;
  border: 2px solid #409EFF;
  border-radius: 10%;
}
.canvas2 {
  visibility: hidden;
}
</style>

<style lang="less" scoped>
.wrapp {
  height: 100%;
  background-size: 100% 100%;
  padding-top: 10px;
  .rWrapp {
    width: 500px;
    height: 500px;
    margin: auto;
    // margin-top: 30px;
    position: relative;
    .myCanvas {
      position: absolute;
      top: 0;
      left: 0;
      border-radius: 50%;
      width: 100%;
      height: 100%;
    }
    #myVideo {
      width: 100%;
      height: 100%;
      border-radius: 50%;
      object-fit: cover;
    }
  }
  .status {
    // margin-top: 100px;
    text-align: center;
  }
  .img_bg_camera {
    position: absolute;
    top: -500px;
    z-index: -999;
  }
}
</style>