六智科技 преди 7 месеца
родител
ревизия
af632fdf6d

+ 33 - 7
src/assets/camera.html

@@ -82,11 +82,11 @@
     document.body.style.width = `${ width }px`;
     document.body.style.height = `${ height }px`;
 
-    stream = await navigator.mediaDevices.getUserMedia({ audio: false, video: { width, height } });
+    stream = await getMediaStream({ width, height });
 
     video = document.createElement('video');
     video.srcObject = stream;
-    video.addEventListener('canplay', updateCoordinate);
+    video.addEventListener('canplay', () => video.paused && video.play());
     document.body.appendChild(video);
 
     canvas = document.createElement('canvas');
@@ -102,14 +102,39 @@
     context.drawImage(
       video,
       x * dpr, y * dpr, canvas.width, canvas.height,
-      0, 0, canvas.width, canvas.height,
+      0, 0, canvas.width, canvas.height,
     );
     const base64 = canvas.toDataURL('image/png');
     context.clearRect(0, 0, canvas.width, canvas.height);
     return base64;
   }
 
-  function updateCoordinate() {
+  /**
+   *
+   * @param constraints
+   * @param {number} constraints.width
+   * @param {number} constraints.height
+   * @returns {Promise<MediaStream>}
+   */
+  async function getMediaStream(constraints) {
+    const stream = await navigator.mediaDevices.getUserMedia({ video: constraints ?? true, audio: false, });
+    if ( constraints?.width != null && constraints?.height != null ) {
+      // 修正宽高
+      const track = stream.getVideoTracks()[ 0 ];
+      const { width: CW = 1, height: CH = 1, aspectRatio: CAR = CW / CH, ..._constraints } = constraints;
+      const { width: SW = 1, height: SH = 1, aspectRatio: SAR = SW / SH } = track.getSettings();
+      if ( SAR > CAR || (
+        CW === SH && CH === SW
+      ) ) {
+        await track.applyConstraints({ ..._constraints, height: CW, width: CH });
+      }
+    }
+    return stream;
+  }
+
+  function updateCoordinate(offsetX = 0, offsetY = 0) {
+    offsetX = offsetX * zoom;
+    offsetY = offsetY * zoom;
     const track = stream.getVideoTracks()[ 0 ];
     const { width, height } = track.getSettings();
 
@@ -121,8 +146,8 @@
     const root = document.documentElement.getBoundingClientRect();
     const rect = video.getBoundingClientRect();
 
-    x = rect.width / 2 - root.width / 2;
-    y = rect.height / 2 - root.height / 2;
+    x = rect.width / 2 - root.width / 2 + offsetX;
+    y = rect.height / 2 - root.height / 2 + offsetY;
 
     canvas.width = root.width * dpr;
     canvas.height = root.height * dpr;
@@ -141,8 +166,9 @@
   function log() {
     const track = stream.getVideoTracks()[ 0 ];
     const setting = track.getSettings();
-    console.log(setting);
     console.group(`获取摄像头:`);
+    console.log(`width`, setting.width);
+    console.log(`height`, setting.height);
     console.groupEnd();
   }
 </script>

+ 3 - 6
src/modules/camera/ShadeFace.vue

@@ -6,12 +6,9 @@ const Shade: SVGPathData[] = [
   'M0 183A136 183 0 11272 183 136 183 0 110 183Z',
 ].map(d => new SVGPathData(d));
 
-const {
-  translateX = 0, translateY = 0,
-  scaleX = 1, scaleY = 1,
-} = defineProps<{
+const { translateX = 0, translateY = 0, scale = 1, } = defineProps<{
   translateX?: number; translateY?: number;
-  scaleX?: number; scaleY?: number
+  scale?: number;
 }>();
 
 const paths = ref<string[]>([]);
@@ -19,7 +16,7 @@ const paths = ref<string[]>([]);
 watchEffect(() => {
   paths.value = Shade.map(data => data
     .transform(SVGPathDataTransformer.TRANSLATE(translateX, translateY))
-    .transform(SVGPathDataTransformer.SCALE(scaleX, scaleY))
+    .transform(SVGPathDataTransformer.SCALE(scale, scale))
     .encode(),
   );
 });

+ 3 - 6
src/modules/camera/ShadeTongueDown.vue

@@ -6,12 +6,9 @@ const Shade: SVGPathData[] = [
   'M5 205C5 205 32 47 72 13 73 11 74 9 75 8 78 5 87-1 119 5 146 9 158 7 163 5 171 2 178 1 185 3 193 5 201 9 209 19 227 41 252 91 259 161 259 161 266 201 273 205 273 205 252 326 218 348 216 349 214 350 212 352 205 358 190 367 168 367 146 366 120 367 107 368 101 368 94 367 88 365 72 358 41 339 26 294 7 232 3 215 3 215 3 215 0 205 5 205Z',
 ].map(d => new SVGPathData(d));
 
-const {
-  translateX = 0, translateY = 0,
-  scaleX = 1, scaleY = 1,
-} = defineProps<{
+const { translateX = 0, translateY = 0, scale = 1, } = defineProps<{
   translateX?: number; translateY?: number;
-  scaleX?: number; scaleY?: number
+  scale?: number;
 }>();
 
 const paths = ref<string[]>([]);
@@ -19,7 +16,7 @@ const paths = ref<string[]>([]);
 watchEffect(() => {
   paths.value = Shade.map(data => data
     .transform(SVGPathDataTransformer.TRANSLATE(translateX, translateY))
-    .transform(SVGPathDataTransformer.SCALE(scaleX, scaleY))
+    .transform(SVGPathDataTransformer.SCALE(scale, scale))
     .encode(),
   );
 });

+ 3 - 6
src/modules/camera/ShadeTongueUp.vue

@@ -7,12 +7,9 @@ const Shade: SVGPathData[] = [
   'M139 127C139 127 49 81 27 152 6 223 51 357 130 364 205 370 244 331 249 181 249 181 259 75 139 127Z',
 ].map(d => new SVGPathData(d));
 
-const {
-  translateX = 0, translateY = 0,
-  scaleX = 1, scaleY = 1,
-} = defineProps<{
+const { translateX = 0, translateY = 0, scale = 1, } = defineProps<{
   translateX?: number; translateY?: number;
-  scaleX?: number; scaleY?: number
+  scale?: number;
 }>();
 
 const paths = ref<string[]>([]);
@@ -20,7 +17,7 @@ const paths = ref<string[]>([]);
 watchEffect(() => {
   paths.value = Shade.map(data => data
     .transform(SVGPathDataTransformer.TRANSLATE(translateX, translateY))
-    .transform(SVGPathDataTransformer.SCALE(scaleX, scaleY))
+    .transform(SVGPathDataTransformer.SCALE(scale, scale))
     .encode(),
   );
 });

+ 37 - 44
src/modules/camera/camera.config.ts

@@ -9,6 +9,7 @@ const ShadeFace = defineAsyncComponent(() => import('./ShadeFace.vue'));
 
 export const DEFAULT_WIDTH = 270;
 export const DEFAULT_HEIGHT = 366;
+export const DEFAULT_ZOOM = 4;
 
 export interface ConfigProps {
   key: string;
@@ -19,49 +20,41 @@ export interface ConfigProps {
   shade: Component;
   example?: string;
   audio?: string;
+  video?: { scale?: number; offsetX?: number; offsetY?: number; };
 }
 
-function preinstall(zoom = 1, scale = 1) {
-  const width = DEFAULT_WIDTH * scale;
-  const height = DEFAULT_HEIGHT * scale;
-  return {
-    video: { width, height, zoom },
-    shade: { scaleX: scale, scaleY: scale },
-  };
-}
-
-export default {
-  ...preinstall(4, 1.5),
-  segmented: [
-    {
-      title: '舌面拍摄', key: 'upImg',
-      shade: ShadeTongueUp,
-      example: getClientURL('~/camera/step-11.example.png'),
-      audio: getClientURL('~/camera/step-11.audio.wav'),
-      required: true,
-      description: '请确保舌面无食物残渣、没有染色,舌尖向下伸直、 舌体放松、舌面平展、口张大、请避免在有色光线下拍摄。',
-      before: { label: '请将舌头放入框内,点击拍照' },
-      after: { label: '请确认照片', example: '', audio: '' },
-    },
-    {
-      title: '舌下拍摄', key: 'downImg',
-      shade: ShadeTongueDown,
-      example: getClientURL('~/camera/step-21.example.png'),
-      audio: getClientURL('~/camera/step-21.audio.wav'),
-      required: true,
-      description: '舌尖向上抵住上颚、舌体放松、口张大、露出舌下,请避免在有色光线下拍摄。',
-      before: { label: '请将舌下放入框内,点击拍照' },
-      after: { label: '请确认照片', example: '', audio: '' },
-    },
-    {
-      title: '面部拍摄', key: 'faceImg',
-      shade: ShadeFace,
-      example: getClientURL('~/camera/step-31.example.png'),
-      audio: getClientURL('~/camera/step-31.audio.wav'),
-      required: true,
-      description: '请摘下眼镜、平视前方、不要浓妆、不要遮挡面部,请避免在有色光线下拍摄。',
-      before: { label: '请将面部放入框内,点击拍照' },
-      after: { label: '请确认照片', example: '', audio: '' },
-    },
-  ] as ( ConfigProps & { before: Partial<ConfigProps>; after: Partial<ConfigProps> } )[],
-};
+export default [
+  {
+    title: '舌面拍摄', key: 'upImg',
+    shade: ShadeTongueUp,
+    example: getClientURL('~/camera/step-11.example.png'),
+    audio: getClientURL('~/camera/step-11.audio.wav'),
+    required: true,
+    description: '请确保舌面无食物残渣、没有染色,舌尖向下伸直、 舌体放松、舌面平展、口张大、请避免在有色光线下拍摄。',
+    before: { label: '请将舌头放入框内,点击拍照' },
+    after: { label: '请确认照片', example: '', audio: '' },
+    video: { scale: 1, offsetX: 0, offsetY: 50, },
+  },
+  {
+    title: '舌下拍摄', key: 'downImg',
+    shade: ShadeTongueDown,
+    example: getClientURL('~/camera/step-21.example.png'),
+    audio: getClientURL('~/camera/step-21.audio.wav'),
+    required: true,
+    description: '舌尖向上抵住上颚、舌体放松、口张大、露出舌下,请避免在有色光线下拍摄。',
+    before: { label: '请将舌下放入框内,点击拍照' },
+    after: { label: '请确认照片', example: '', audio: '' },
+    video: { scale: 1, offsetX: 0, offsetY: 50, },
+  },
+  {
+    title: '面部拍摄', key: 'faceImg',
+    shade: ShadeFace,
+    example: getClientURL('~/camera/step-31.example.png'),
+    audio: getClientURL('~/camera/step-31.audio.wav'),
+    required: true,
+    description: '请摘下眼镜、平视前方、不要浓妆、不要遮挡面部,请避免在有色光线下拍摄。',
+    before: { label: '请将面部放入框内,点击拍照' },
+    after: { label: '请确认照片', example: '', audio: '' },
+    video: { scale: 2, offsetX: 0, offsetY: 0, },
+  },
+] as ( ConfigProps & { before: Partial<ConfigProps>; after: Partial<ConfigProps> } )[];

+ 8 - 9
src/modules/camera/camera.page.vue

@@ -5,7 +5,7 @@ import { saveFileMethod, uploadFileMethod } from '@/request/api/camera.api';
 import { useVisitor }                       from '@/stores';
 import { tryOnMounted, tryOnUnmounted }     from '@vueuse/core';
 import { useForm, useRequest }              from 'alova/client';
-import Config, { type ConfigProps }         from './camera.config';
+import Segmented, { type ConfigProps }      from './camera.config';
 import Camera                               from './camera.vue';
 
 
@@ -26,7 +26,6 @@ const { form: dataset, loading: submitting, send: submit } = useForm(data => sav
 });
 
 
-const { video, shade, segmented } = Config;
 const step = ref(0);
 const snapshot = ref<string | void>();
 const config = shallowRef<ConfigProps>();
@@ -34,7 +33,7 @@ const config = shallowRef<ConfigProps>();
 const showExample = ref(false);
 
 watch([ step, snapshot ], ([ step, snapshot ], old, onCleanup) => {
-  const { before, after, ..._config } = segmented[ step - 1 ];
+  const { before, after, ..._config } = Segmented[ step - 1 ];
   const old_audio = config.value?.audio;
   config.value = Object.assign(_config, snapshot ? after : before);
 
@@ -85,7 +84,7 @@ const next = async () => {
     }
   }
 
-  if ( step.value === segmented.length ) {
+  if ( step.value === Segmented.length ) {
     submit();
   } else {
     handle();
@@ -111,14 +110,14 @@ tryOnUnmounted(() => {
       <div class="mt-8 text-lg text-center tracking-wider leading-10">{{ config?.description }}</div>
     </header>
     <main class="flex justify-center items-center">
-      <Camera ref="camera" v-bind="video" @loaded="step = 1;">
-        <template #shade>
-          <component :is="config?.shade" v-bind="shade"></component>
+      <Camera ref="camera" v-bind="config?.video" @loaded="step = 1;">
+        <template #shade="{scale}">
+          <component :is="config?.shade" :scale="scale"></component>
           <img v-if="showExample && config?.example" :src="config.example" alt="示例" @click="showExample = false" />
         </template>
       </Camera>
       <div v-if="config?.example"
-           class="size-40 absolute top-4 right-4 cursor-pointer hover:text-primary"
+           class="size-40 absolute -top-8 right-2 cursor-pointer hover:text-primary"
            @click="showExample = !showExample"
       >
         <img class="size-full object-scale-down" :src="config?.example" alt="示例" />
@@ -133,7 +132,7 @@ tryOnUnmounted(() => {
           <img class="h-20" src="@/assets/images/button-confirm.png" alt="确认" @click="next()" />
         </div>
       </div>
-      <div v-else-if="step" class="h-min cursor-pointer hover:text-primary" @click="handle()">
+      <div v-else-if="step" class="h-min text-center cursor-pointer hover:text-primary" @click="handle()">
         <button class="size-28 border-8 rounded-full hover:border-primary"></button>
         <div class="mt-8 text-3xl">{{ showExample ? '开始拍照' : '点击拍照' }}</div>
       </div>

+ 34 - 15
src/modules/camera/camera.vue

@@ -1,30 +1,49 @@
 <script setup lang="ts">
 import Camera from '@/assets/camera.html?url';
+import { DEFAULT_HEIGHT, DEFAULT_WIDTH, DEFAULT_ZOOM } from '@/modules/camera/camera.config';
 
-
-const { width, height, zoom, preview = true } = defineProps<{
-  width: number;
-  height: number;
-  zoom?: number;
-  preview?: boolean
+const {
+  preview = true,
+  scale = 1,
+  offsetX = 0,
+  offsetY = 0,
+} = defineProps<{
+  scale?: number;
+  offsetX?: number;
+  offsetY?: number;
+  preview?: boolean;
 }>();
 const emits = defineEmits<{ loaded: [] }>();
-const style = computed(() => `width: ${ width }px;height: ${ height }px;`);
+const style = computed(() => `width: ${scale * DEFAULT_WIDTH}px;height: ${scale * DEFAULT_HEIGHT}px;`);
 
 const snapshot = ref<string | void>();
 
-const cameraFrameRef = useTemplateRef<HTMLIFrameElement & {
-  contentWindow: {
-    loadCamera(props: { width: number, height: number, zoom?: number }): Promise<void>;
-    handle(promise?: Promise<void>): string;
+const cameraFrameRef = useTemplateRef<
+  HTMLIFrameElement & {
+    contentWindow: {
+      loadCamera(props: { width: number; height: number; zoom?: number }): Promise<void>;
+      handle(promise?: Promise<void>): string;
+    };
   }
-}>('camera-frame');
+>('camera-frame');
 
 const loadCamera = async () => {
-  await cameraFrameRef.value?.contentWindow.loadCamera?.({ width, height, zoom });
+  await cameraFrameRef.value?.contentWindow.loadCamera?.({
+    width: DEFAULT_WIDTH,
+    height: DEFAULT_HEIGHT,
+    zoom: DEFAULT_ZOOM,
+  });
+
+  cameraFrameRef.value?.contentWindow.addEventListener('resize', update);
   emits('loaded');
 };
 
+watch([() => offsetX, () => offsetY], () => { setTimeout(update, 100); });
+
+function update() {
+  cameraFrameRef.value?.contentWindow.updateCoordinate?.(offsetX, offsetY);
+}
+
 defineExpose({
   handle() {
     if ( !preview || !snapshot.value ) {
@@ -40,13 +59,13 @@ defineExpose({
   <div class="relative camera-container" :style="style">
     <iframe ref="camera-frame" :src="Camera" @load="loadCamera()"></iframe>
     <img v-if="snapshot" :src="snapshot" alt="图像" />
-    <slot name="shade" :style="style" :width="width" :height="height"></slot>
+    <slot name="shade" :style="style" :scale="scale"></slot>
   </div>
 </template>
 <style scoped lang="scss">
 .camera-container {
   iframe {
-    clip-path: url("#shade");
+    clip-path: url('#shade');
   }
 
   img {